Datasets:

| column         | dtype  | length / value range |
|----------------|--------|----------------------|
| function_name  | string | 1 to 63 chars        |
| docstring      | string | 50 to 5.89k chars    |
| masked_code    | string | 50 to 882k chars     |
| implementation | string | 169 to 12.9k chars   |
| start_line     | int32  | 1 to 14.6k           |
| end_line       | int32  | 16 to 14.6k          |
| file_content   | string | 274 to 882k chars    |

Four sample rows follow; each row masks one `note` method of the module `src/chapter28/chapter28note.py`.
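For orientation, here is a minimal sketch of reading one row with the Hugging Face `datasets` library. The repository id and split name are hypothetical placeholders, since the dump above does not name the dataset.

```python
# Minimal sketch: load one row of a dataset with the schema above.
# The repo id "someuser/masked-functions" and the split "train" are
# HYPOTHETICAL placeholders; substitute the real identifiers.
from datasets import load_dataset

ds = load_dataset("someuser/masked-functions", split="train")
row = ds[0]
print(row["function_name"], row["start_line"], row["end_line"])

# file_content should equal masked_code with the single MASKED marker
# line replaced by the implementation (possibly up to whitespace).
marker = "# MASKED: {} function (lines {}-{})".format(
    row["function_name"], row["start_line"], row["end_line"])
restored = row["masked_code"].replace(marker, row["implementation"].strip())
print(restored == row["file_content"])  # may differ only in indentation
```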
Row 1
- function_name: `note`
- docstring: Summary: Print chapter28.1 note. Example: `Chapter28_1().note()`
- masked_code: the full module source (see file_content below) with the body of `Chapter28_1.note` replaced by the marker `# MASKED: note function (lines 30-88)`
- implementation (the masked span; string literals translated from the Chinese original):

````python
    def note(self):
        """
        Summary
        ====
        Print chapter28.1 note

        Example
        ====
        ```python
        Chapter28_1().note()
        ```
        """
        print('chapter28.1 note as follows')
        print('28.1 Properties of matrices')
        print('Matrix operations are essential in scientific computing')
        print('A matrix is a rectangular array of numbers; in Python use np.matrix([[1, 2], [3, 4]])')
        print('Matrices and vectors')
        print('Identity matrix')
        print('Zero matrix')
        print('Diagonal matrix')
        print('Tridiagonal matrix')
        print('Upper-triangular matrix')
        print('Lower-triangular matrix')
        print('Permutation matrix')
        print('Symmetric matrix')
        print('Matrix multiplication is associative, and it distributes over addition')
        print('The Frobenius norm and the 2-norm of a matrix')
        print('The 2-norm of a vector')
        print('Matrix inverse, rank, and determinant')
        print('Theorem 28.1 A square matrix has full rank if and only if it is nonsingular')
        print('Theorem 28.2 A matrix A has full column rank if and only if it has no null vector')
        print('Theorem 28.3 A square matrix A is singular if and only if it has a null vector')
        print('Theorem 28.4 (Determinant properties) The determinant of a square matrix A satisfies:')
        print('    det(A) = 0 if any row or any column of A is all zeros')
        print('    multiplying the entries of any one row (or column) by a constant l multiplies det(A) by l')
        print('    det(A) equals det(A^T)')
        print('    exchanging any two rows (or two columns) negates the determinant')
        print('Theorem 28.5 An n*n square matrix A is singular if and only if det(A) = 0')
        print('Positive-definite matrices')
        print('Theorem 28.6 For any matrix A with full column rank, the matrix A^T A is positive-definite')
        print('Exercise 28.1-1 Show that if A and B are symmetric n*n matrices, then so are A+B and A-B')
        print('Exercise 28.1-2 Show that (AB)^T = B^T A^T and that A A^T is always a symmetric matrix')
        print('Exercise 28.1-3 Show that matrix inverses are unique: if B and C are both inverses of A, then B = C')
        print('Exercise 28.1-4 Show that the product of two lower-triangular matrices is lower-triangular;',
              'that the determinant of a lower- (or upper-) triangular matrix is the product of its diagonal entries;',
              'and that the inverse of a lower-triangular matrix, if it exists, is lower-triangular')
        print('Exercise 28.1-5 Show that if P is an n*n permutation matrix and A an n*n matrix, then PA permutes',
              'the rows of A and AP permutes its columns; that the product of two permutation matrices is a',
              'permutation matrix; and that any permutation matrix P is invertible with inverse P^T, itself a permutation matrix')
        print("Exercise 28.1-6 Let A and B be n*n matrices with AB = I. Show that if A' is obtained from A by adding row j",
              "to row i, then the inverse B' of A' is obtained by subtracting column i from column j of B")
        print('Exercise 28.1-7 Let A be a nonsingular n*n complex matrix. Show that every entry of A^-1 is real',
              'if and only if every entry of A is real')
        print('Exercise 28.1-8 Show that if A is a nonsingular symmetric n*n matrix, then A^-1 is symmetric,',
              'and that for any m*n matrix B the m*m product B A B^T is symmetric')
        print('Exercise 28.1-9 Prove Theorem 28.2: a matrix A has full column rank if and only if Ax = 0 implies x = 0')
        print('Exercise 28.1-10 Show that rank(AB) <= min(rank(A), rank(B)) for any two compatible matrices A and B,',
              'with equality when A or B is a nonsingular square matrix (use an equivalent definition of rank)')
        print('Exercise 28.1-11 Given numbers x0, x1, ..., xn-1, prove the determinant formula for the Vandermonde matrix')
````
- start_line: 30
- end_line: 88
- file_content: the complete module `src/chapter28/chapter28note.py`. The bodies of `Chapter28_1.note`, `Chapter28_2.note`, and `Chapter28_3.note` are identical to the implementation fields of rows 1-3 and are elided in this listing; string literals are translated from the Chinese original:

````python
# coding:utf-8
# usr/bin/python3
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
"""
Class Chapter28_1
Class Chapter28_2
Class Chapter28_3
Class Chapter28_4
Class Chapter28_5
"""
from __future__ import absolute_import, division, print_function

import numpy as np


class Chapter28_1:
    """
    chapter28.1 note and function
    """
    def __init__(self):
        pass

    def note(self):
        ...  # body: the implementation field of row 1 (lines 30-88)


class Chapter28_2:
    """
    chapter28.2 note and function
    """
    def __init__(self):
        pass

    def note(self):
        ...  # body: the implementation field of row 2 (lines 100-146)


class Chapter28_3:
    """
    chapter28.3 note and function
    """
    def __init__(self):
        pass

    def note(self):
        ...  # body: the implementation field of row 3 (lines 158-199)


class Chapter28_4:
    """
    chapter28.4 note and function
    """
    def note(self):
        """
        Summary
        ====
        Print chapter28.4 note

        Example
        ====
        ```python
        Chapter28_4().note()
        ```
        """
        print('chapter28.4 note as follows')
        print('28.4 Inverting matrices')
        print('In practice, one generally does not solve a linear system with the inverse matrix,',
              'but with more numerically stable techniques such as LUP decomposition')
        print('Sometimes, however, a matrix inverse is needed; it can be computed from an LUP decomposition')
        print('Moreover, matrix multiplication and matrix inversion are equally hard problems:',
              '(subject to technical conditions) an algorithm for either can be used to solve the other',
              'in the same asymptotic running time')
        print("Strassen's matrix multiplication algorithm can therefore be used to invert a matrix")
        print("Indeed, the original Strassen algorithm was motivated by showing that linear systems",
              'can be solved faster than by the usual method')
        print('Computing an inverse from an LUP decomposition')
        print('    Suppose we have an LUP decomposition of a matrix A: three matrices L, U, P with PA = LU')
        print('    Using LU-SOLVE, a linear system of the form Ax = b can then be solved in Θ(n^2) running time')
        print("    Since the LUP decomposition depends on A but not on b, a second system Ax = b' can be solved",
              'in a further Θ(n^2) running time')
        print('    In general, once the LUP decomposition of A is in hand, k systems of the form Ax = b,',
              'differing only in b, can be solved in Θ(kn^2) running time')
        print('Matrix multiplication and matrix inversion')
        print('    A theoretical speedup of matrix multiplication yields a corresponding speedup of matrix inversion')
        print('    Inversion is equivalent to multiplication in the following sense:',
              'if M(n) denotes the time to multiply two n*n matrices, then an n*n matrix can be inverted in O(M(n)) time;',
              'if I(n) denotes the time to invert a nonsingular n*n matrix, then two n*n matrices can be multiplied in O(I(n)) time')
        print('Theorem 28.7 (Multiplication is no harder than inversion) If an n*n matrix can be inverted in I(n) time,',
              'where I(n) = Ω(n^2) satisfies the regularity condition I(3n) = O(I(n)),',
              'then two n*n matrices can be multiplied in O(I(n)) time')
        print('Theorem 28.8 (Inversion is no harder than multiplication) If two n*n real matrices can be multiplied in M(n) time,',
              'where M(n) = Ω(n^2) and M(n) satisfies the two regularity conditions M(n+k) = O(M(n)) for any 0 <= k <= n',
              'and M(n/2) <= cM(n) for some constant c < 1/2,',
              'then the inverse of any nonsingular n*n real matrix can be computed in O(M(n)) time')
        print('Exercise 28.4-1 Let M(n) be the time to multiply n*n matrices and S(n) the time to square an n*n matrix.',
              'Show that multiplying and squaring matrices are essentially equally hard: an M(n)-time multiplication algorithm',
              'implies an O(M(n))-time squaring algorithm, and an S(n)-time squaring algorithm implies an O(S(n))-time multiplication algorithm')
        print('Exercise 28.4-2 Let M(n) be the time to multiply n*n matrices and L(n) the time to compute an LUP decomposition',
              'of an n*n matrix. Show that the two are essentially equally hard: an M(n)-time multiplication algorithm implies',
              'an O(M(n))-time LUP decomposition algorithm, and an L(n)-time LUP decomposition algorithm implies an O(L(n))-time multiplication algorithm')
        print('Exercise 28.4-3 Let M(n) be the time to multiply n*n matrices and D(n) the time to compute the determinant',
              'of an n*n matrix. Show that the two are essentially equally hard: an M(n)-time multiplication algorithm implies',
              'an O(M(n))-time determinant algorithm, and a D(n)-time determinant algorithm implies an O(D(n))-time multiplication algorithm')
        print('Exercise 28.4-4 Let M(n) be the time to multiply n*n boolean matrices and T(n) the time to compute the',
              'transitive closure of an n*n boolean matrix. Show that an M(n)-time boolean multiplication algorithm implies',
              'an O(M(n)lgn)-time transitive closure algorithm, and a T(n)-time transitive closure algorithm implies an O(T(n))-time boolean multiplication algorithm')
        print('Exercise 28.4-5 Does the inversion algorithm based on Theorem 28.8 still work when matrix entries belong',
              'to the field of integers modulo 2?')
        print('Exercise 28.4-6 Generalize the inversion algorithm based on Theorem 28.8 to complex matrices,',
              'and prove that the generalization is correct.')
        print('    Hint: replace the transpose A^T with the conjugate transpose A* (the Hermitian transpose),',
              'obtained by replacing each entry of A^T with its complex conjugate')

    # python src/chapter28/chapter28note.py
    # python3 src/chapter28/chapter28note.py


class Chapter28_5:
    """
    chapter28.5 note and function
    """
    def note(self):
        """
        Summary
        ====
        Print chapter28.5 note

        Example
        ====
        ```python
        Chapter28_5().note()
        ```
        """
        print('chapter28.5 note as follows')
        print('28.5 Symmetric positive-definite matrices and least-squares approximation')
        print('Symmetric positive-definite matrices have many interesting and desirable properties: for example,',
              'they are nonsingular, and LU decomposition can be performed on them without worrying about dividing by 0')
        print('Lemma 28.9 Any positive-definite matrix is nonsingular')
        print('Lemma 28.10 If A is a symmetric positive-definite matrix, then every leading submatrix of A is symmetric positive-definite')
        print('Let A be a symmetric positive-definite matrix and Ak its k*k leading submatrix;',
              'the Schur complement of A with respect to Ak is defined as S = C - B Ak^-1 B^T')
        print('Lemma 28.11 (Schur complement lemma) If A is a symmetric positive-definite matrix and Ak is its k*k leading submatrix,',
              'then the Schur complement of A with respect to Ak is symmetric positive-definite')
        print('Corollary 28.12 LU decomposition of a symmetric positive-definite matrix never divides by 0')
        print('Least-squares approximation')
        print('Fitting a curve to a set of given data points is an important application of symmetric positive-definite matrices.',
              'Suppose we are given m data points (x1,y1),(x2,y2),...,(xm,ym), where the yi are known to be subject to measurement error,',
              'and we wish to find a function F(x) such that yi = F(xi) + qi for i = 1,2,...,m')
        print('where the approximation errors qi are small; the form of F(x) depends on the problem at hand,',
              'and here we assume the linearly weighted sum F(x) = Σ ci fi(x)')
        print('The number of summands and the particular basis functions fi are chosen based on knowledge of the problem;',
              'a common choice is fi(x) = x^(j-1), which makes F(x) a polynomial of degree n-1 in x')
        print('Such a high-degree F fits the data easily, but it also fits noise,',
              'and it generally predicts y poorly at previously unseen values of x')
        print('To minimize approximation error, choose c to minimize the norm of the error vector q;',
              'the result is a least-squares solution')
        print('The normal equations of statistics: A^T A c = A^T y')
        print('The pseudoinverse: A+ = (A^T A)^-1 A^T')
        print('Exercise 28.5-1 Prove that every diagonal entry of a symmetric positive-definite matrix is positive')
        print('Exercise 28.5-2 Let A = [[a, b], [b, c]] be a 2*2 symmetric positive-definite matrix;',
              'prove that its determinant ac - b^2 is positive')
        print('Exercise 28.5-3 Prove that the largest entry of a symmetric positive-definite matrix lies on its diagonal')
        print('Exercise 28.5-4 Prove that the determinant of every leading submatrix of a symmetric positive-definite matrix is positive')
        print('Exercise 28.5-5 Let Ak denote the k-th leading submatrix of a symmetric positive-definite matrix A.',
              'Prove that det(Ak)/det(Ak-1) is the k-th pivot during LU decomposition, where by convention det(A0) = 1')
        print('Exercise 28.5-6 Solve by least squares')
        print('Exercise 28.5-7 Prove that the pseudoinverse A+ satisfies the four equations:')
        print('    A A+ A = A')
        print('    A+ A A+ = A+')
        print('    (A A+)^T = A A+')
        print('    (A+ A)^T = A+ A')
        print('Problem 28-1 Tridiagonal systems of linear equations')
        print('    1) Show that for any n*n symmetric positive-definite tridiagonal matrix A and any n-vector b,',
              'the equation Ax = b can be solved in O(n) time by performing an LU decomposition;',
              'argue that, asymptotically in the worst case, any method based on forming A^-1 costs more time')
        print('    2) Show that for any n*n symmetric positive-definite tridiagonal matrix A and any n-vector b,',
              'the equation Ax = b can be solved in O(n) time by performing an LUP decomposition')
        print('Problem 28-2 Cubic spline interpolation')
        print('    Fit a curve composed of n cubic polynomials')
        print('    A natural cubic spline can interpolate a set of n+1 point-value pairs in O(n) time')

    # python src/chapter28/chapter28note.py
    # python3 src/chapter28/chapter28note.py


chapter28_1 = Chapter28_1()
chapter28_2 = Chapter28_2()
chapter28_3 = Chapter28_3()
chapter28_4 = Chapter28_4()
chapter28_5 = Chapter28_5()


def printchapter28note():
    """
    print chapter28 note.
    """
    print('Run main : single chapter twenty-eight!')
    chapter28_1.note()
    chapter28_2.note()
    chapter28_3.note()
    chapter28_4.note()
    chapter28_5.note()

# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
if __name__ == '__main__':
    printchapter28note()
else:
    pass
````
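Row 1's implementation prints the section 28.1 facts relating rank, determinant, and invertibility (Theorems 28.1 and 28.5). A small numpy illustration of those facts, on toy matrices chosen here rather than taken from the text:

```python
# Illustration of the chapter 28.1 facts printed in row 1's implementation:
# a square matrix is nonsingular iff det(A) != 0 iff it has full rank.
import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.linalg.det(A))             # -2.0: nonsingular (Theorem 28.5)
print(np.linalg.matrix_rank(A))     # 2: full rank (Theorem 28.1)
print(np.linalg.inv(A))             # the inverse exists and is unique

S = np.array([[1.0, 2.0], [2.0, 4.0]])
print(np.linalg.det(S))             # ~0.0: singular (Theorem 28.5)
print(np.linalg.matrix_rank(S))     # 1: rank-deficient, no inverse
```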
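The 28.5 notes in the listing above end with the normal equations A^T A c = A^T y and the pseudoinverse A+ = (A^T A)^-1 A^T. A minimal least-squares sketch of both, on made-up example data:

```python
# Least-squares fit of F(x) = c0 + c1*x via the normal equations from the
# 28.5 notes above; the data points are invented for illustration.
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.1, 0.9, 2.2, 2.9])              # roughly y = x, with noise
A = np.vander(x, N=2, increasing=True)          # basis columns [1, x]
c = np.linalg.solve(A.T @ A, A.T @ y)           # normal equations A^T A c = A^T y
print(c)                                        # ~[0.07, 0.97]: intercept, slope
pseudo = np.linalg.pinv(A)                      # A+, computed stably; prefer this
print(pseudo @ y)                               # over the explicit (A^T A)^-1 A^T
```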
Row 2
- function_name: `note`
- docstring: Summary: Print chapter28.2 note. Example: `Chapter28_2().note()`
- masked_code: the same module (see the listing in row 1) with the body of `Chapter28_2.note` replaced by the marker `# MASKED: note function (lines 100-146)`
- implementation (the masked span, translated; a runnable Strassen sketch follows this row):

````python
    def note(self):
        '''
        Summary
        ====
        Print chapter28.2 note

        Example
        ====
        ```python
        Chapter28_2().note()
        ```
        '''
        print('chapter28.2 note as follows')
        print("28.2 Strassen's algorithm for matrix multiplication")
        print("Strassen's well-known recursive algorithm multiplies two n*n matrices in Θ(n^lg7) = Θ(n^2.81) time")
        print('For sufficiently large n it outperforms the Θ(n^3) naive MATRIX-MULTIPLY algorithm of section 25.1')
        print('Overview of the algorithm')
        print("    Strassen's algorithm can be viewed as an application of a familiar design technique: divide and conquer")
        print('    Suppose we wish to compute C = AB, where A, B, and C are n*n square matrices; assuming n is a power of 2,',
              'split each of A, B, and C into four n/2*n/2 matrices')
        print('    Blockwise multiplication then gives the recurrence T(n) = 8T(n/2) + Θ(n^2), so T(n) = Θ(n^3)')
        print('Strassen discovered a different recursion that performs only 7 recursive n/2*n/2 matrix multiplications',
              'plus Θ(n^2) scalar additions and subtractions')
        print('This gives the recurrence T(n) = 7T(n/2) + Θ(n^2), so T(n) = Θ(n^2.81)')
        print("Strassen's method has the following four steps:")
        print('    1) split the input matrices A and B into n/2*n/2 submatrices')
        print('    2) using Θ(n^2) scalar additions and subtractions, compute 14 n/2*n/2 matrices A1,B1,A2,B2,...,A7,B7')
        print('    3) recursively compute the 7 matrix products Pi = AiBi for i = 1,2,...,7')
        print('    4) using only Θ(n^2) scalar additions and subtractions, add and subtract combinations of the Pi',
              'to obtain the four submatrices r, s, t, u of the result matrix C')
        print("From a practical point of view, Strassen's method is usually not the method of choice:")
        print('    1) the constant factor hidden in its running time is larger than that of the simple Θ(n^3) method')
        print('    2) when the matrices are sparse, methods designed for sparse matrices are faster')
        print("    3) Strassen's algorithm is not quite as numerically stable as the simple method")
        print('    4) the submatrices formed at the levels of the recursion consume space')
        # ! The key to Strassen's method is divide-and-conquer recursion on matrix multiplication
        print("Exercise 28.2-1 Use Strassen's algorithm to compute the matrix product")
        print('The product is:')
        print(np.matrix([[1, 3], [5, 7]]) * np.matrix([[8, 4], [6, 2]]))
        print("Exercise 28.2-2 How would you modify Strassen's algorithm to multiply two n*n matrices when n is not",
              'an exact power of 2? Show that the resulting algorithm runs in Θ(n^lg7) time')
        print('Exercise 28.2-3 If two 3*3 matrices can be multiplied using k multiplications (not assumed commutative),',
              'then two n*n matrices can be multiplied in o(n^lg7) time; what is the largest such k?')
        print('Exercise 28.2-4 V. Pan discovered a way to multiply 68*68 matrices using 132464 multiplications,',
              'a way to multiply 70*70 matrices using 143640 multiplications,',
              'and a way to multiply 72*72 matrices using 155424 multiplications')
        print("Exercise 28.2-5 Using Strassen's algorithm as a subroutine, how quickly can a kn*n matrix be multiplied",
              'by an n*kn matrix?')
        print('Exercise 28.2-6 Show how to multiply the complex numbers a+bi and c+di using only three real multiplications;',
              'the algorithm should take a, b, c, and d as input and produce the real part ac-bd and the imaginary part ad+bc separately')
````
- start_line: 100
- end_line: 146
- file_content: identical to the full module listing shown in row 1
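Row 2's implementation above describes Strassen's four steps and the T(n) = 7T(n/2) + Θ(n^2) recurrence but does not list the seven products. A compact recursive sketch for power-of-two n, using one standard choice of the seven products (illustrative, not part of the dataset):

```python
# Strassen sketch for n-by-n numpy arrays, n a power of 2.
import numpy as np

def strassen(A, B):
    n = A.shape[0]
    if n == 1:
        return A * B                       # 1x1 base case
    h = n // 2
    A11, A12, A21, A22 = A[:h, :h], A[:h, h:], A[h:, :h], A[h:, h:]
    B11, B12, B21, B22 = B[:h, :h], B[:h, h:], B[h:, :h], B[h:, h:]
    # 7 recursive products: T(n) = 7T(n/2) + Theta(n^2)
    P1 = strassen(A11, B12 - B22)
    P2 = strassen(A11 + A12, B22)
    P3 = strassen(A21 + A22, B11)
    P4 = strassen(A22, B21 - B11)
    P5 = strassen(A11 + A22, B11 + B22)
    P6 = strassen(A12 - A22, B21 + B22)
    P7 = strassen(A11 - A21, B11 + B12)
    # Theta(n^2) combinations give the four result blocks
    C11 = P5 + P4 - P2 + P6
    C12 = P1 + P2
    C21 = P3 + P4
    C22 = P5 + P1 - P3 - P7
    return np.vstack((np.hstack((C11, C12)), np.hstack((C21, C22))))

A = np.array([[1, 3], [5, 7]])
B = np.array([[8, 4], [6, 2]])
print(strassen(A, B))                      # [[26 10] [82 34]], as in exercise 28.2-1
```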
Row 3
- function_name: `note`
- docstring: Summary: Print chapter28.3 note. Example: `Chapter28_3().note()`
- masked_code: the same module (see the listing in row 1) with the body of `Chapter28_3.note` replaced by the marker `# MASKED: note function (lines 158-199)`
- implementation (the masked span, translated; a runnable LUP sketch follows this row):

````python
    def note(self):
        '''
        Summary
        ====
        Print chapter28.3 note

        Example
        ====
        ```python
        Chapter28_3().note()
        ```
        '''
        print('chapter28.3 note as follows')
        print('28.3 Solving systems of linear equations')
        print('Solving a set of simultaneous linear equations Ax = b is a fundamental problem that arises in many applications;',
              'a linear system can be expressed as a matrix equation whose matrix and vector entries belong to a field,',
              'such as the real numbers R')
        print('Solving linear systems by LUP decomposition')
        print('The idea of LUP decomposition is to find three n*n matrices L, U, and P such that PA = LU')
        print('    where L is a unit lower-triangular matrix, U is an upper-triangular matrix, and P is a permutation matrix')
        print('Every nonsingular matrix A has such a decomposition')
        print('The advantage of an LUP decomposition of A is that linear systems are easier to solve when the matrices',
              'are triangular, as L and U are')
        print('Once the LUP decomposition of A is computed, Ax = b can be solved by solving only triangular systems, as follows')
        print('Multiplying both sides of Ax = b by P gives the equivalent system PAx = Pb, that is, LUx = Pb')
        print('Forward and back substitution')
        print('    Given L, P, and b, forward substitution solves the lower-triangular system in Θ(n^2) time,',
              'using an array pi[1..n] to represent the permutation P')
        print('Computing an LU decomposition')
        print('    The process of performing an LU decomposition is called Gaussian elimination: subtract multiples of the',
              'first equation from the other equations so that the first variable is eliminated from them')
        print('    Repeat this process until the system is in upper-triangular form; that matrix is U,',
              'and L is made up of the row multipliers used to eliminate the variables')
        print('Computing an LUP decomposition')
        print('    In general, to solve Ax = b we must pivot on off-diagonal entries of A to avoid dividing by 0;',
              'the divisor must not be 0, nor even small (even if A is nonsingular), or the computation becomes numerically',
              'unstable, so the chosen pivot must be a large value')
        print('    The mathematics of LUP decomposition is similar to LU: given a nonsingular n*n matrix A, find a',
              'permutation matrix P, a unit lower-triangular matrix L, and an upper-triangular matrix U with PA = LU')
        print('Exercise 28.3-1 Solve the following system by forward substitution')
        print('Exercise 28.3-2 Find an LU decomposition of the following matrix')
        print('Exercise 28.3-3 Solve the following system using an LUP decomposition')
        print('Exercise 28.3-4 Describe the LUP decomposition of a diagonal matrix')
        print('Exercise 28.3-5 Describe the LUP decomposition of a permutation matrix A, and prove that it is unique')
        print('Exercise 28.3-6 Show that for all n >= 1 there exist singular n*n matrices that have LU decompositions')
        print('Exercise 28.3-7 In LU-DECOMPOSITION, is it necessary to perform the outermost for-loop iteration with k = n?',
              'What about in LUP-DECOMPOSITION?')
````
- start_line: 158
- end_line: 199
# coding:utf-8 # usr/bin/python3 # python src/chapter28/chapter28note.py # python3 src/chapter28/chapter28note.py """ Class Chapter28_1 Class Chapter28_2 Class Chapter28_3 Class Chapter28_4 Class Chapter28_5 """ from __future__ import absolute_import, division, print_function import numpy as np class Chapter28_1: """ chapter28.1 note and function """ def __init__(self): pass def note(self): """ Summary ==== Print chapter28.1 note Example ==== ```python Chapter28_1().note() ``` """ print('chapter28.1 note as follow') print('28.1 矩阵的性质') print('矩阵运算在科学计算中非常重要') print('矩阵是数字的一个矩阵阵列,在python中使用np.matrix[[1,2],[3,4]]') print('矩阵和向量') print('单位矩阵') print('零矩阵') print('对角矩阵') print('三对角矩阵') print('上三角矩阵') print('下三角矩阵') print('置换矩阵') print('对称矩阵') print('矩阵乘法满足结合律,矩阵乘法对假发满足分配律') print('矩阵的F范数和2范数') print('向量的2范数') print('矩阵的逆,秩和行列式') print('定理28.1 一个方阵满秩当且仅当它为非奇异矩阵') print('定理28.2 当且仅当A无空向量,矩阵A列满秩') print('定理28.3 当且仅当A具有空向量时,方阵A是奇异的') print('定理28.4 (行列式的性质) 方阵A的行列式具有如下的性质') print(' 如果A的任何行或者列的元素为0,则det(A)=0') print(' 用常数l乘A的行列式任意一行(或任意一列)的各元素,等于用l乘A的行列式') print(' A的行列式的值与其转置矩阵A^T的行列式的值相等') print(' 行列式的任意两行(或者两列)互换,则其值异号') print('定理28.5 当且仅当det(A)=0,一个n*n方阵A是奇异的') print('正定矩阵') print('定理28.6 对任意列满秩矩阵A,矩阵A\'A是正定的') print('练习28.1-1 证明:如果A和B是n*n对称矩阵,则A+B和A-B也是对称的') print('练习28.1-2 证明:(AB)\'=B\'A\',而且AA\'总是一个对称矩阵') print('练习28.1-3 证明:矩阵的逆是唯一的,即如果B和C都是A的逆矩阵,则B=C') print('练习28.1-4 证明:两个下三角矩阵的乘积仍然是一个下三角矩阵.', '证明:一个下三角(或者上三角矩阵)矩阵的行列式的值是其对角线上的元素之积', '证明:一个下三角矩阵如果存在逆矩阵,则逆矩阵也是一个下三角矩阵') print('练习28.1-5 证明:如果P是一个n*n置换矩阵,A是一个n*n矩阵,则可以把A的各行进行置换得到PA', '而把A的各列进行置换可得到AP。证明:两个置换矩阵的乘积仍然是一个置换矩阵', '证明:如果P是一个置换矩阵,则P是可逆矩阵,其逆矩阵是P^T,且P^T也是一个置换矩阵') print('练习28.1-6 设A和B是n*n矩阵,且有AB=I.证明:如果把A的第j行加到第i行而得到A‘', '则可以通过把B的第j列减去第i列而获得A’的逆矩阵B‘') print('练习28.1-7 设A是一个非奇异的n*n复数矩阵.证明:当且仅当A的每个元素都是实数时,', 'A-1的每个元素都是实数') print('练习28.1-8 证明:如果A是一个n*n阶非奇异的对称矩阵,则A-1也是一个对称矩阵.', '证明:如果B是一个任意的m*n矩阵,则由乘积BAB^T给出的m*m矩阵是对称的') print('练习28.1-9 证明定理28.2。亦即,证明如果矩阵A为列满秩当且仅当若Ax=0,则说明x=0') print('练习28.1-10 证明:对任意两个相容矩阵A和B,rank(AB)<=min(rank(A),rank(B))', '其中等号仅当A或B是非奇异方阵时成立.(利用矩阵秩的另一种等价定义)') print('练习28.1-11 已知数x0,x1,...,xn-1,证明范德蒙德(Vandermonde)矩阵的行列式表达式') # python src/chapter28/chapter28note.py # python3 src/chapter28/chapter28note.py class Chapter28_2: """ chapter28.2 note and function """ def __init__(self): pass def note(self): ''' Summary ==== Print chapter28.2 note Example ==== ```python Chapter28_2().note() ``` ''' print('chapter28.2 note as follow') print('28.2 矩阵乘法的Strassen算法') print('两个n*n矩阵乘积的著名的Strassen递归算法,其运行时间为Θ(n^lg7)=Θ(n^2.81)') print('对足够大的n,该算法在性能上超过了在25.1节中介绍的运行时间为Θ(n^3)的简易矩阵乘法算法MATRIX-MULTIPLY') print('算法概述') print(' Strassen算法可以看作是熟知的一种设计技巧--分治法的一种应用') print(' 假设希望计算乘积C=AB,其中A、B和C都是n*n方阵.假定n是2的幂,把A、B和C都划分为四个n/2*n/2矩阵') print(' 然后作分块矩阵乘法,可以得到递归式T(n)=8T(n/2)+Θ(n^2),但是T(n)=Θ(n^3)') print('Strassen发现了另外一种不同的递归方法,该方法只需要执行7次递归的n/2*n/2的矩阵乘法运算和Θ(n^2)次标量加法与减法运算') print('从而可以得到递归式T(n)=7T(n/2)+Θ(n^2),但是T(n)=Θ(n^2.81)') print('Strassen方法分为以下四个步骤') print(' 1) 把输入矩阵A和B划分为n/2*n/2的子矩阵') print(' 2) 运用Θ(n^2)次标量加法与减法运算,计算出14个n/2*n/2的矩阵A1,B1,A2,B2,...,A7,B7') print(' 3) 递归计算出7个矩阵的乘积Pi=AiBi,i=1,2,...,7') print(' 4) 仅使用Θ(n^2)次标量加法与减法运算,对Pi矩阵的各种组合进行求和或求差运算,', '从而获得结果矩阵C的四个子矩阵r,s,t,u') print('从实用的观点看,Strassen方法通常不是矩阵乘法所选择的方法') print(' 1) 在Strassen算法的运行时间中,隐含的常数因子比简单的Θ(n^3)方法中的常数因子要大') print(' 2) 当矩阵是稀疏的时候,为系数矩阵设计的方法更快') print(' 3) Strassen算法不像简单方法那样具有数值稳定性') print(' 4) 在递归层次中生成的子矩阵要消耗空间') # ! 
Strassen方法的关键就是对矩阵乘法作分治递归 print('练习28.2-1 运用Strassen算法计算矩阵的乘积') print('矩阵的乘积为:') print(np.matrix([[1, 3], [5, 7]]) * np.matrix([[8, 4], [6, 2]])) print('练习28.2-2 如果n不是2的整数幂,应该如何修改Strassen算法,求出两个n*n矩阵的乘积', '证明修改后的算法的运行时间为Θ(n^lg7)') print('练习28.2-3 如果使用k次乘法(假定乘法不满足交换律)就能计算出两个3*3矩阵的乘积', '就能在o(n^lg7)时间内计算出两个n*n矩阵的乘积,满足上述条件的最大的k值是多少') print('练习28.2-4 V.Pan发现了一种使用132464次乘法的求68*68矩阵乘积的方法', '一种使用143640次乘法的求70*70矩阵乘积的方法', '一种使用155424次乘法的求72*72矩阵乘积的方法') print('练习28.2-5 用Strassen算法算法作为子程序,能在多长时间内计算出一个kn*n矩阵与一个n*kn矩阵的乘积') print('练习28.2-6 说明如何仅用三次实数乘法运算,就可以计复数a+bi与c+di的乘积.该算法应该把a,b,c和d作为输入,', '并分别生成实部ac-bd和虚部ad+bc的值') # python src/chapter28/chapter28note.py # python3 src/chapter28/chapter28note.py class Chapter28_3: """ chapter28.3 note and function """ def __init__(self): pass def note(self): ''' Summary ==== Print chapter28.3 note Example ==== ```python Chapter28_3().note() ``` ''' print('chapter28.3 note as follow') print('28.3 求解线性方程组') print('对一组同时成立的线性方程组Ax=b求解时很多应用中都会出现的基本问题。一个线性系统可以表述为一个矩阵方程', '其中每个矩阵或者向量元素都属于一个域,如果实数域R') print('LUP分解求解线性方程组') print('LUP分解的思想就是找出三个n*n矩阵L,U和P,满足PA=LU') print(' 其中L是一个单位下三角矩阵,U是一个上三角矩阵,P是一个置换矩阵') print('每一个非奇异矩阵A都有这样一种分解') print('对矩阵A进行LUP分解的优点是当相应矩阵为三角矩阵(如矩阵L和U),更容易求解线性系统') print('在计算出A的LUP分解后,就可以用如下方式对三角线性系统进行求解,也就获得了Ax=b的解') print('对Ax=b的两边同时乘以P,就得到等价的方程组PAx=Pb,得到LUx=Pb') print('正向替换与逆向替换') print(' 如果已知L,P和b,用正向替换可以在Θ(n^2)的时间内求解下三角线性系统', '用一个数组pi[1..n]来表示置换P') print('LU分解的计算') print(' 把执行LU分解的过程称为高斯消元法.先从其他方程中减去第一个方程的倍数', '以便把那些方程中的第一个变量消去') print(' 继续上述过程,直至系统变为一个上三角矩阵形式,这个矩阵都是U.矩阵L是由使得变量被消去的行的乘数所组成') print('LUP分解的计算') print(' 一般情况下,为了求线性方程组Ax=b的解,必须在A的非对角线元素中选主元以避免除数为0', '除数不仅不能为0,也不能很小(即使A是非奇异的),否则就会在计算中导致数值不稳定.因此,所选的主元必须是一个较大的值') print(' LUP分解的数学基础与LU分解相似。已知一个n*n非奇异矩阵A,并希望计算出一个置换矩阵P,一个单位下三角矩阵L和一个上三角矩阵U,并满足条件PA=LU') print('练习28.3-1 运用正向替换法求解下列方程组') print('练习28.3-2 求出下列矩阵的LU分解') print('练习28.3-3 运用LUP分解来求解下列方程组') print('练习28.3-4 试描述一个对角矩阵的LUP分解') print('练习28.3-5 试描述一个置换矩阵A的LUP分解,并证明它是唯一的') print('练习28.3-6 证明:对所有n>=1,存在具有LU分解的奇异的n*n矩阵') print('练习28.3-7 在LU-DECOMPOSITION中,当k=n时是否有必要执行最外层的for循环迭代?', '在LUP-DECOMPOSITION中的情况又是怎样?') # python src/chapter28/chapter28note.py # python3 src/chapter28/chapter28note.py class Chapter28_4: """ chapter28.4 note and function """ def note(self): """ Summary ==== Print chapter28.4 note Example ==== ```python Chapter28_4().note() ``` """ print('chapter28.4 note as follow') print('28.4 矩阵求逆') print('在实际应用中,一般并不使用逆矩阵来求解线性方程组的解,而是运用一些更具数值稳定性的技术,如LUP分解求解线性方程组') print('但是,有时仍然需要计算一个矩阵的逆矩阵.可以利用LUP分解来计算逆矩阵') print('此外,还将证明矩阵乘法和计算逆矩阵问题是具有相同难度的两个问题,即(在技术条件限制下)可以使用一个算法在相同渐进时间内解决另外一个问题') print('可以使用Strassen矩阵乘法算法来求一个矩阵的逆') print('确实,正是由于要证明可以用比通常的办法更快的算法来求解线性方程组,才推动了最初的Strassen算法的产生') print('根据LUP分解计算逆矩阵') print(' 假设有一个矩阵A的LUP分解,包括三个矩阵L,U,P,并满足PA=LU') print(' 如果运用LU-SOLVE,则可以在Θ(n^2)的运行时间内,求出形如Ax=b的线性系统的解') print(' 由于LUP分解仅取决于A而不取决于b,所以就能够再用Θ(n^2)的运行时间,求出形如Ax=b\'的另一个线性方程组的解') print(' 一般地,一旦得到了A的LUP分解,就可以在Θ(kn^2)的运行时间内,求出k个形如Ax=b的线性方程组的解,这k个方程组只有b不相同') print('矩阵乘法与逆矩阵') print(' 对矩阵乘法可以获得理论上的加速,可以相应地加速求逆矩阵的运算') print(' 从下面的意义上说,求逆矩阵运算等价于矩阵乘法运算', '如果M(n)表示求两个n*n矩阵乘积所需要的时间,则有在O(M(n))时间内对一个n*n矩阵求逆的方法', '如果I(n)表示对一个非奇异的n*n矩阵求逆所需的时间,则有在O(I(n))时间内对两个n*n矩阵相乘的方法') print('定理28.7 (矩阵乘法不比求逆矩阵困难) 如果能在I(n)时间内求出一个n*n矩阵的逆矩阵', '其中I(n)=Ω(n^2)且满足正则条件I(3n)=O(I(n))时间内求出两个n*n矩阵的乘积') print('定理28.8 (求逆矩阵运算并不比矩阵乘法运算更困难) 如果能在M(n)的时间内计算出两个n*n实矩阵的乘积', '其中M(n)=Ω(n^2)且M(n)满足两个正则条件:对任意的0<=k<=n有M(n+k)=O(M(n)),以及对某个常数c<1/2有M(n/2)<=cM(n)', '则可以在O(M(n))时间内求出任何一个n*n非奇异实矩阵的逆矩阵') print('练习28.4-1 设M(n)是求n*n矩阵的乘积所需的时间,S(n)表示求n*n矩阵的平方所需时间', 
'证明:求矩阵乘积运算与求矩阵平方运算实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵平方算法,', '一个S(n)时间的矩阵平方算法蕴含着一个O(S(n))时间的矩阵相乘算法') print('练习28.4-2 设M(n)是求n*n矩阵乘积所需的时间,L(n)为计算一个n*n矩阵的LUP分解所需要的时间', '证明:求矩阵乘积运算与计算矩阵LUP分解实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵LUP分解算法', '一个L(n)时间的矩阵LUP分解算法蕴含着一个O(L(n))时间的矩阵相乘算法') print('练习28.4-3 设M(n)是求n*n矩阵的乘积所需的时间,D(n)表示求n*n矩阵的行列式的值所需要的时间', '证明:求矩阵乘积运算与求行列式的值实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的行列式算法', '一个D(n)时间的行列式算法蕴含着一个O(D(n))时间的矩阵相乘算法') print('练习28.4-4 设M(n)是求n*n布尔矩阵的乘积所需的时间,T(n)为找出n*n布尔矩阵的传递闭包所需要的时间', '证明:一个M(n)时间的布尔矩阵相乘算法蕴含着一个O(M(n)lgn)时间的传递闭包算法,一个T(n)时间的传递闭包算法蕴含着一个O(T(n))时间的布尔矩阵相乘算法') print('练习28.4-5 当矩阵元素属于整数模2所构成的域时,基于定理28.8的求逆矩阵算法是否能够运行?') print('练习28.4-6 推广基于定理28.8的求逆矩阵算法,使之能处理复矩阵的情形,并证明所给出的推广方法是正确的') print(' 提示:用A的共轭转置矩阵A*来代替A的转置矩阵A^T,把A^T中的每个元素用其共轭复数代替就得到A*,也就是Hermitian转置') # python src/chapter28/chapter28note.py # python3 src/chapter28/chapter28note.py class Chapter28_5: """ chapter28.5 note and function """ def note(self): """ Summary ==== Print chapter28.5 note Example ==== ```python Chapter28_5().note() ``` """ print('chapter28.5 note as follows') print('28.5 对称正定矩阵与最小二乘逼近') print('对称正定矩阵有许多有趣而很理想的性质。例如,它们都是非奇异矩阵,并且可以对其进行LU分解而无需担心出现除数为0的情况') print('引理28.9 任意对称正定矩阵都是非奇异矩阵') print('引理28.10 如果A是一个对称正定矩阵,则A的每一个主子式都是对称正定的') print('设A是一个对称正定矩阵,Ak是A的k*k主子式,矩阵A关于Ak的Schur补定义为S=C-BAk^-1B^T') print('引理28.11 (Schur补定理) 如果A是一个对称正定矩阵,Ak是A的k*k主子式.则A关于Ak的Schur补也是对称正定的') print('推论28.12 对一个对称正定矩阵进行LU分解不会出现除数为0的情形') print('最小二乘逼近') print('对给定的一组数据点进行曲线拟合是对称正定矩阵的一个重要应用,假定给定m个数据点(x1,y1),(x2,y2),...,(xm,ym)', '其中已知yi受到测量误差的影响。希望找出一个函数F(x),满足对i=1,2,...,m,有yi=F(xi)+qi') print('其中近似误差qi是很小的,函数F(x)的形式依赖于所遇到的问题,在此,假定它的形式为线性加权和F(x)=∑cifi(x)') print('其中和项的个数和特定的基函数fi取决于对问题的了解,一种选择是fi(x)=x^(i-1),这说明F(x)是一个x的n-1次多项式') print('这样一个高次函数F尽管容易处理数据,但也容易对数据产生干扰,并且一般在对未预见到的x预测其相应的y值时,其精确性也是很差的') print('为了使逼近误差最小,选定使误差向量q的范数最小,就得到一个最小二乘解') print('统计学中的正规方程A^TAc=A^Ty') print('伪逆矩阵A+=(A^TA)^-1A^T') print('练习28.5-1 证明:对称正定矩阵的对角线上每一个元素都是正值') print('练习28.5-2 设A=[[a,b],[b,c]]是一个2*2对称正定矩阵,证明其行列式的值ac-b^2是正的') print('练习28.5-3 证明:一个对称正定矩阵中值最大的元素处于其对角线上') print('练习28.5-4 证明:一个对称正定矩阵的每一个主子式的行列式的值都是正的') print('练习28.5-5 设Ak表示对称正定矩阵A的第k个主子式。证明在LU分解中,det(Ak)/det(Ak-1)是第k个主元,为方便起见,设det(A0)=1') print('练习28.5-6 最小二乘法求') print('练习28.5-7 证明:伪逆矩阵A+满足下列四个等式:') print(' AA^+A=A') print(' A^+AA^+=A^+') print(' (AA^+)^T=AA^+') print(' (A^+A)^T=A^+A') print('思考题28-1 三对角线性方程组') print(' 1) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LU分解可以在O(n)的时间内求出方程Ax=b的解', '论证在最坏情况下,从渐近意义上看,基于求出A^-1的任何方法都要花费更多的时间') print(' 2) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LUP分解,', '可以在O(n)的时间内求出方程Ax=b的解') print('思考题28-2 三次样条插值') print(' 将一条曲线拟合为由n个三次多项式分段组成') print(' 用自然三次样条可以在O(n)时间内对一组n+1个点-值对进行插值') # python src/chapter28/chapter28note.py # python3 src/chapter28/chapter28note.py chapter28_1 = Chapter28_1() chapter28_2 = Chapter28_2() chapter28_3 = Chapter28_3() chapter28_4 = Chapter28_4() chapter28_5 = Chapter28_5() def printchapter28note(): """ print chapter28 note. """ print('Run main : single chapter twenty-eight!') chapter28_1.note() chapter28_2.note() chapter28_3.note() chapter28_4.note() chapter28_5.note() # python src/chapter28/chapter28note.py # python3 src/chapter28/chapter28note.py if __name__ == '__main__': printchapter28note() else: pass
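The notes in this file describe Strassen's recursion but never implement it. As an illustration, here is a minimal, unoptimized sketch of the seven-product recursion in numpy; it assumes n is a power of 2, the names P1..P7 and r, s, t, u follow steps 3) and 4) of the notes, and the final check reuses the 2*2 matrices from 练习28.2-1.

```python
import numpy as np

def strassen(A, B):
    """Minimal Strassen multiply for n*n arrays, n a power of 2 (illustration only)."""
    n = A.shape[0]
    if n == 1:
        return A * B
    k = n // 2
    A11, A12, A21, A22 = A[:k, :k], A[:k, k:], A[k:, :k], A[k:, k:]
    B11, B12, B21, B22 = B[:k, :k], B[:k, k:], B[k:, :k], B[k:, k:]
    # the 7 recursive products behind T(n) = 7T(n/2) + Θ(n^2)
    P1 = strassen(A11, B12 - B22)
    P2 = strassen(A11 + A12, B22)
    P3 = strassen(A21 + A22, B11)
    P4 = strassen(A22, B21 - B11)
    P5 = strassen(A11 + A22, B11 + B22)
    P6 = strassen(A12 - A22, B21 + B22)
    P7 = strassen(A11 - A21, B11 + B12)
    C = np.empty_like(A)
    C[:k, :k] = P5 + P4 - P2 + P6   # r
    C[:k, k:] = P1 + P2             # s
    C[k:, :k] = P3 + P4             # t
    C[k:, k:] = P5 + P1 - P3 - P7   # u
    return C

A = np.array([[1, 3], [5, 7]])
B = np.array([[8, 4], [6, 2]])
print(strassen(A, B))  # matches A @ B: [[26 10] [82 34]]
```

A practical version would fall back to the naive Θ(n^3) multiply below a cutoff size, which is exactly the constant-factor caveat the notes raise.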
note
Summary ==== Print chapter28.4 note Example ==== ```python Chapter28_4().note() ```
def note(self): """ Summary ==== Print chapter28.4 note Example ==== ```python Chapter28_4().note() ``` """ print('chapter28.4 note as follows') print('28.4 矩阵求逆') print('在实际应用中,一般并不使用逆矩阵来求解线性方程组的解,而是运用一些更具数值稳定性的技术,如LUP分解求解线性方程组') print('但是,有时仍然需要计算一个矩阵的逆矩阵.可以利用LUP分解来计算逆矩阵') print('此外,还将证明矩阵乘法和计算逆矩阵问题是具有相同难度的两个问题,即(在技术条件限制下)可以使用一个算法在相同渐近时间内解决另外一个问题') print('可以使用Strassen矩阵乘法算法来求一个矩阵的逆') print('确实,正是由于要证明可以用比通常的办法更快的算法来求解线性方程组,才推动了最初的Strassen算法的产生') print('根据LUP分解计算逆矩阵') print(' 假设有一个矩阵A的LUP分解,包括三个矩阵L,U,P,并满足PA=LU') print(' 如果运用LU-SOLVE,则可以在Θ(n^2)的运行时间内,求出形如Ax=b的线性系统的解') print(' 由于LUP分解仅取决于A而不取决于b,所以就能够再用Θ(n^2)的运行时间,求出形如Ax=b\'的另一个线性方程组的解') print(' 一般地,一旦得到了A的LUP分解,就可以在Θ(kn^2)的运行时间内,求出k个形如Ax=b的线性方程组的解,这k个方程组只有b不相同') print('矩阵乘法与逆矩阵') print(' 对矩阵乘法可以获得理论上的加速,可以相应地加速求逆矩阵的运算') print(' 从下面的意义上说,求逆矩阵运算等价于矩阵乘法运算', '如果M(n)表示求两个n*n矩阵乘积所需要的时间,则有在O(M(n))时间内对一个n*n矩阵求逆的方法', '如果I(n)表示对一个非奇异的n*n矩阵求逆所需的时间,则有在O(I(n))时间内对两个n*n矩阵相乘的方法') print('定理28.7 (矩阵乘法不比求逆矩阵困难) 如果能在I(n)时间内求出一个n*n矩阵的逆矩阵', '其中I(n)=Ω(n^2)且I(n)满足正则条件I(3n)=O(I(n)),则可以在O(I(n))时间内求出两个n*n矩阵的乘积') print('定理28.8 (求逆矩阵运算并不比矩阵乘法运算更困难) 如果能在M(n)的时间内计算出两个n*n实矩阵的乘积', '其中M(n)=Ω(n^2)且M(n)满足两个正则条件:对任意的0<=k<=n有M(n+k)=O(M(n)),以及对某个常数c<1/2有M(n/2)<=cM(n)', '则可以在O(M(n))时间内求出任何一个n*n非奇异实矩阵的逆矩阵') print('练习28.4-1 设M(n)是求n*n矩阵的乘积所需的时间,S(n)表示求n*n矩阵的平方所需时间', '证明:求矩阵乘积运算与求矩阵平方运算实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵平方算法,', '一个S(n)时间的矩阵平方算法蕴含着一个O(S(n))时间的矩阵相乘算法') print('练习28.4-2 设M(n)是求n*n矩阵乘积所需的时间,L(n)为计算一个n*n矩阵的LUP分解所需要的时间', '证明:求矩阵乘积运算与计算矩阵LUP分解实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵LUP分解算法', '一个L(n)时间的矩阵LUP分解算法蕴含着一个O(L(n))时间的矩阵相乘算法') print('练习28.4-3 设M(n)是求n*n矩阵的乘积所需的时间,D(n)表示求n*n矩阵的行列式的值所需要的时间', '证明:求矩阵乘积运算与求行列式的值实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的行列式算法', '一个D(n)时间的行列式算法蕴含着一个O(D(n))时间的矩阵相乘算法') print('练习28.4-4 设M(n)是求n*n布尔矩阵的乘积所需的时间,T(n)为找出n*n布尔矩阵的传递闭包所需要的时间', '证明:一个M(n)时间的布尔矩阵相乘算法蕴含着一个O(M(n)lgn)时间的传递闭包算法,一个T(n)时间的传递闭包算法蕴含着一个O(T(n))时间的布尔矩阵相乘算法') print('练习28.4-5 当矩阵元素属于整数模2所构成的域时,基于定理28.8的求逆矩阵算法是否能够运行?') print('练习28.4-6 推广基于定理28.8的求逆矩阵算法,使之能处理复矩阵的情形,并证明所给出的推广方法是正确的') print(' 提示:用A的共轭转置矩阵A*来代替A的转置矩阵A^T,把A^T中的每个元素用其共轭复数代替就得到A*,也就是Hermitian转置')
207
254
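Since the implementation above only prints notes, here is a hedged sketch of the two points it makes: an LUP factorization is computed once and reused for k right-hand sides in Θ(kn^2) total, and an inverse is obtained by solving AX = I column by column. This assumes scipy is available; scipy.linalg.lu_factor and lu_solve compute and apply the PA = LU factorization discussed in 28.3, and the matrix here is made up for illustration.

```python
import numpy as np
from scipy.linalg import lu_factor, lu_solve

A = np.array([[4.0, 3.0], [6.0, 3.0]])  # any nonsingular matrix
lu, piv = lu_factor(A)                  # one LUP factorization: PA = LU

# k right-hand sides reuse the same factorization, Θ(n^2) per solve
for b in (np.array([10.0, 12.0]), np.array([1.0, 0.0])):
    print(lu_solve((lu, piv), b))

# inverse via AX = I: n solves against the columns of the identity
A_inv = lu_solve((lu, piv), np.eye(2))
print(np.allclose(A @ A_inv, np.eye(2)))  # True
```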
note
Summary ==== Print chapter28.5 note Example ==== ```python Chapter28_5().note() ```
def note(self): """ Summary ==== Print chapter28.5 note Example ==== ```python Chapter28_5().note() ``` """ print('chapter28.5 note as follows') print('28.5 对称正定矩阵与最小二乘逼近') print('对称正定矩阵有许多有趣而很理想的性质。例如,它们都是非奇异矩阵,并且可以对其进行LU分解而无需担心出现除数为0的情况') print('引理28.9 任意对称正定矩阵都是非奇异矩阵') print('引理28.10 如果A是一个对称正定矩阵,则A的每一个主子式都是对称正定的') print('设A是一个对称正定矩阵,Ak是A的k*k主子式,矩阵A关于Ak的Schur补定义为S=C-BAk^-1B^T') print('引理28.11 (Schur补定理) 如果A是一个对称正定矩阵,Ak是A的k*k主子式.则A关于Ak的Schur补也是对称正定的') print('推论28.12 对一个对称正定矩阵进行LU分解不会出现除数为0的情形') print('最小二乘逼近') print('对给定的一组数据点进行曲线拟合是对称正定矩阵的一个重要应用,假定给定m个数据点(x1,y1),(x2,y2),...,(xm,ym)', '其中已知yi受到测量误差的影响。希望找出一个函数F(x),满足对i=1,2,...,m,有yi=F(xi)+qi') print('其中近似误差qi是很小的,函数F(x)的形式依赖于所遇到的问题,在此,假定它的形式为线性加权和F(x)=∑cifi(x)') print('其中和项的个数和特定的基函数fi取决于对问题的了解,一种选择是fi(x)=x^(i-1),这说明F(x)是一个x的n-1次多项式') print('这样一个高次函数F尽管容易处理数据,但也容易对数据产生干扰,并且一般在对未预见到的x预测其相应的y值时,其精确性也是很差的') print('为了使逼近误差最小,选定使误差向量q的范数最小,就得到一个最小二乘解') print('统计学中的正规方程A^TAc=A^Ty') print('伪逆矩阵A+=(A^TA)^-1A^T') print('练习28.5-1 证明:对称正定矩阵的对角线上每一个元素都是正值') print('练习28.5-2 设A=[[a,b],[b,c]]是一个2*2对称正定矩阵,证明其行列式的值ac-b^2是正的') print('练习28.5-3 证明:一个对称正定矩阵中值最大的元素处于其对角线上') print('练习28.5-4 证明:一个对称正定矩阵的每一个主子式的行列式的值都是正的') print('练习28.5-5 设Ak表示对称正定矩阵A的第k个主子式。证明在LU分解中,det(Ak)/det(Ak-1)是第k个主元,为方便起见,设det(A0)=1') print('练习28.5-6 最小二乘法求') print('练习28.5-7 证明:伪逆矩阵A+满足下列四个等式:') print(' AA^+A=A') print(' A^+AA^+=A^+') print(' (AA^+)^T=AA^+') print(' (A^+A)^T=A^+A') print('思考题28-1 三对角线性方程组') print(' 1) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LU分解可以在O(n)的时间内求出方程Ax=b的解', '论证在最坏情况下,从渐近意义上看,基于求出A^-1的任何方法都要花费更多的时间') print(' 2) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LUP分解,', '可以在O(n)的时间内求出方程Ax=b的解') print('思考题28-2 三次样条插值') print(' 将一条曲线拟合为由n个三次多项式分段组成') print(' 用自然三次样条可以在O(n)时间内对一组n+1个点-值对进行插值')
262
309
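The least-squares recipe in the notes (normal equations, SPD coefficient matrix, pseudoinverse) is compact enough to demonstrate directly. A small sketch with made-up data points and the polynomial basis fi(x)=x^(i-1) from the notes; Cholesky needs no pivoting here precisely because A^T A is symmetric positive-definite when A has full column rank (推论28.12).

```python
import numpy as np
from scipy.linalg import cho_factor, cho_solve

# m data points, invented for illustration; degree-2 fit with basis 1, x, x^2
x = np.array([-1.0, 1.0, 2.0, 3.0, 5.0])
y = np.array([2.0, 1.0, 1.0, 0.0, 3.0])
A = np.vander(x, 3, increasing=True)   # m*n design matrix, columns 1, x, x^2

# normal equations (A^T A) c = A^T y, solved by Cholesky (no pivoting needed)
c = cho_solve(cho_factor(A.T @ A), A.T @ y)

# same coefficients via the pseudoinverse A+ = (A^T A)^-1 A^T and via lstsq
print(np.allclose(c, np.linalg.pinv(A) @ y))                 # True
print(np.allclose(c, np.linalg.lstsq(A, y, rcond=None)[0]))  # True
```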
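思考题28-1 asks for an O(n) solve of a symmetric positive-definite tridiagonal system. One standard way to realize the LU-based argument is the Thomas algorithm (forward elimination plus back substitution, i.e., LU specialized to tridiagonal form; the SPD assumption is what lets it skip pivoting). The sketch below is my own illustration, not code from the file.

```python
import numpy as np

def solve_tridiagonal(a, b, c, d):
    """O(n) Thomas algorithm: a = sub-diagonal (n-1), b = diagonal (n),
    c = super-diagonal (n-1), d = right-hand side (n). Assumes n >= 2 and
    that no pivoting is needed (true for symmetric positive-definite A)."""
    n = len(b)
    cp, dp = np.empty(n - 1), np.empty(n)
    cp[0] = c[0] / b[0]
    dp[0] = d[0] / b[0]
    for i in range(1, n):                 # forward elimination
        m = b[i] - a[i - 1] * cp[i - 1]
        if i < n - 1:
            cp[i] = c[i] / m
        dp[i] = (d[i] - a[i - 1] * dp[i - 1]) / m
    x = np.empty(n)
    x[-1] = dp[-1]
    for i in range(n - 2, -1, -1):        # back substitution
        x[i] = dp[i] - cp[i] * x[i + 1]
    return x

# SPD tridiagonal example: 2 on the diagonal, -1 off the diagonal
n = 5
a = -np.ones(n - 1); b = 2 * np.ones(n); c = -np.ones(n - 1)
d = np.ones(n)
x = solve_tridiagonal(a, b, c, d)
A = np.diag(b) + np.diag(a, -1) + np.diag(c, 1)
print(np.allclose(A @ x, d))  # True
```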
set_clip_grad_norm
Sets maximum gradient norm. None means gradient clipping is disabled. Defaults to None.
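The docstring above only describes the stored setting; the clipping that a task like this typically applies before an optimizer step is torch.nn.utils.clip_grad_norm_. A minimal sketch of those semantics, with a placeholder model and loss that are not from this file:

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)                       # placeholder model
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()

max_norm = 1.0  # the value set_clip_grad_norm would store; None skips clipping
total_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm)
# total_norm is the pre-clipping norm; gradients were rescaled in place
# only if it exceeded max_norm
print(float(total_norm))
```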
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsulates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase for each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test"] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self # MASKED: set_clip_grad_norm function (lines 219-230) def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. 
Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. 
bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move the checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save the changed state in the checkpoint. # This is a temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. 
""" self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
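The function masked above, `set_clip_grad_norm`, only records the threshold; the actual clipping happens later in `_clip_gradients` via `nn.utils.clip_grad_norm_`. A minimal, self-contained sketch of that primitive (the toy model and threshold are illustrative, not taken from this row):

```python
import torch
import torch.nn as nn

# Toy model and loss, for illustration only.
model = nn.Linear(8, 2)
loss = model(torch.randn(4, 8)).sum()
loss.backward()

# Rescale gradients in place so their global L2 norm is at most max_norm;
# ClassificationTask._clip_gradients does the same over master_params(optimizer).
max_norm = 1.0
total_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm)
print(f"gradient norm before clipping: {total_norm:.3f}")
```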
    def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
        """Sets maximum gradient norm.

        None means gradient clipping is disabled. Defaults to None."""
        self.clip_grad_norm = clip_grad_norm
        if clip_grad_norm is None:
            logging.info("Disabled gradient norm clipping.")
        else:
            logging.info(
                f"Enabled gradient norm clipping with threshold: {clip_grad_norm}"
            )
        return self
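Like the task's other setters, this implementation returns `self`, so configuration chains in builder style. A hedged usage sketch (the concrete values are placeholders, and it assumes `classy_vision` is installed with `ClassificationTask` importable from `classy_vision.tasks`):

```python
from classy_vision.tasks import ClassificationTask

# Chained configuration; each setter logs its effect and returns the task.
task = (
    ClassificationTask()
    .set_num_epochs(2)
    .set_test_phase_period(1)
    .set_clip_grad_norm(1.0)  # clip gradients to an L2 norm of 1.0
)
```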
219
230
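Both the masked file above and the full `file_content` below implement gradient accumulation through `optimizer_period`, `_should_do_step`, and `run_optimizer`. Reduced to plain arithmetic with made-up batch sizes, the zero-grad / optimizer-step cadence works out as follows:

```python
# Illustrative numbers only: global batch 32, simulated batch 128 -> period 4.
global_batchsize = 32
simulated_global_batchsize = 128
assert simulated_global_batchsize % global_batchsize == 0
optimizer_period = simulated_global_batchsize // global_batchsize

num_updates = 0
for step in range(8):
    update_idx = num_updates // global_batchsize
    do_zero_grad = (update_idx % optimizer_period) == 0
    do_step = (update_idx % optimizer_period) == optimizer_period - 1
    print(step, do_zero_grad, do_step)
    num_updates += global_batchsize
# Gradients are zeroed at steps 0 and 4 and applied at steps 3 and 7,
# matching the task's behavior: the optimizer steps once per period, and a
# trailing partial period at the end of training never updates the model.
```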
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. 
See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. """ self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
        # Assumes all batches have the
        # same size

        update_idx = self.num_updates // self.get_global_batchsize()
        do_zero_grad = (update_idx % self.optimizer_period) == 0
        do_step = self._should_do_step()

        if do_zero_grad:
            self.optimizer.zero_grad()

        if self.amp_type == AmpType.APEX:
            with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.amp_type == AmpType.PYTORCH:
            self.amp_grad_scaler.scale(loss).backward()
        else:
            loss.backward()

        if do_step:
            # Handle gradient accumulation related gradient rescaling
            if self.optimizer_period != 1:
                self._rescale_gradients(1 / self.optimizer_period)

            # Clipping must happen after grad accumulation
            if self.clip_grad_norm is not None:
                self._clip_gradients(self.clip_grad_norm)

            if self.amp_type == AmpType.PYTORCH:
                # If using mixed precision, handle underflow-related scaling
                # See https://pytorch.org/docs/stable/amp.html#gradient-scaling
                # for context
                self.amp_grad_scaler.step(self.optimizer, where=self.where)
                self.amp_grad_scaler.update()
            else:
                self.optimizer.step(where=self.where)

    def _rescale_gradients(self, scale):
        for param in master_params(self.optimizer):
            if param.grad is not None:
                param.grad.data.mul_(scale)

    def _clip_gradients(self, max_norm):
        nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)

    def update_meters(self, model_output, sample):
        target = sample["target"].detach().cpu()
        model_output = model_output.detach().cpu()

        # Update meters
        for meter in self.meters:
            meter.update(model_output, target, is_train=self.train)

    def synchronize_losses(self):
        """Average the losses across the different replicas"""

        # Average losses across nodes
        losses_tensor = torch.tensor(self.losses)
        synchronized_losses_tensor = all_reduce_mean(losses_tensor)
        self.losses = synchronized_losses_tensor.tolist()

    def advance_phase(self):
        """Performs bookkeeping / task updates between phases

        Increments phase idx, resets meters, resets loss history,
        resets counters, shuffles dataset, rebuilds iterators, and
        sets the train / test state for phase.
        """
        logging.debug("Advancing phase")
        # Reset meters for next phase / epoch
        for meter in self.meters:
            meter.reset()

        # Reset loss history for next epoch
        self.losses = []

        # Setup new phase
        self.phase_idx += 1
        phase = self.phases[self.phase_idx]
        self.train = True if phase["train"] else False
        if self.train:
            self.train_phase_idx += 1

        # Re-build dataloader & re-create iterator anytime membership changes.
        self.build_dataloaders_for_current_phase()
        self.create_data_iterators()
        # Set up pytorch module in train vs eval mode, update optimizer.
        self._set_model_train_mode()

    def done_training(self):
        """Stop condition for training"""
        return self.phase_idx + 1 >= len(self.phases)

    def create_data_iterators(self):
        """Creates data iterator(s) for the current phase."""
        # Delete iterator explicitly so that all dataloader processes
        # are cleaned up.
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
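The `run_optimizer` / `_should_do_step` pair above boils gradient accumulation down to counter arithmetic on `num_updates`. Below is a standalone trace of that bookkeeping; the constants are illustrative only and not taken from any real configuration:

```python
# Trace which train steps zero gradients vs. step the optimizer when
# gradients are accumulated over optimizer_period minibatches.
global_batchsize = 32  # stand-in for get_global_batchsize()
optimizer_period = 4   # simulated_global_batchsize // global_batchsize

num_updates = 0
for step in range(8):
    update_idx = num_updates // global_batchsize
    do_zero_grad = (update_idx % optimizer_period) == 0
    do_step = (update_idx % optimizer_period) == optimizer_period - 1
    print(f"step {step}: zero_grad={do_zero_grad}, optimizer_step={do_step}")
    num_updates += global_batchsize
```

Steps 0 through 2 only accumulate gradients; step 3 rescales them by `1 / optimizer_period` and steps the optimizer; step 4 zeroes the gradients and the cycle repeats.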
set_dataset
Set dataset for phase type on task

Args:
    dataset: ClassyDataset for returning samples.
    phase_type: str must be one of "train" or "test"
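For reference, a minimal sketch of calling this setter when wiring a task up by hand. The `train_set` / `test_set` objects are hypothetical stand-ins (the method only validates the phase name, not the dataset type), and the example assumes `classy_vision` is installed:

```python
from classy_vision.tasks import ClassificationTask

task = ClassificationTask()
train_set = object()  # hypothetical stand-in for a real ClassyDataset
test_set = object()   # hypothetical stand-in for a real ClassyDataset

task.set_dataset(train_set, "train")  # also reads dataset.phases_per_epoch (default 1)
task.set_dataset(test_set, "test")    # clears the internal train-only flag
task.set_dataset(test_set, "val")     # AssertionError: phase_type must be in ['train', 'test']
```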
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self # MASKED: set_dataset function (lines 280-296) def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. 
Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. 
""" self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
        # Assumes all batches have the
        # same size

        update_idx = self.num_updates // self.get_global_batchsize()
        do_zero_grad = (update_idx % self.optimizer_period) == 0
        do_step = self._should_do_step()

        if do_zero_grad:
            self.optimizer.zero_grad()

        if self.amp_type == AmpType.APEX:
            with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.amp_type == AmpType.PYTORCH:
            self.amp_grad_scaler.scale(loss).backward()
        else:
            loss.backward()

        if do_step:
            # Handle gradient accumulation related gradient rescaling
            if self.optimizer_period != 1:
                self._rescale_gradients(1 / self.optimizer_period)

            # Clipping must happen after grad accumulation
            if self.clip_grad_norm is not None:
                self._clip_gradients(self.clip_grad_norm)

            if self.amp_type == AmpType.PYTORCH:
                # If using mixed precision, handle underflow-related scaling
                # See https://pytorch.org/docs/stable/amp.html#gradient-scaling
                # for context
                self.amp_grad_scaler.step(self.optimizer, where=self.where)
                self.amp_grad_scaler.update()
            else:
                self.optimizer.step(where=self.where)

    def _rescale_gradients(self, scale):
        for param in master_params(self.optimizer):
            if param.grad is not None:
                param.grad.data.mul_(scale)

    def _clip_gradients(self, max_norm):
        nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)

    def update_meters(self, model_output, sample):
        target = sample["target"].detach().cpu()
        model_output = model_output.detach().cpu()

        # Update meters
        for meter in self.meters:
            meter.update(model_output, target, is_train=self.train)

    def synchronize_losses(self):
        """Average the losses across the different replicas"""

        # Average losses across nodes
        losses_tensor = torch.tensor(self.losses)
        synchronized_losses_tensor = all_reduce_mean(losses_tensor)
        self.losses = synchronized_losses_tensor.tolist()

    def advance_phase(self):
        """Performs bookkeeping / task updates between phases

        Increments phase idx, resets meters, resets loss history,
        resets counters, shuffles dataset, rebuilds iterators, and
        sets the train / test state for phase.
        """
        logging.debug("Advancing phase")
        # Reset meters for next phase / epoch
        for meter in self.meters:
            meter.reset()

        # Reset loss history for next epoch
        self.losses = []

        # Setup new phase
        self.phase_idx += 1
        phase = self.phases[self.phase_idx]
        self.train = True if phase["train"] else False
        if self.train:
            self.train_phase_idx += 1

        # Re-build dataloader & re-create iterator anytime membership changes.
        self.build_dataloaders_for_current_phase()
        self.create_data_iterators()
        # Set up pytorch module in train vs eval mode, update optimizer.
        self._set_model_train_mode()

    def done_training(self):
        """Stop condition for training"""
        return self.phase_idx + 1 >= len(self.phases)

    def create_data_iterators(self):
        """Creates data iterator(s) for the current phase."""
        # Delete iterator explicitly so that all dataloader processes
        # are cleaned up.
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self
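The `_train_only` flag flipped by this setter is what later decides whether `_build_phases` interleaves test phases between train phases. A simplified restatement of that logic for intuition (the `test_only` branch is dropped here; the full version appears in `file_content` below):

```python
import math

def build_phases(num_epochs, train_phases_per_epoch=1, test_phase_period=1,
                 train_only=False):
    """Simplified restatement of ClassificationTask._build_phases."""
    phases = [{"train": True}
              for _ in range(math.ceil(train_phases_per_epoch * num_epochs))]
    if train_only:
        return phases
    final_phases = []
    for i, phase in enumerate(phases):
        final_phases.append(phase)
        if (i + 1) % test_phase_period == 0:
            final_phases.append({"train": False})
    # The last phase is always a test phase.
    if final_phases[-1]["train"]:
        final_phases.append({"train": False})
    return final_phases

# build_phases(2)                       -> train, test, train, test
# build_phases(3, test_phase_period=2)  -> train, train, test, train, test
```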
280
296
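The `start_line` / `end_line` fields (280 and 296 for this row) give the 1-indexed, inclusive span that the masked function occupies inside `file_content`. A small helper to recover that span, assuming the field semantics sketched by the schema at the top of this card:

```python
def extract_masked_span(file_content: str, start_line: int, end_line: int) -> str:
    """Return the 1-indexed, inclusive [start_line, end_line] slice of a file."""
    lines = file_content.splitlines()
    return "\n".join(lines[start_line - 1 : end_line])

# For this row, extract_masked_span(row["file_content"], 280, 296) should
# line up with row["implementation"] (modulo surrounding blank lines).
```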
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. 
See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. """ self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
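The `run_optimizer` / `_should_do_step` logic in the file above drives gradient accumulation: `num_updates` advances by the global batch size once per train step, so `update_idx` counts completed minibatches; gradients are zeroed at the start of each accumulation window and the optimizer steps on the window's last minibatch, after rescaling the accumulated gradients by `1 / optimizer_period`. A minimal, self-contained sketch of that bookkeeping (plain Python; the constants are illustrative stand-ins, not library defaults):

```python
# Sketch of the accumulation bookkeeping in run_optimizer / _should_do_step.
# GLOBAL_BATCHSIZE and OPTIMIZER_PERIOD are illustrative values; in the task,
# optimizer_period = simulated_global_batchsize // global batch size.
GLOBAL_BATCHSIZE = 32
OPTIMIZER_PERIOD = 4

num_updates = 0
for minibatch in range(8):
    update_idx = num_updates // GLOBAL_BATCHSIZE  # completed minibatches
    do_zero_grad = (update_idx % OPTIMIZER_PERIOD) == 0
    do_step = (update_idx % OPTIMIZER_PERIOD) == OPTIMIZER_PERIOD - 1
    print(f"minibatch {minibatch}: zero_grad={do_zero_grad} step={do_step}")
    num_updates += GLOBAL_BATCHSIZE  # incremented once per train_step

# Expect zero_grad on minibatches 0 and 4 and optimizer steps on 3 and 7;
# on each step the accumulated gradients are rescaled by 1 / OPTIMIZER_PERIOD.
```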
set_distributed_options
Set distributed options.

Args:
    broadcast_buffers_mode: Broadcast buffers mode. See
        :class:`BroadcastBuffersMode` for options.
    batch_norm_sync_mode: Batch normalization synchronization mode.
        See :class:`BatchNormSyncMode` for options.
    batch_norm_sync_group_size: Group size to use for synchronized
        batch norm. 0 means that the stats are synchronized across
        all replicas. For efficient synchronization, set it to the
        number of GPUs in a node (usually 8).
    find_unused_parameters: See
        :class:`torch.nn.parallel.DistributedDataParallel` for information.
    bucket_cap_mb: See
        :class:`torch.nn.parallel.DistributedDataParallel` for information.

Raises:
    RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and
        apex is not installed.
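A condensed sketch of the validation this setter performs, based on the full implementation shown earlier in this document. `check_batch_norm_options` is a local stand-in, not the library API; the real method additionally stores every option on the task and returns `self` for chaining:

```python
import logging

logging.basicConfig(level=logging.INFO)


def check_batch_norm_options(batch_norm_sync_mode: str, group_size: int) -> None:
    # Stand-in for the checks inside set_distributed_options: a positive
    # group size is only supported by Apex SyncBN, not torch.nn.SyncBatchNorm.
    if group_size > 0 and batch_norm_sync_mode != "apex":
        raise ValueError(
            "batch_norm_sync_group_size can be > 0 only when "
            "Apex Synchronized Batch Normalization is being used."
        )
    if batch_norm_sync_mode == "disabled":
        logging.info("Synchronized Batch Normalization is disabled")
    else:
        msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
        if group_size > 0:
            msg += f" and group size {group_size}"
        logging.info(msg)


check_batch_norm_options("apex", 8)  # OK: Apex SyncBN with an 8-GPU group
try:
    check_batch_norm_options("pytorch", 8)  # rejected: group size needs Apex
except ValueError as err:
    print(err)
```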
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsulates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test"] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). 
See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self # MASKED: set_distributed_options function (lines 336-401) def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. 
""" self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
        # Assumes all batches have the
        # same size
        update_idx = self.num_updates // self.get_global_batchsize()
        do_zero_grad = (update_idx % self.optimizer_period) == 0
        do_step = self._should_do_step()

        if do_zero_grad:
            self.optimizer.zero_grad()

        if self.amp_type == AmpType.APEX:
            with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.amp_type == AmpType.PYTORCH:
            self.amp_grad_scaler.scale(loss).backward()
        else:
            loss.backward()

        if do_step:
            # Handle gradient accumulation related gradient rescaling
            if self.optimizer_period != 1:
                self._rescale_gradients(1 / self.optimizer_period)

            # Clipping must happen after grad accumulation
            if self.clip_grad_norm is not None:
                self._clip_gradients(self.clip_grad_norm)

            if self.amp_type == AmpType.PYTORCH:
                # If using mixed precision, handle underflow-related scaling
                # See https://pytorch.org/docs/stable/amp.html#gradient-scaling
                # for context
                self.amp_grad_scaler.step(self.optimizer, where=self.where)
                self.amp_grad_scaler.update()
            else:
                self.optimizer.step(where=self.where)

    def _rescale_gradients(self, scale):
        for param in master_params(self.optimizer):
            if param.grad is not None:
                param.grad.data.mul_(scale)

    def _clip_gradients(self, max_norm):
        nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)

    def update_meters(self, model_output, sample):
        target = sample["target"].detach().cpu()
        model_output = model_output.detach().cpu()

        # Update meters
        for meter in self.meters:
            meter.update(model_output, target, is_train=self.train)

    def synchronize_losses(self):
        """Average the losses across the different replicas"""
        # Average losses across nodes
        losses_tensor = torch.tensor(self.losses)
        synchronized_losses_tensor = all_reduce_mean(losses_tensor)
        self.losses = synchronized_losses_tensor.tolist()

    def advance_phase(self):
        """Performs bookkeeping / task updates between phases

        Increments phase idx, resets meters, resets loss history,
        resets counters, shuffles dataset, rebuilds iterators, and
        sets the train / test state for phase.
        """
        logging.debug("Advancing phase")
        # Reset meters for next phase / epoch
        for meter in self.meters:
            meter.reset()

        # Reset loss history for next epoch
        self.losses = []

        # Setup new phase
        self.phase_idx += 1
        phase = self.phases[self.phase_idx]
        self.train = True if phase["train"] else False
        if self.train:
            self.train_phase_idx += 1

        # Re-build dataloader & re-create iterator anytime membership changes.
        self.build_dataloaders_for_current_phase()
        self.create_data_iterators()
        # Set up pytorch module in train vs eval mode, update optimizer.
        self._set_model_train_mode()

    def done_training(self):
        """Stop condition for training"""
        return self.phase_idx + 1 >= len(self.phases)

    def create_data_iterators(self):
        """Creates data iterator(s) for the current phase."""
        # Delete iterator explicitly so that all dataloader processes
        # are cleaned up.
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
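The accumulation bookkeeping in `run_optimizer` and `_should_do_step` above is compact; the following standalone sketch (hypothetical names, not part of the task class) traces the same arithmetic for one accumulation window:

```python
# Minimal sketch of the gradient-accumulation schedule used above.
# `global_batchsize` and `optimizer_period` are assumed stand-ins for
# self.get_global_batchsize() and self.optimizer_period.
global_batchsize = 32
optimizer_period = 4  # simulated_global_batchsize // global_batchsize

num_updates = 0
for step in range(8):
    # Same expressions as in run_optimizer / _should_do_step:
    update_idx = num_updates // global_batchsize
    do_zero_grad = (update_idx % optimizer_period) == 0
    do_step = (update_idx % optimizer_period) == optimizer_period - 1
    print(f"step={step} zero_grad={do_zero_grad} optimizer_step={do_step}")
    # train_step increments num_updates after run_optimizer returns.
    num_updates += global_batchsize
```

With `optimizer_period = 4`, gradients are zeroed on updates 0, 4, 8, ... and the optimizer steps on updates 3, 7, 11, ..., so the last few updates of a run may accumulate without stepping, exactly as the `set_simulated_global_batchsize` docstring warns.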
def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self
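A hypothetical usage sketch for the builder-style setter above; the import paths are assumed from the module shown in this row, and the option values are illustrative rather than recommended defaults:

```python
from classy_vision.tasks import ClassificationTask
from classy_vision.tasks.classification_task import (
    BatchNormSyncMode,
    BroadcastBuffersMode,
)

# Configure DDP-related behavior through the fluent setter.
task = ClassificationTask().set_distributed_options(
    broadcast_buffers_mode=BroadcastBuffersMode.BEFORE_EVAL,
    batch_norm_sync_mode=BatchNormSyncMode.PYTORCH,
    find_unused_parameters=False,
    bucket_cap_mb=25,
    fp16_grad_compress=False,
)
```

Note the guard at the top of the function: `batch_norm_sync_group_size` may only be non-zero together with `BatchNormSyncMode.APEX`; combining it with the PyTorch sync BN raises a `ValueError`, and `fp16_grad_compress=True` additionally requires PyTorch 1.8 or newer.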
336
401
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. 
See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. """ self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
        # Assumes all batches have the
        # same size
        update_idx = self.num_updates // self.get_global_batchsize()
        do_zero_grad = (update_idx % self.optimizer_period) == 0
        do_step = self._should_do_step()

        if do_zero_grad:
            self.optimizer.zero_grad()

        if self.amp_type == AmpType.APEX:
            with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.amp_type == AmpType.PYTORCH:
            self.amp_grad_scaler.scale(loss).backward()
        else:
            loss.backward()

        if do_step:
            # Handle gradient accumulation related gradient rescaling
            if self.optimizer_period != 1:
                self._rescale_gradients(1 / self.optimizer_period)

            # Clipping must happen after grad accumulation
            if self.clip_grad_norm is not None:
                self._clip_gradients(self.clip_grad_norm)

            if self.amp_type == AmpType.PYTORCH:
                # If using mixed precision, handle underflow-related scaling
                # See https://pytorch.org/docs/stable/amp.html#gradient-scaling
                # for context
                self.amp_grad_scaler.step(self.optimizer, where=self.where)
                self.amp_grad_scaler.update()
            else:
                self.optimizer.step(where=self.where)

    def _rescale_gradients(self, scale):
        for param in master_params(self.optimizer):
            if param.grad is not None:
                param.grad.data.mul_(scale)

    def _clip_gradients(self, max_norm):
        nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)

    def update_meters(self, model_output, sample):
        target = sample["target"].detach().cpu()
        model_output = model_output.detach().cpu()

        # Update meters
        for meter in self.meters:
            meter.update(model_output, target, is_train=self.train)

    def synchronize_losses(self):
        """Average the losses across the different replicas"""
        # Average losses across nodes
        losses_tensor = torch.tensor(self.losses)
        synchronized_losses_tensor = all_reduce_mean(losses_tensor)
        self.losses = synchronized_losses_tensor.tolist()

    def advance_phase(self):
        """Performs bookkeeping / task updates between phases

        Increments phase idx, resets meters, resets loss history,
        resets counters, shuffles dataset, rebuilds iterators, and
        sets the train / test state for phase.
        """
        logging.debug("Advancing phase")
        # Reset meters for next phase / epoch
        for meter in self.meters:
            meter.reset()

        # Reset loss history for next epoch
        self.losses = []

        # Setup new phase
        self.phase_idx += 1
        phase = self.phases[self.phase_idx]
        self.train = True if phase["train"] else False
        if self.train:
            self.train_phase_idx += 1

        # Re-build dataloader & re-create iterator anytime membership changes.
        self.build_dataloaders_for_current_phase()
        self.create_data_iterators()
        # Set up pytorch module in train vs eval mode, update optimizer.
        self._set_model_train_mode()

    def done_training(self):
        """Stop condition for training"""
        return self.phase_idx + 1 >= len(self.phases)

    def create_data_iterators(self):
        """Creates data iterator(s) for the current phase."""
        # Delete iterator explicitly so that all dataloader processes
        # are cleaned up.
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
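As a reading aid for the `get_classy_state` / `set_classy_state` pair in the file above, here is a hedged round-trip sketch; the checkpoint path is hypothetical and `task` is assumed to be a `ClassificationTask` on which `prepare()` has already run (so model, optimizer, and hooks exist), which is essentially what `CheckpointHook` does:

```python
import torch

# Snapshot the task. deep_copy=True detaches the dict from live state,
# so it stays valid even if training continues afterwards.
state = task.get_classy_state(deep_copy=True)
torch.save(state, "/tmp/classy_checkpoint.torch")  # illustrative path

# Later: restore into an identically configured, prepared task.
task.set_classy_state(torch.load("/tmp/classy_checkpoint.torch"))
```

Per the code above, restoring into a `test_only` task deliberately skips phase counters and loss history, and missing hook state only logs a warning, so hooks can be added or removed between save and load.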
set_hooks
Set hooks for task

Args:
    hooks: List of hooks to apply during training
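The masked implementation below (a copy of `set_hooks` also appears in the previous row's file_content) validates the hooks and then moves checkpoint hooks to the end of the list, so they observe state changes made by other hooks. A small sketch of that effect; the hook instances and constructor arguments are assumptions for illustration, not full signatures:

```python
from classy_vision.hooks import CheckpointHook, LossLrMeterLoggingHook

# Assumed: `task` is a ClassificationTask; CheckpointHook args elided.
logging_hook = LossLrMeterLoggingHook()
checkpoint_hook = CheckpointHook("/tmp/checkpoints", {})  # illustrative args
task.set_hooks([checkpoint_hook, logging_hook])

# set_hooks reorders the list, so the checkpoint hook runs last:
assert isinstance(task.hooks[-1], CheckpointHook)
```

Note that duplicate hooks of the same class are rejected by the `len({hook.name() ...})` assertion in the implementation.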
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
                See :class:`BatchNormSyncMode` for options.
            batch_norm_sync_group_size: Group size to use for synchronized
                batch norm. 0 means that the stats are synchronized across all
                replicas. For efficient synchronization, set it to the number
                of GPUs in a node (usually 8).
            find_unused_parameters: See
                :class:`torch.nn.parallel.DistributedDataParallel` for information.
            bucket_cap_mb: See
                :class:`torch.nn.parallel.DistributedDataParallel` for information.

        Raises:
            RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and
                apex is not installed.
        """
        self.broadcast_buffers_mode = broadcast_buffers_mode

        if batch_norm_sync_group_size > 0:
            if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
                # this should ideally work with PyTorch Sync BN as well, but it
                # fails while initializing DDP for some reason.
                raise ValueError(
                    "batch_norm_sync_group_size can be > 0 only when "
                    "Apex Synchronized Batch Normalization is being used."
                )
        self.batch_norm_sync_group_size = batch_norm_sync_group_size

        if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
            logging.info("Synchronized Batch Normalization is disabled")
        else:
            if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
                raise RuntimeError("apex is not installed")
            msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
            if self.batch_norm_sync_group_size > 0:
                msg += f" and group size {batch_norm_sync_group_size}"
            logging.info(msg)
        self.batch_norm_sync_mode = batch_norm_sync_mode

        if find_unused_parameters:
            logging.info("Enabling find_unused_parameters in DDP")

        self.find_unused_parameters = find_unused_parameters
        self.ddp_bucket_cap_mb = bucket_cap_mb

        if fp16_grad_compress:
            if get_torch_version() < [1, 8, 0]:
                raise RuntimeError(
                    "FP16 grad compression is only supported since PyTorch 1.8"
                )
            logging.info("Enabling FP16 grad compression")
        self.fp16_grad_compress = fp16_grad_compress

        return self

    # MASKED: set_hooks function (lines 403-425)

    def set_model(self, model: ClassyModel):
        """Set model for task

        Args:
            model: Model to be trained
        """
        self.base_model = model
        return self

    def set_test_only(self, test_only: bool):
        """Set test only flag

        Args:
            test_only: If true, only test phases will be run
        """
        self.test_only = test_only
        return self

    def set_bn_weight_decay(self, bn_weight_decay: bool):
        assert type(bn_weight_decay) == bool

        self.bn_weight_decay = bn_weight_decay
        return self

    def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
        """Disable / enable apex.amp and set the automatic mixed precision parameters.

        apex.amp can be utilized for mixed / half precision training.

        Args:
            amp_args: Dictionary containing arguments to be passed to
                amp.initialize. Set to None to disable amp. To enable mixed
                precision training, pass amp_args={"opt_level": "O1"} here.

                See https://nvidia.github.io/apex/amp.html for more info.

        Raises:
            RuntimeError: If opt_level is not None and apex is not installed.

        Warning: apex needs to be installed to utilize this feature.
""" self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
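The accumulation bookkeeping in `_should_do_step` and `run_optimizer` above is easier to follow with concrete numbers. Below is a minimal, self-contained sketch of that schedule; the batch sizes are illustrative values, not taken from the source.

```python
# Standalone sketch of the gradient accumulation schedule implemented by
# _should_do_step / run_optimizer above. The batch sizes are illustrative.
global_batchsize = 32
simulated_global_batchsize = 128  # must be a multiple of global_batchsize
optimizer_period = simulated_global_batchsize // global_batchsize  # -> 4

num_updates = 0
for step in range(8):
    update_idx = num_updates // global_batchsize
    do_zero_grad = (update_idx % optimizer_period) == 0
    do_step = (update_idx % optimizer_period) == optimizer_period - 1
    action = "zero_grad" if do_zero_grad else ("step" if do_step else "accumulate")
    print(f"train_step {step}: {action}")
    num_updates += global_batchsize
# Gradients are zeroed on steps 0 and 4, and the optimizer steps on steps 3
# and 7, so four minibatches contribute to every parameter update.
```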
    def set_hooks(self, hooks: List["ClassyHook"]):
        """Set hooks for task

        Args:
            hooks: List of hooks to apply during training
        """
        from classy_vision.hooks import ClassyHook

        assert isinstance(hooks, list)
        assert all(isinstance(hook, ClassyHook) for hook in hooks)
        assert len({hook.name() for hook in hooks}) == len(
            hooks
        ), "Cannot have repeated hooks of the same class"
        # TODO (zyan3): we move the checkpoint hook to the end of the list because
        # some hooks may change the state of the model, and we want to save the
        # changed state in the checkpoint. This is a temporary fix.
        non_checkpoint_hooks = [
            hook for hook in hooks if not isinstance(hook, CheckpointHook)
        ]
        checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
        hooks = non_checkpoint_hooks + checkpoint_hooks
        self.hooks = hooks
        return self
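A hedged usage sketch of the reordering behavior implemented above; the hook constructor arguments are placeholders, not the exact signatures.

```python
# Hypothetical usage; constructor arguments below are placeholders, not
# the exact hook signatures.
from classy_vision.hooks import CheckpointHook, LossLrMeterLoggingHook

task = ClassificationTask()
task.set_hooks([CheckpointHook("/tmp/checkpoints", {}), LossLrMeterLoggingHook()])

# set_hooks moves CheckpointHook to the end of the list, so a checkpoint is
# written only after every other hook has had a chance to mutate task state.
assert isinstance(task.hooks[-1], CheckpointHook)
```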
403
425
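Before the full file contents, here is a hedged sketch of driving `ClassificationTask.from_config`; every component "name" below is an illustrative placeholder for a builder registered in your own setup, not a guaranteed built-in.

```python
# Hedged sketch of ClassificationTask.from_config; the "name" values are
# illustrative placeholders for registered components, not built-ins.
config = {
    "num_epochs": 2,
    "loss": {"name": "my_loss"},
    "model": {"name": "my_model"},
    "optimizer": {
        "name": "sgd",  # assumed to be a registered optimizer
        "param_schedulers": {"lr": {"name": "constant", "value": 0.1}},
    },
    "dataset": {
        "train": {"name": "my_dataset", "phases_per_epoch": 1},
        "test": {"name": "my_dataset"},
    },
}
task = ClassificationTask.from_config(config)  # wires model, loss, optimizer, datasets
```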
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union

import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset, build_dataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
    all_reduce_mean,
    barrier,
    init_distributed_data_parallel_model,
    is_distributed_training_run,
)
from classy_vision.generic.util import (
    Timer,
    copy_model_to_gpu,
    load_and_broadcast_checkpoint,
    master_params,
    recursive_copy_to_gpu,
    split_batchnorm_params,
    update_classy_state,
)
from classy_vision.generic.util import get_torch_version
from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks
from classy_vision.losses import ClassyLoss, build_loss
from classy_vision.meters import ClassyMeter, build_meters
from classy_vision.models import ClassyModel, build_model
from classy_vision.optim import (
    ClassyOptimizer,
    build_optimizer,
    build_optimizer_schedulers,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast

from . import register_task
from .classy_task import ClassyTask


try:
    import apex

    apex_available = True
except ImportError:
    apex_available = False

try:
    from torch.cuda.amp import GradScaler as TorchGradScaler

except ImportError:
    pass

try:
    from fairscale.optim.grad_scaler import ShardedGradScaler

    fairscale_available = True
except ImportError:
    fairscale_available = False


class AmpType(enum.Enum):
    # Automatic Mixed Precision supported types
    APEX = enum.auto()
    PYTORCH = enum.auto()


class BroadcastBuffersMode(enum.Enum):
    DISABLED = enum.auto()
    # Enable DistributedDataParallel's broadcast_buffers option, synchronizing
    # model buffers every forward pass.
    FORWARD_PASS = enum.auto()
    # Similar to FORWARD_PASS, but only synchronizes model buffers once
    # per epoch, between train and test phases. If your motivation for
    # synchronizing buffers is for buffers to be consistent during eval, use
    # this instead of FORWARD_PASS to reduce training overhead.
    BEFORE_EVAL = enum.auto()


class BatchNormSyncMode(enum.Enum):
    DISABLED = enum.auto()  # No Synchronized Batch Normalization
    PYTORCH = enum.auto()  # Use torch.nn.SyncBatchNorm
    APEX = enum.auto()  # Use apex.parallel.SyncBatchNorm, needs apex to be installed


class LastBatchInfo(NamedTuple):
    loss: torch.Tensor
    output: torch.Tensor
    target: torch.Tensor
    sample: Dict[str, Any]
    step_data: Dict[str, Any]


@register_task("classification_task")
class ClassificationTask(ClassyTask):
    """Basic classification training task.

    This task encapsulates all of the components and steps needed to
    train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.

    Assumes a train / test phase per each epoch and that the datasets
    have the same API as the map-style Dataset class in
    `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
    #torch.utils.data.Dataset>`_ (in particular, this task makes use of
    the len). If you are using an `IterableDataset <https://pytorch.org/docs/
    stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
    may be appropriate.
    :var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function
        used for computing the loss in each forward pass
    :var datasets: Mapping from a ``phase_type`` in ["train", "test"] to
        dataset used for training (or testing)
    :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
        to calculate during training
    :var num_epochs: Number of epochs (passes over dataset) to train
    :var test_only: Used to only run the test phase
    :var base_model: Model to be trained, unwrapped in DDP or DP wrappers
    :var optimizer: Optimizer used in train step
    :var optimizer_schedulers: Dictionary. Key is the name of the optimizer
        option (e.g. lr), value is a ClassyParamScheduler
    :var checkpoint: Serializable dict which represents state in training
    :var phases: List of phase specific information, e.g. if phase is
        train / test.
    :var hooks: List of hooks to apply during training
    :var train: Phase type, if true it means we are training,
        false means testing
    :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
    :var phase_idx: Current phase id, first phase is 0, if task has not started
        training then returns -1
    :var train_phase_idx: Only counts train phases
    :var num_updates: Number of total parameter updates applied to model
        by the optimizer
    :var data_iterator: Iterator which can be used to obtain batches
    :var losses: Loss curve
    :var perf_log: list of training speed measurements, to be logged
    :var clip_grad_norm: maximum gradient norm (default None)
    :var simulated_global_batchsize: batch size simulated via gradient
        accumulation
    :var optimizer_period: apply optimizer after this many steps; derived from
        simulated_global_batchsize, default 1.
    """

    def __init__(self):
        """Constructs a ClassificationTask"""
        super().__init__()

        self.base_loss = None
        self.datasets = {}
        self.meters = []
        self.num_epochs = 1
        self.test_phase_period = 1
        self.train_phases_per_epoch = 0
        self.test_only = False
        self.base_model = None
        self.optimizer = None
        self.optimizer_schedulers = {}
        self.checkpoint_dict = None
        self.checkpoint_path = None
        self.phases = []
        self.hooks = []
        self.train = True
        self.distributed_model = None
        self.distributed_loss = None
        self.phase_idx = -1
        self.train_phase_idx = -1
        self.num_updates = 0
        self.dataloader = None
        self.data_iterator = None
        self.losses = []
        self.broadcast_buffers_mode: BroadcastBuffersMode = (
            BroadcastBuffersMode.BEFORE_EVAL
        )
        self.amp_args = None
        self.amp_type = None
        self.amp_grad_scaler = None
        self.mixup_transform = None
        self.perf_log = []
        self.last_batch = None
        self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
        self.find_unused_parameters = False
        self.use_gpu = torch.cuda.is_available()
        self.dataloader_mp_context = "spawn"
        self.bn_weight_decay = False
        self._train_only = True
        self.clip_grad_norm = None
        self.simulated_global_batchsize = None
        self.optimizer_period = 1
        self.ddp_bucket_cap_mb = 25
        self.use_sharded_ddp = False
        self.fp16_grad_compress = False

    def set_use_sharded_ddp(self, use_sharded_ddp: bool):
        self.use_sharded_ddp = use_sharded_ddp
        if self.use_sharded_ddp:
            logging.info("Using Sharded DDP")
        return self

    def set_use_gpu(self, use_gpu: bool):
        self.use_gpu = use_gpu

        assert (
            not self.use_gpu or torch.cuda.is_available()
        ), "CUDA required to train on GPUs"

        return self

    def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
        """Sets maximum gradient norm.

        None means gradient clipping is disabled.
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
                See :class:`BatchNormSyncMode` for options.
            batch_norm_sync_group_size: Group size to use for synchronized
                batch norm. 0 means that the stats are synchronized across all
                replicas. For efficient synchronization, set it to the number
                of GPUs in a node (usually 8).
            find_unused_parameters: See
                :class:`torch.nn.parallel.DistributedDataParallel` for information.
            bucket_cap_mb: See
                :class:`torch.nn.parallel.DistributedDataParallel` for information.

        Raises:
            RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and
                apex is not installed.
        """
        self.broadcast_buffers_mode = broadcast_buffers_mode

        if batch_norm_sync_group_size > 0:
            if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
                # this should ideally work with PyTorch Sync BN as well, but it
                # fails while initializing DDP for some reason.
                raise ValueError(
                    "batch_norm_sync_group_size can be > 0 only when "
                    "Apex Synchronized Batch Normalization is being used."
                )
        self.batch_norm_sync_group_size = batch_norm_sync_group_size

        if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
            logging.info("Synchronized Batch Normalization is disabled")
        else:
            if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
                raise RuntimeError("apex is not installed")
            msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
            if self.batch_norm_sync_group_size > 0:
                msg += f" and group size {batch_norm_sync_group_size}"
            logging.info(msg)
        self.batch_norm_sync_mode = batch_norm_sync_mode

        if find_unused_parameters:
            logging.info("Enabling find_unused_parameters in DDP")

        self.find_unused_parameters = find_unused_parameters
        self.ddp_bucket_cap_mb = bucket_cap_mb

        if fp16_grad_compress:
            if get_torch_version() < [1, 8, 0]:
                raise RuntimeError(
                    "FP16 grad compression is only supported since PyTorch 1.8"
                )
            logging.info("Enabling FP16 grad compression")
        self.fp16_grad_compress = fp16_grad_compress

        return self

    def set_hooks(self, hooks: List["ClassyHook"]):
        """Set hooks for task

        Args:
            hooks: List of hooks to apply during training
        """
        from classy_vision.hooks import ClassyHook

        assert isinstance(hooks, list)
        assert all(isinstance(hook, ClassyHook) for hook in hooks)
        assert len({hook.name() for hook in hooks}) == len(
            hooks
        ), "Cannot have repeated hooks of the same class"
        # TODO (zyan3): we move the checkpoint hook to the end of the list because
        # some hooks may change the state of the model, and we want to save the
        # changed state in the checkpoint. This is a temporary fix.
        non_checkpoint_hooks = [
            hook for hook in hooks if not isinstance(hook, CheckpointHook)
        ]
        checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
        hooks = non_checkpoint_hooks + checkpoint_hooks
        self.hooks = hooks
        return self

    def set_model(self, model: ClassyModel):
        """Set model for task

        Args:
            model: Model to be trained
        """
        self.base_model = model
        return self

    def set_test_only(self, test_only: bool):
        """Set test only flag

        Args:
            test_only: If true, only test phases will be run
        """
        self.test_only = test_only
        return self

    def set_bn_weight_decay(self, bn_weight_decay: bool):
        assert type(bn_weight_decay) == bool

        self.bn_weight_decay = bn_weight_decay
        return self

    def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
        """Disable / enable apex.amp and set the automatic mixed precision parameters.

        apex.amp can be utilized for mixed / half precision training.

        Args:
            amp_args: Dictionary containing arguments to be passed to
                amp.initialize. Set to None to disable amp. To enable mixed
                precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. """ self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
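The run_optimizer and _should_do_step logic above reduces gradient accumulation to integer arithmetic on num_updates. A minimal standalone sketch of that schedule, assuming fixed-size batches; the function and variable names below are illustrative, not part of the task API:

```python
# Sketch of the accumulation bookkeeping used by _should_do_step /
# run_optimizer: with optimizer_period N, gradients are zeroed on the
# first of every N updates and the optimizer steps on the last.
def accumulation_schedule(num_updates, global_batchsize, optimizer_period):
    update_idx = num_updates // global_batchsize
    do_zero_grad = (update_idx % optimizer_period) == 0
    do_step = (update_idx % optimizer_period) == optimizer_period - 1
    return do_zero_grad, do_step

# Global batch size 32, optimizer_period 4: updates 0..3 yield
# (True, False), (False, False), (False, False), (False, True).
for update in range(4):
    print(accumulation_schedule(update * 32, 32, 4))
```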
set_amp_args
Disable / enable apex.amp and set the automatic mixed precision parameters.

apex.amp can be utilized for mixed / half precision training.

Args:
    amp_args: Dictionary containing arguments to be passed to
        amp.initialize. Set to None to disable amp. To enable mixed
        precision training, pass amp_args={"opt_level": "O1"} here.
        See https://nvidia.github.io/apex/amp.html for more info.

Raises:
    RuntimeError: If amp_args is not None and CUDA is unavailable, or if
        Apex AMP is requested and apex is not installed.

Warning: apex needs to be installed to utilize Apex AMP (the default
    amp_type).
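A hypothetical usage sketch for this setter; `task` is assumed to be an already-built ClassificationTask, and the Apex branch additionally requires CUDA plus an apex install:

```python
# Hypothetical usage; `task` is an assumed ClassificationTask instance.
task = task.set_amp_args({"opt_level": "O1"})      # Apex AMP ("amp_type" defaults to apex)
task = task.set_amp_args({"amp_type": "pytorch"})  # native torch.cuda.amp instead
task = task.set_amp_args(None)                     # disable AMP entirely
```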
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. 
non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self # MASKED: set_amp_args function (lines 451-512) def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. """ test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) 
.set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. 
Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. """ self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
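The `where` property in the file above turns training progress into a float in [0, 1) with two divisions. A standalone sketch of that computation under the same fixed-batch-size assumption; the names below are illustrative, not the task's API:

```python
# Sketch of the `where` computation: completed optimizer updates over
# the total number of train steps across all train phases.
def where(num_updates, global_batchsize, num_train_phases, batches_per_phase):
    current_step = num_updates / global_batchsize
    num_steps = num_train_phases * batches_per_phase
    return current_step / num_steps

# Halfway through 2 train phases of 100 batches at global batch size 32:
print(where(num_updates=100 * 32, global_batchsize=32,
            num_train_phases=2, batches_per_phase=100))  # 0.5
```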
def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
    """Disable / enable apex.amp and set the automatic mixed precision parameters.

    apex.amp can be utilized for mixed / half precision training.

    Args:
        amp_args: Dictionary containing arguments to be passed to
            amp.initialize. Set to None to disable amp. To enable mixed
            precision training, pass amp_args={"opt_level": "O1"} here.
            See https://nvidia.github.io/apex/amp.html for more info.

    Raises:
        RuntimeError: If amp_args is not None and CUDA is unavailable, or if
            Apex AMP is requested and apex is not installed.

    Warning: apex needs to be installed to utilize Apex AMP (the default
        amp_type).
    """
    self.amp_args = amp_args

    if amp_args is None:
        logging.info("AMP disabled")
    else:
        # Check that the requested AMP type is known
        try:
            self.amp_type = AmpType[self.amp_args["amp_type"].upper()]
        except KeyError:
            logging.info("AMP type not specified, defaulting to Apex")
            self.amp_type = AmpType.APEX

        # Check for CUDA availability, required for both Apex and Pytorch AMP
        if not torch.cuda.is_available():
            raise RuntimeError(
                "AMP is required but CUDA is not supported, cannot enable AMP"
            )

        # Check for Apex availability
        if self.amp_type == AmpType.APEX and not apex_available:
            raise RuntimeError(
                "Apex AMP is required but Apex is not installed, cannot enable AMP"
            )

        if self.use_sharded_ddp:
            if self.amp_type == AmpType.APEX:
                raise RuntimeError(
                    "ShardedDDP has been requested, which is incompatible with Apex AMP"
                )
            if not fairscale_available:
                raise RuntimeError(
                    "ShardedDDP has been requested, but fairscale is not installed in the current environment"
                )

        # Set the Torch AMP grad scaler, used to prevent gradient underflow.
        # This must be a plain `if` rather than an `elif` chained to the
        # `use_sharded_ddp` check above; otherwise no scaler is ever created
        # when sharded DDP is combined with PyTorch AMP.
        if self.amp_type == AmpType.PYTORCH:
            if self.use_sharded_ddp:
                logging.info("Using ShardedGradScaler to manage Pytorch AMP")
                self.amp_grad_scaler = ShardedGradScaler()
            else:
                self.amp_grad_scaler = TorchGradScaler()

        logging.info(f"AMP enabled with args {amp_args}")
    return self
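For the AmpType.PYTORCH branch above, run_optimizer drives the scaler through the standard scale / step / update cycle. A self-contained sketch of that torch.cuda.amp pattern; the tiny model and random data are placeholders, and scaling is disabled automatically on CPU:

```python
import torch

use_cuda = torch.cuda.is_available()
device = "cuda" if use_cuda else "cpu"
model = torch.nn.Linear(8, 2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(enabled=use_cuda)

inputs = torch.randn(4, 8, device=device)
targets = torch.randint(0, 2, (4,), device=device)

optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=use_cuda):
    loss = torch.nn.functional.cross_entropy(model(inputs), targets)
scaler.scale(loss).backward()  # scale the loss to avoid fp16 grad underflow
scaler.step(optimizer)         # unscales grads; skips the step on inf/nan
scaler.update()                # adjust the scale factor for the next iteration
```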
451
512
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. 
See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. """ self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
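The file above defines the task in builder style: every `set_*` method stores one attribute and returns `self`, which is exactly how `from_config` chains its setters before `prepare()` derives the remaining state. As a minimal sketch of manual assembly (not from the source: `my_model`, `my_loss`, `my_optimizer`, and the dataset variables are hypothetical, pre-built instances of the Classy* base classes shown above):

```python
# Sketch only: the my_* objects are assumed, pre-built ClassyModel / ClassyLoss /
# ClassyOptimizer / ClassyDataset instances; the setter chain mirrors from_config().
task = (
    ClassificationTask()
    .set_num_epochs(90)
    .set_model(my_model)          # ClassyModel (assumed instance)
    .set_loss(my_loss)            # ClassyLoss (assumed instance)
    .set_optimizer(my_optimizer)  # ClassyOptimizer (assumed instance)
    .set_meters([])               # no meters in this sketch
    .set_test_phase_period(1)     # run a test phase after every train phase
)
task.set_dataset(my_train_dataset, "train")  # ClassyDataset (assumed instance)
task.set_dataset(my_test_dataset, "test")

# prepare() builds the phase list, moves model and loss to the right device,
# sets the optimizer param groups, and wraps the model in DDP when distributed.
task.prepare()
```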
set_mixup_transform
Disable / enable mixup transform for data augmentation

Args:
    mixup_transform: a callable object which performs mixup data augmentation
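The masked file below elides this function. As a reference, a sketch consistent with this docstring and with the file's other fluent setters (each stores its argument, logs whether the feature is enabled, and returns `self`):

```python
# Pattern-consistent sketch of the masked setter; the stored transform is
# applied to each sample in train_step before the forward pass.
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
    """Disable / enable mixup transform for data augmentation

    Args:
        mixup_transform: a callable object which performs mixup data augmentation
    """
    self.mixup_transform = mixup_transform
    if mixup_transform is None:
        logging.info("mixup disabled")
    else:
        logging.info("mixup enabled")
    return self
```

In `from_config`, this setter receives `MixupTransform(config["mixup"]["alpha"], config["mixup"].get("num_classes"))` whenever the config contains a `mixup` section.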
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union

import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset, build_dataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
    all_reduce_mean,
    barrier,
    init_distributed_data_parallel_model,
    is_distributed_training_run,
)
from classy_vision.generic.util import (
    Timer,
    copy_model_to_gpu,
    load_and_broadcast_checkpoint,
    master_params,
    recursive_copy_to_gpu,
    split_batchnorm_params,
    update_classy_state,
)
from classy_vision.generic.util import get_torch_version
from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks
from classy_vision.losses import ClassyLoss, build_loss
from classy_vision.meters import ClassyMeter, build_meters
from classy_vision.models import ClassyModel, build_model
from classy_vision.optim import (
    ClassyOptimizer,
    build_optimizer,
    build_optimizer_schedulers,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast

from . import register_task
from .classy_task import ClassyTask


try:
    import apex

    apex_available = True
except ImportError:
    apex_available = False

try:
    from torch.cuda.amp import GradScaler as TorchGradScaler
except ImportError:
    pass

try:
    from fairscale.optim.grad_scaler import ShardedGradScaler

    fairscale_available = True
except ImportError:
    fairscale_available = False


class AmpType(enum.Enum):
    # Automatic Mixed Precision supported types
    APEX = enum.auto()
    PYTORCH = enum.auto()


class BroadcastBuffersMode(enum.Enum):
    DISABLED = enum.auto()
    # Enable DistributedDataParallel's broadcast_buffers option, synchronizing
    # model buffers every forward pass.
    FORWARD_PASS = enum.auto()
    # Similar to FORWARD_PASS, but only synchronizes model buffers once
    # per epoch, between train and test phases. If your motivation for
    # synchronizing buffers is for buffers to be consistent during eval, use
    # this instead of FORWARD_PASS to reduce training overhead.
    BEFORE_EVAL = enum.auto()


class BatchNormSyncMode(enum.Enum):
    DISABLED = enum.auto()  # No Synchronized Batch Normalization
    PYTORCH = enum.auto()  # Use torch.nn.SyncBatchNorm
    APEX = enum.auto()  # Use apex.parallel.SyncBatchNorm, needs apex to be installed


class LastBatchInfo(NamedTuple):
    loss: torch.Tensor
    output: torch.Tensor
    target: torch.Tensor
    sample: Dict[str, Any]
    step_data: Dict[str, Any]


@register_task("classification_task")
class ClassificationTask(ClassyTask):
    """Basic classification training task.

    This task encapsulates all of the components and steps needed to
    train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.

    Assumes a train / test phase per epoch and that the datasets have the
    same API as the map-style Dataset class in
    `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
    #torch.utils.data.Dataset>`_ (in particular, this task makes use of
    the len). If you are using an `IterableDataset <https://pytorch.org/docs/
    stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
    may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. 
See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. """ self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self # MASKED: set_mixup_transform function (lines 514-525) def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
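The `_should_do_step` and `run_optimizer` methods above implement gradient accumulation: gradients are zeroed at the start of each accumulation window and the optimizer only steps on the window's last update. A minimal standalone sketch of that cadence, with illustrative constants standing in for `get_global_batchsize()` and `optimizer_period` (they are not part of the task API):

```python
# Illustrative constants; in the task these come from get_global_batchsize()
# and optimizer_period (simulated_global_batchsize // global batch size).
GLOBAL_BATCHSIZE = 32
OPTIMIZER_PERIOD = 4

num_updates = 0
for step in range(8):
    update_idx = num_updates // GLOBAL_BATCHSIZE
    do_zero_grad = (update_idx % OPTIMIZER_PERIOD) == 0                # run_optimizer
    do_step = (update_idx % OPTIMIZER_PERIOD) == OPTIMIZER_PERIOD - 1  # _should_do_step
    print(f"step {step}: zero_grad={do_zero_grad}, optimizer_step={do_step}")
    num_updates += GLOBAL_BATCHSIZE  # mirrors the increment in train_step

# Gradients are zeroed on steps 0 and 4; the optimizer steps on steps 3 and 7,
# i.e. once per accumulation window of OPTIMIZER_PERIOD train steps.
```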
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
        """Disable / enable mixup transform for data augmentation

        Args:
            mixup_transform: a callable object which performs mixup data augmentation
        """
        self.mixup_transform = mixup_transform
        if mixup_transform is None:
            logging.info("mixup disabled")
        else:
            logging.info("mixup enabled")

        return self
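Because the setter returns `self`, it composes with the other builder-style setters the same way `from_config` chains them. A hedged usage sketch (the alpha and class count are illustrative values, mirroring the positional arguments `from_config` passes to `MixupTransform`):

```python
# Usage sketch only; assumes classy_vision is installed and importable.
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.tasks import ClassificationTask

task = ClassificationTask().set_mixup_transform(MixupTransform(0.2, 1000))
task = task.set_mixup_transform(None)  # logs "mixup disabled" and clears the transform
```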
514
525
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import enum import json import logging import math import multiprocessing as mp import time from typing import Any, Dict, List, NamedTuple, Optional, Union import torch import torch.nn as nn from classy_vision.dataset import ClassyDataset, build_dataset from classy_vision.dataset.transforms.mixup import MixupTransform from classy_vision.generic.distributed_util import ( all_reduce_mean, barrier, init_distributed_data_parallel_model, is_distributed_training_run, ) from classy_vision.generic.util import ( Timer, copy_model_to_gpu, load_and_broadcast_checkpoint, master_params, recursive_copy_to_gpu, split_batchnorm_params, update_classy_state, ) from classy_vision.generic.util import get_torch_version from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks from classy_vision.losses import ClassyLoss, build_loss from classy_vision.meters import ClassyMeter, build_meters from classy_vision.models import ClassyModel, build_model from classy_vision.optim import ( ClassyOptimizer, build_optimizer, build_optimizer_schedulers, ) from classy_vision.optim.zero import ZeRO from torch.distributed import broadcast from . import register_task from .classy_task import ClassyTask try: import apex apex_available = True except ImportError: apex_available = False try: from torch.cuda.amp import GradScaler as TorchGradScaler except ImportError: pass try: from fairscale.optim.grad_scaler import ShardedGradScaler fairscale_available = True except ImportError: fairscale_available = False class AmpType(enum.Enum): # Automatic Mixed Precision supported types APEX = enum.auto() PYTORCH = enum.auto() class BroadcastBuffersMode(enum.Enum): DISABLED = enum.auto() # Enable DistributedDataParallel's broadcast_buffers option, synchronizing # model buffers every forward pass. FORWARD_PASS = enum.auto() # Similar to FORWARD_PASS, but only synchronizes model buffers once # per epoch, between train and test phases. If your motivation for # synchronizing buffers is for buffers to be consistent during eval, use # this instead of FORWARD_PASS to reduce training overhead. BEFORE_EVAL = enum.auto() class BatchNormSyncMode(enum.Enum): DISABLED = enum.auto() # No Synchronized Batch Normalization PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed class LastBatchInfo(NamedTuple): loss: torch.Tensor output: torch.Tensor target: torch.Tensor sample: Dict[str, Any] step_data: Dict[str, Any] @register_task("classification_task") class ClassificationTask(ClassyTask): """Basic classification training task. This task encapsultates all of the components and steps needed to train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`. Assumes a train / test phase per each epoch and that the datasets have the same API as the map-style Dataset class in `torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html #torch.utils.data.Dataset>`_ (in particular, this task makes use of the len). If you are using an `IterableDataset <https://pytorch.org/docs/ stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task may be appropriate. 
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used for computing the loss in each forward pass :var datasets: Mapping from a ``phase_type`` in ["train", "test'] to dataset used for training (or testing) :var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`) to calculate during training :var num_epochs: Number of epochs (passes over dataset) to train :var test_only: Used to only run the test phase :var base_model: Model to be trained, unwrapped in DDP or DP wrappers :var optimizer: Optimizer used in train step :var optimizer_schedulers: Dictionary. Key is the name of the optimizer option (e.g. lr), value is a ClassyParamScheduler :var checkpoint: Serializable dict which represents state in training :var phases: List of phase specific information, e.g. if phase is train / test. :var hooks: List of hooks to apply during training :var train: Phase type, if true it means we are training, false means testing :var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel) :var phase_idx: Current phase id, first phase is 0, if task has not started training then returns -1 :var train_phase_idx: Only counts train phases :var num_updates: Number of total parameter updates applied to model by the optimizer :var data_iterator: Iterator which can be used to obtain batches :var losses: Loss curve :var perf_log: list of training speed measurements, to be logged :var clip_grad_norm: maximum gradient norm (default None) :var simulated_global_batchsize: batch size simulated via gradient accumulation :var optimizer_period: apply optimizer after this many steps; derived from simulated_global_batchsize, default 1. """ def __init__(self): """Constructs a ClassificationTask""" super().__init__() self.base_loss = None self.datasets = {} self.meters = [] self.num_epochs = 1 self.test_phase_period = 1 self.train_phases_per_epoch = 0 self.test_only = False self.base_model = None self.optimizer = None self.optimizer_schedulers = {} self.checkpoint_dict = None self.checkpoint_path = None self.phases = [] self.hooks = [] self.train = True self.distributed_model = None self.distributed_loss = None self.phase_idx = -1 self.train_phase_idx = -1 self.num_updates = 0 self.dataloader = None self.data_iterator = None self.losses = [] self.broadcast_buffers_mode: BroadcastBuffersMode = ( BroadcastBuffersMode.BEFORE_EVAL ) self.amp_args = None self.amp_type = None self.amp_grad_scaler = None self.mixup_transform = None self.perf_log = [] self.last_batch = None self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED self.find_unused_parameters = False self.use_gpu = torch.cuda.is_available() self.dataloader_mp_context = "spawn" self.bn_weight_decay = False self._train_only = True self.clip_grad_norm = None self.simulated_global_batchsize = None self.optimizer_period = 1 self.ddp_bucket_cap_mb = 25 self.use_sharded_ddp = False self.fp16_grad_compress = False def set_use_sharded_ddp(self, use_sharded_ddp: bool): self.use_sharded_ddp = use_sharded_ddp if self.use_sharded_ddp: logging.info("Using Sharded DDP") return self def set_use_gpu(self, use_gpu: bool): self.use_gpu = use_gpu assert ( not self.use_gpu or torch.cuda.is_available() ), "CUDA required to train on GPUs" return self def set_clip_grad_norm(self, clip_grad_norm: Optional[float]): """Sets maximum gradient norm. None means gradient clipping is disabled. 
Defaults to None.""" self.clip_grad_norm = clip_grad_norm if clip_grad_norm is None: logging.info("Disabled gradient norm clipping.") else: logging.info( f"Enabled gradient norm clipping with threshold: {clip_grad_norm}" ) return self def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]): """Sets a simulated batch size by gradient accumulation. Gradient accumulation adds up gradients from multiple minibatches and steps the optimizer every N train_steps, where N is optimizer_period. When enabled, the very last train_steps might end up not updating the model, depending on the number of total steps. None means gradient accumulation is disabled. Defaults to None.""" self.simulated_global_batchsize = simulated_global_batchsize return self def set_checkpoint(self, checkpoint_path: str): """Sets checkpoint on task. Args: checkpoint_path: The path to load the checkpoint from. Can be a file or a directory. See :func:`load_checkpoint` for more information. """ self.checkpoint_path = checkpoint_path return self def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]): """Sets the checkpoint dict in the task. Only used for testing. Args: checkpoint_dict: A serializable dict representing current task state """ self.checkpoint_dict = checkpoint_dict return self def set_num_epochs(self, num_epochs: Union[int, float]): """Set number of epochs to be run. Args: num_epochs: Number of epochs to run task """ self.num_epochs = num_epochs return self def set_test_phase_period(self, test_phase_period: int): """Set the period of test phase. Args: test_phase_period: The period of test phase """ self.test_phase_period = test_phase_period return self def set_dataset(self, dataset: ClassyDataset, phase_type: str): """Set dataset for phase type on task Args: dataset: ClassyDataset for returning samples. phase_type: str must be one of "train" or "test" """ assert phase_type in [ "train", "test", ], "phase_type must be in ['train', 'test']" self.datasets[phase_type] = dataset if phase_type == "train": self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1) else: self._train_only = False return self def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]): """Set the multiprocessing context used by the dataloader. The context can be either 'spawn', 'fork', 'forkserver' or None (uses the default context). See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context for more details.""" self.dataloader_mp_context = dataloader_mp_context return self def set_optimizer(self, optimizer: ClassyOptimizer): """Set optimizer for task Args: optimizer: optimizer for task """ self.optimizer = optimizer return self def set_loss(self, loss: ClassyLoss): """Set loss function for task Args: loss: loss for task """ self.base_loss = loss return self def set_meters(self, meters: List["ClassyMeter"]): """Set meters for task Args: meters: list of meters to compute during training """ self.meters = meters return self def set_distributed_options( self, broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int = 0, find_unused_parameters: bool = False, bucket_cap_mb: int = 25, fp16_grad_compress: bool = False, ): """Set distributed options. Args: broadcast_buffers_mode: Broadcast buffers mode. See :class:`BroadcastBuffersMode` for options. batch_norm_sync_mode: Batch normalization synchronization mode. 
See :class:`BatchNormSyncMode` for options. batch_norm_sync_group_size: Group size to use for synchronized batch norm. 0 means that the stats are synchronized across all replicas. For efficient synchronization, set it to the number of GPUs in a node ( usually 8). find_unused_parameters: See :class:`torch.nn.parallel.DistributedDataParallel` for information. bucket_cap_mb: See :class:`torch.nn.parallel.DistributedDataParallel` for information. Raises: RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex is not installed. """ self.broadcast_buffers_mode = broadcast_buffers_mode if batch_norm_sync_group_size > 0: if not batch_norm_sync_mode == BatchNormSyncMode.APEX: # this should ideally work with PyTorch Sync BN as well, but it # fails while initializing DDP for some reason. raise ValueError( "batch_norm_sync_group_size can be > 0 only when " "Apex Synchronized Batch Normalization is being used." ) self.batch_norm_sync_group_size = batch_norm_sync_group_size if batch_norm_sync_mode == BatchNormSyncMode.DISABLED: logging.info("Synchronized Batch Normalization is disabled") else: if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available: raise RuntimeError("apex is not installed") msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}" if self.batch_norm_sync_group_size > 0: msg += f" and group size {batch_norm_sync_group_size}" logging.info(msg) self.batch_norm_sync_mode = batch_norm_sync_mode if find_unused_parameters: logging.info("Enabling find_unused_parameters in DDP") self.find_unused_parameters = find_unused_parameters self.ddp_bucket_cap_mb = bucket_cap_mb if fp16_grad_compress: if get_torch_version() < [1, 8, 0]: raise RuntimeError( "FP16 grad compression is only supported since PyTorch 1.8" ) logging.info("Enabling FP16 grad compression") self.fp16_grad_compress = fp16_grad_compress return self def set_hooks(self, hooks: List["ClassyHook"]): """Set hooks for task Args: hooks: List of hooks to apply during training """ from classy_vision.hooks import ClassyHook assert isinstance(hooks, list) assert all(isinstance(hook, ClassyHook) for hook in hooks) assert len({hook.name() for hook in hooks}) == len( hooks ), "Cannot have repeated hooks of the same class" # TODO (zyan3): we move checkpoint hook to the end of the list because some hooks # may change the state of the model, and we want to save changed state in the checkpoint. # This is temporary fix. non_checkpoint_hooks = [ hook for hook in hooks if not isinstance(hook, CheckpointHook) ] checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)] hooks = non_checkpoint_hooks + checkpoint_hooks self.hooks = hooks return self def set_model(self, model: ClassyModel): """Set model for task Args: model: Model to be trained """ self.base_model = model return self def set_test_only(self, test_only: bool): """Set test only flag Args: test_only: If true, only test phases will be run """ self.test_only = test_only return self def set_bn_weight_decay(self, bn_weight_decay: bool): assert type(bn_weight_decay) == bool self.bn_weight_decay = bn_weight_decay return self def set_amp_args(self, amp_args: Optional[Dict[str, Any]]): """Disable / enable apex.amp and set the automatic mixed precision parameters. apex.amp can be utilized for mixed / half precision training. Args: amp_args: Dictionary containing arguments to be passed to amp.initialize. Set to None to disable amp. To enable mixed precision training, pass amp_args={"opt_level": "O1"} here. 
See https://nvidia.github.io/apex/amp.html for more info. Raises: RuntimeError: If opt_level is not None and apex is not installed. Warning: apex needs to be installed to utilize this feature. """ self.amp_args = amp_args if amp_args is None: logging.info("AMP disabled") else: # Check that the requested AMP type is known try: self.amp_type = AmpType[self.amp_args["amp_type"].upper()] except KeyError: logging.info("AMP type not specified, defaulting to Apex") self.amp_type = AmpType.APEX # Check for CUDA availability, required for both Apex and Pytorch AMP if not torch.cuda.is_available(): raise RuntimeError( "AMP is required but CUDA is not supported, cannot enable AMP" ) # Check for Apex availability if self.amp_type == AmpType.APEX and not apex_available: raise RuntimeError( "Apex AMP is required but Apex is not installed, cannot enable AMP" ) if self.use_sharded_ddp: if self.amp_type == AmpType.APEX: raise RuntimeError( "ShardedDDP has been requested, which is incompatible with Apex AMP" ) if not fairscale_available: raise RuntimeError( "ShardedDDP has been requested, but fairscale is not installed in the current environment" ) # Set Torch AMP grad scaler, used to prevent gradient underflow elif self.amp_type == AmpType.PYTORCH: if self.use_sharded_ddp: logging.info("Using ShardedGradScaler to manage Pytorch AMP") self.amp_grad_scaler = ShardedGradScaler() else: self.amp_grad_scaler = TorchGradScaler() logging.info(f"AMP enabled with args {amp_args}") return self def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]): """Disable / enable mixup transform for data augmentation Args:: mixup_transform: a callable object which performs mixup data augmentation """ self.mixup_transform = mixup_transform if mixup_transform is None: logging.info("mixup disabled") else: logging.info("mixup enabled") return self def set_optimizer_schedulers(self, schedulers): self.optimizer_schedulers = schedulers return self @classmethod def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask": """Instantiates a ClassificationTask from a configuration. Args: config: A configuration for a ClassificationTask. See :func:`__init__` for parameters expected in the config. Returns: A ClassificationTask instance. 
""" test_only = config.get("test_only", False) if not test_only: # TODO Make distinction between epochs and phases in optimizer clear train_phases_per_epoch = config["dataset"]["train"].get( "phases_per_epoch", 1 ) optimizer_config = config["optimizer"] optimizer_config["num_epochs"] = ( config["num_epochs"] * train_phases_per_epoch ) optimizer = build_optimizer(optimizer_config) param_schedulers = build_optimizer_schedulers(optimizer_config) datasets = {} phase_types = ["train", "test"] for phase_type in phase_types: if phase_type in config["dataset"]: datasets[phase_type] = build_dataset(config["dataset"][phase_type]) loss = build_loss(config["loss"]) amp_args = config.get("amp_args") meters = build_meters(config.get("meters", {})) model = build_model(config["model"]) mixup_transform = None if config.get("mixup") is not None: assert "alpha" in config["mixup"], "key alpha is missing in mixup dict" mixup_transform = MixupTransform( config["mixup"]["alpha"], config["mixup"].get("num_classes") ) # hooks config is optional hooks_config = config.get("hooks") hooks = [] if hooks_config is not None: hooks = build_hooks(hooks_config) distributed_config = config.get("distributed", {}) distributed_options = { "broadcast_buffers_mode": BroadcastBuffersMode[ distributed_config.get("broadcast_buffers", "before_eval").upper() ], "batch_norm_sync_mode": BatchNormSyncMode[ distributed_config.get("batch_norm_sync_mode", "disabled").upper() ], "batch_norm_sync_group_size": distributed_config.get( "batch_norm_sync_group_size", 0 ), "find_unused_parameters": distributed_config.get( "find_unused_parameters", False ), "bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25), "fp16_grad_compress": distributed_config.get("fp16_grad_compress", False), } task = ( cls() .set_num_epochs(config["num_epochs"]) .set_test_phase_period(config.get("test_phase_period", 1)) .set_loss(loss) .set_test_only(test_only) .set_model(model) .set_meters(meters) .set_amp_args(amp_args) .set_mixup_transform(mixup_transform) .set_distributed_options(**distributed_options) .set_hooks(hooks) .set_bn_weight_decay(config.get("bn_weight_decay", False)) .set_clip_grad_norm(config.get("clip_grad_norm")) .set_simulated_global_batchsize(config.get("simulated_global_batchsize")) .set_use_sharded_ddp(config.get("use_sharded_ddp", False)) ) if not test_only: task.set_optimizer(optimizer) task.set_optimizer_schedulers(param_schedulers) use_gpu = config.get("use_gpu") if use_gpu is not None: task.set_use_gpu(use_gpu) for phase_type in datasets: task.set_dataset(datasets[phase_type], phase_type) # NOTE: this is a private member and only meant to be used for # logging/debugging purposes. See __repr__ implementation task._config = config return task @property def num_batches_per_phase(self): """Returns number of batches in current phase iterator""" return len(self.data_iterator) @property def model(self): """Returns model used in training (can be wrapped with DDP)""" return ( self.distributed_model if is_distributed_training_run() else self.base_model ) @property def loss(self): """Returns loss used in training (can be wrapped with DDP)""" return self.distributed_loss if self.distributed_loss else self.base_loss @property def phase_type(self): """Returns current phase type. 
String with value "train" or "test" """ return "train" if self.train else "test" @property def eval_phase_idx(self): """Returns current evaluation phase""" return self.phase_idx - self.train_phase_idx - 1 def get_total_training_phases(self): """ Returns the total number of "train" phases in the task """ num_training_phases = 0 for phase in self.phases: if phase["train"] is True: num_training_phases += 1 return num_training_phases def get_total_test_phases(self): """ Returns the total number of "test" phases in the task """ num_test_phases = 0 for phase in self.phases: if phase["train"] is False: num_test_phases += 1 return num_test_phases def _build_phases(self): """Returns list of phases from config. These phases will look like: { train: is this a train or test phase? optimizer: optimizer settings } - If this is a test only run, then only test phases will be generated - If this is a training run with both train and test datasets, then x phases = x train phases + x test phases, interleaved. If test_phase_period > 1, test phases are only added after test_phase_period train phases. The last phase is always a test phase. - If this is a training run with only a train dataset, then x phases = x train phases. """ if not self.test_only: phases = [ {"train": True} for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs)) ] if self._train_only: return phases final_phases = [] for i, phase in enumerate(phases): final_phases.append(phase) if (i + 1) % self.test_phase_period == 0: final_phases.append({"train": False}) if final_phases[-1]["train"]: final_phases.append({"train": False}) return final_phases return [{"train": False} for _ in range(self.num_epochs)] def build_dataloader_from_dataset(self, dataset, **kwargs): """Builds a dataloader from the provided dataset Args: dataset: A ClassyDataset kwargs: Additional kwargs to pass during dataloader construction for derived classes """ return dataset.iterator( phase_type=self.phase_type, current_phase_id=self.train_phase_idx if self.train else 0, pin_memory=self.use_gpu and torch.cuda.device_count() > 1, multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs, ) def build_dataloaders_for_current_phase(self): """Builds dataloader(s) for the current phase. Deriving classes can override this method to support custom behavior, like supporting multiple dataloaders in parallel. 
""" self.dataloader = self.build_dataloader_from_dataset( self.datasets[self.phase_type] ) def prepare_optimizer(self, optimizer, model, loss=None): bn_params, other_params = split_batchnorm_params(model) if loss is not None: bn_params_loss, params_loss = split_batchnorm_params(loss) bn_params = bn_params + bn_params_loss other_params = other_params + params_loss bn_schedulers = self.optimizer_schedulers.copy() if not self.bn_weight_decay: bn_schedulers["weight_decay"] = 0 param_groups = [{"params": other_params, **self.optimizer_schedulers}] if len(bn_params) > 0: param_groups.append({"params": bn_params, **bn_schedulers}) self.optimizer.set_param_groups(param_groups) def prepare(self): """Prepares task for training, populates all derived attributes """ self.phases = self._build_phases() self.train = False if self.test_only else self.train if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH: self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model) elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX: sync_bn_process_group = apex.parallel.create_syncbn_process_group( self.batch_norm_sync_group_size ) self.base_model = apex.parallel.convert_syncbn_model( self.base_model, process_group=sync_bn_process_group ) # move the model and loss to the right device if self.use_gpu: self.base_model, self.base_loss = copy_model_to_gpu( self.base_model, self.base_loss ) else: self.base_loss.cpu() self.base_model.cpu() if self.optimizer is not None: self.prepare_optimizer( optimizer=self.optimizer, model=self.base_model, loss=self.base_loss ) if self.amp_args is not None: if self.amp_type == AmpType.APEX: # Initialize apex.amp. This updates the model and the PyTorch optimizer ( # if training, which is wrapped by the ClassyOptimizer in self.optimizer). # Please note this must happen before loading the checkpoint, cause # there's amp state to be restored. if self.optimizer is None: self.base_model = apex.amp.initialize( self.base_model, optimizers=None, **self.amp_args ) else: self.base_model, self.optimizer.optimizer = apex.amp.initialize( self.base_model, self.optimizer.optimizer, **self.amp_args ) if self.simulated_global_batchsize is not None: if self.simulated_global_batchsize % self.get_global_batchsize() != 0: raise ValueError( f"Global batch size ({self.get_global_batchsize()}) must divide " f"simulated_global_batchsize ({self.simulated_global_batchsize})" ) else: self.simulated_global_batchsize = self.get_global_batchsize() self.optimizer_period = ( self.simulated_global_batchsize // self.get_global_batchsize() ) if self.optimizer_period > 1: logging.info( f"Using gradient accumulation with a period of {self.optimizer_period}" ) if self.checkpoint_path: self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path) classy_state_dict = ( None if self.checkpoint_dict is None else self.checkpoint_dict["classy_state_dict"] ) if classy_state_dict is not None: state_load_success = update_classy_state(self, classy_state_dict) assert ( state_load_success ), "Update classy state from checkpoint was unsuccessful." self.init_distributed_data_parallel_model() def init_distributed_data_parallel_model(self): """ Initialize `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/ docs/stable/nn.html#distributeddataparallel>`_. Needed for distributed training. This is where a model should be wrapped by DDP. 
""" if not is_distributed_training_run(): return assert ( self.distributed_model is None ), "init_ddp_non_elastic must only be called once" broadcast_buffers = ( self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS ) if self.use_sharded_ddp: if not isinstance(self.optimizer, ZeRO): raise ValueError( "ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer" ) from fairscale.nn.data_parallel import ShardedDataParallel # Replace the original DDP wrap by the shard-aware ShardedDDP self.distributed_model = ShardedDataParallel( module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, ) else: self.distributed_model = init_distributed_data_parallel_model( self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) if self.fp16_grad_compress: from torch.distributed.algorithms import ddp_comm_hooks # FP16 hook is stateless and only takes a process group as the state. # We use the default process group so we set the state to None. process_group = None self.distributed_model.register_comm_hook( process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook, ) if ( isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters() ): logging.info("Initializing distributed loss") self.distributed_loss = init_distributed_data_parallel_model( self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb, ) @property def where(self): """Returns the proportion of training that has completed. If in test only mode, returns proportion of testing completed Returned value is a float in the range [0, 1) """ current_step = self.num_updates / self.get_global_batchsize() num_phases = ( self.get_total_test_phases() if self.test_only else self.get_total_training_phases() ) if self.num_batches_per_phase <= 0: raise RuntimeError("No batches to read. Is the dataset empty?") num_steps = num_phases * self.num_batches_per_phase where = current_step / num_steps return where def get_classy_state(self, deep_copy: bool = False): """Returns serialiable state of task Args: deep_copy: If true, does a deep copy of state before returning. 
""" optimizer_state = {} if self.optimizer is not None: optimizer_state = self.optimizer.get_classy_state() classy_state_dict = { "train": self.train, "base_model": self.base_model.get_classy_state(), "meters": [meter.get_classy_state() for meter in self.meters], "optimizer": optimizer_state, "phase_idx": self.phase_idx, "train_phase_idx": self.train_phase_idx, "num_updates": self.num_updates, "losses": self.losses, "hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks}, "loss": {}, } if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): classy_state_dict["train_dataset_iterator"] = self.datasets[ "train" ].get_classy_state() if isinstance(self.base_loss, ClassyLoss): classy_state_dict["loss"] = self.base_loss.get_classy_state() if self.amp_args is not None: if self.amp_type == AmpType.APEX: classy_state_dict["amp"] = apex.amp.state_dict() elif self.amp_grad_scaler is not None: classy_state_dict["amp"] = self.amp_grad_scaler.state_dict() if deep_copy: classy_state_dict = copy.deepcopy(classy_state_dict) return classy_state_dict def set_classy_state(self, state): """Set task state Args: state: Dict containing state of a task """ # some settings are different in test only self.train = False if self.test_only else state["train"] if not self.test_only: self.phase_idx = state["phase_idx"] self.num_updates = state["num_updates"] self.train_phase_idx = state["train_phase_idx"] self.losses = state["losses"] for meter, meter_state in zip(self.meters, state["meters"]): meter.set_classy_state(meter_state) self.base_model.set_classy_state(state["base_model"]) if self.optimizer is not None: self.optimizer.set_classy_state(state["optimizer"]) if state.get("loss") and isinstance(self.base_loss, ClassyLoss): self.base_loss.set_classy_state(state["loss"]) if "amp" in state: if self.amp_type == AmpType.APEX: apex.amp.load_state_dict(state["amp"]) else: self.amp_grad_scaler.load_state_dict(state["amp"]) for hook in self.hooks: # we still want to be able to run when new hooks are added or old # hooks are removed if hook.name() in state["hooks"]: hook.set_classy_state(state["hooks"][hook.name()]) else: logging.warning(f"No state found for hook: {hook.name()}") if "train" in self.datasets and self._is_checkpointable_dataset( self.datasets["train"] ): self.datasets["train"].set_classy_state(state.get("train_dataset_iterator")) @staticmethod def _is_checkpointable_dataset(dataset): return hasattr(dataset, "get_classy_state") and hasattr( dataset, "set_classy_state" ) def eval_step(self): self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) with torch.no_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.check_inf_nan(loss) self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, 
) def check_inf_nan(self, loss): if loss == float("inf") or loss == float("-inf") or loss != loss: raise FloatingPointError(f"Loss is infinity or NaN: {loss}") def _should_do_step(self): """Tells if we will be performing an optimizer step. Returns True always if there is no gradient accumulation. With gradient accumulation returns True only when the gradients will be synchronized and we will be performing an optimizer step. """ update_idx = self.num_updates // self.get_global_batchsize() return (update_idx % self.optimizer_period) == self.optimizer_period - 1 def train_step(self): """Train step to be executed in train loop.""" self.last_batch = None # Process next sample with Timer() as timer: sample = next(self.data_iterator) assert isinstance(sample, dict) and "input" in sample and "target" in sample, ( f"Returned sample [{sample}] is not a map with 'input' and" + "'target' keys" ) # Copy sample to GPU target = sample["target"] if self.use_gpu: sample = recursive_copy_to_gpu(sample, non_blocking=True) if self.mixup_transform is not None: sample = self.mixup_transform(sample) # Optional Pytorch AMP context torch_amp_context = ( torch.cuda.amp.autocast() if self.amp_type == AmpType.PYTORCH else contextlib.suppress() ) # only sync with DDP when we need to perform an optimizer step # an optimizer step can be skipped if gradient accumulation is enabled do_step = self._should_do_step() ctx_mgr_model = ( self.distributed_model.no_sync() if self.distributed_model is not None and not do_step else contextlib.suppress() ) ctx_mgr_loss = ( self.distributed_loss.no_sync() if self.distributed_loss is not None and not do_step else contextlib.suppress() ) with ctx_mgr_model, ctx_mgr_loss: # Forward pass with torch.enable_grad(), torch_amp_context: output = self.model(sample["input"]) local_loss = self.compute_loss(output, sample) loss = local_loss.detach().clone() self.losses.append(loss.data.cpu().item() * target.size(0)) self.update_meters(output, sample) # Backwards pass + optimizer step self.run_optimizer(local_loss) self.num_updates += self.get_global_batchsize() # Move some data to the task so hooks get a chance to access it self.last_batch = LastBatchInfo( loss=loss, output=output, target=target, sample=sample, step_data={"sample_fetch_time": timer.elapsed_time}, ) def compute_loss(self, model_output, sample): return self.loss(model_output, sample["target"]) def run_optimizer(self, loss): """Runs backwards pass and update the optimizer""" self.check_inf_nan(loss) # Gradient accumulation logic. We always set optimizer_period, even # if gradient accumulation is disabled. 
Assumes all batches have the # same size update_idx = self.num_updates // self.get_global_batchsize() do_zero_grad = (update_idx % self.optimizer_period) == 0 do_step = self._should_do_step() if do_zero_grad: self.optimizer.zero_grad() if self.amp_type == AmpType.APEX: with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss: scaled_loss.backward() elif self.amp_type == AmpType.PYTORCH: self.amp_grad_scaler.scale(loss).backward() else: loss.backward() if do_step: # Handle gradient accumulation related gradient rescaling if self.optimizer_period != 1: self._rescale_gradients(1 / self.optimizer_period) # Clipping must happen after grad accumulation if self.clip_grad_norm is not None: self._clip_gradients(self.clip_grad_norm) if self.amp_type == AmpType.PYTORCH: # If using mixed precision, handle underflow-related scaling # See https://pytorch.org/docs/stable/amp.html#gradient-scaling # for context self.amp_grad_scaler.step(self.optimizer, where=self.where) self.amp_grad_scaler.update() else: self.optimizer.step(where=self.where) def _rescale_gradients(self, scale): for param in master_params(self.optimizer): if param.grad is not None: param.grad.data.mul_(scale) def _clip_gradients(self, max_norm): nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm) def update_meters(self, model_output, sample): target = sample["target"].detach().cpu() model_output = model_output.detach().cpu() # Update meters for meter in self.meters: meter.update(model_output, target, is_train=self.train) def synchronize_losses(self): """Average the losses across the different replicas""" # Average losses across nodes losses_tensor = torch.tensor(self.losses) synchronized_losses_tensor = all_reduce_mean(losses_tensor) self.losses = synchronized_losses_tensor.tolist() def advance_phase(self): """Performs bookkeeping / task updates between phases Increments phase idx, resets meters, resets loss history, resets counters, shuffles dataset, rebuilds iterators, and sets the train / test state for phase. """ logging.debug("Advancing phase") # Reset meters for next phase / epoch for meter in self.meters: meter.reset() # Reset loss history for next epoch self.losses = [] # Setup new phase self.phase_idx += 1 phase = self.phases[self.phase_idx] self.train = True if phase["train"] else False if self.train: self.train_phase_idx += 1 # Re-build dataloader & re-create iterator anytime membership changes. self.build_dataloaders_for_current_phase() self.create_data_iterators() # Set up pytorch module in train vs eval mode, update optimizer. self._set_model_train_mode() def done_training(self): """Stop condition for training""" return self.phase_idx + 1 >= len(self.phases) def create_data_iterators(self): """Creates data iterator(s) for the current phase.""" # Delete iterator explicitly so that all dataloader processes # are cleaned up. 
del self.data_iterator self.data_iterator = iter(self.dataloader) def _set_model_train_mode(self): """Set train mode for model""" phase = self.phases[self.phase_idx] self.base_model.train(phase["train"]) self.base_loss.train(phase["train"]) if ( self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL and not self.train ): self._broadcast_buffers() def _broadcast_buffers(self): """Explicitly synchronize buffers across all devices.""" if self.distributed_model is None: return buffers = list(self.base_model.buffers()) if len(buffers) > 0: logging.info("Synchronizing buffers before evaluation.") for buffer in buffers: broadcast(buffer, 0, group=self.distributed_model.process_group) # TODO: Functions below should be better abstracted into the dataloader # abstraction def get_batchsize_per_replica(self): """Return local replica's batchsize for dataset (e.g. batchsize per GPU)""" return self.datasets[self.phase_type].get_batchsize_per_replica() def get_global_batchsize(self): """Return global batchsize across all trainers""" return self.datasets[self.phase_type].get_global_batchsize() def on_start(self): for hook in self.hooks: hook.on_start(self) def on_phase_start(self): self.phase_start_time_total = time.perf_counter() self.advance_phase() for hook in self.hooks: hook.on_phase_start(self) self.phase_start_time_train = time.perf_counter() def on_phase_end(self): self.log_phase_end("train") if self.train: self.optimizer.on_epoch(where=self.where) logging.debug("Syncing losses on phase end...") self.synchronize_losses() logging.debug("...losses synced") logging.debug("Syncing meters on phase end...") for meter in self.meters: meter.sync_state() logging.debug("...meters synced") barrier() for hook in self.hooks: hook.on_phase_end(self) self.perf_log = [] self.log_phase_end("total") def on_end(self): for hook in self.hooks: hook.on_end(self) def log_phase_end(self, tag): if not self.train: return start_time = ( self.phase_start_time_train if tag == "train" else self.phase_start_time_total ) phase_duration = time.perf_counter() - start_time im_per_sec = ( self.get_global_batchsize() * self.num_batches_per_phase ) / phase_duration self.perf_log.append( { "tag": tag, "phase_idx": self.train_phase_idx, "epoch_duration": phase_duration, "im_per_sec": im_per_sec, } ) def __repr__(self): if hasattr(self, "_config"): config = json.dumps(self._config, indent=4) return f"{super().__repr__()} initialized with config:\n{config}" return super().__repr__()
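The `_build_phases` docstring in the file above describes how train and test phases interleave; a standalone sketch replicating that logic outside the task makes the schedule concrete. The function name is illustrative, and the body mirrors `_build_phases` for the non-test-only, train-and-test case:

```python
import math

def sketch_build_phases(num_epochs, train_phases_per_epoch=1, test_phase_period=1):
    # Mirrors _build_phases for a run with both train and test datasets.
    phases = [
        {"train": True}
        for _ in range(math.ceil(train_phases_per_epoch * num_epochs))
    ]
    final_phases = []
    for i, phase in enumerate(phases):
        final_phases.append(phase)
        if (i + 1) % test_phase_period == 0:
            final_phases.append({"train": False})
    # The last phase is always a test phase.
    if final_phases[-1]["train"]:
        final_phases.append({"train": False})
    return final_phases

# Four epochs with a test phase every second train phase:
# [train, train, test, train, train, test]
print(sketch_build_phases(num_epochs=4, test_phase_period=2))
```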
__init__
@param engine: The engine that instantiated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file.
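A hedged sketch of just the contract this docstring promises: store the engine and path, and raise `ValueError` when the path is not a valid file. The real `Plugin.__init__` in shotgunEvents does more (logger setup, state tracking), and every name below is an assumption drawn from the docstring, not the actual implementation:

```python
import os

class PluginSketch(object):
    # Hypothetical stand-in for the masked Plugin.__init__; not the real code.
    def __init__(self, engine, path):
        if not os.path.isfile(path):
            raise ValueError("The path to the plugin is not a valid file: %s" % path)
        self._engine = engine
        self._path = path
```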
#!/usr/bin/env python # # Init file for Shotgun event daemon # # chkconfig: 345 99 00 # description: Shotgun event daemon # ### BEGIN INIT INFO # Provides: shotgunEvent # Required-Start: $network # Should-Start: $remote_fs # Required-Stop: $network # Should-Stop: $remote_fs # Default-Start: 2 3 4 5 # Short-Description: Shotgun event daemon # Description: Shotgun event daemon ### END INIT INFO """ For an overview of shotgunEvents, please see raw documentation in the docs folder or an html compiled version at: http://shotgunsoftware.github.com/shotgunEvents """ from __future__ import print_function __version__ = "1.0" __version_info__ = (1, 0) # Suppress the deprecation warning about imp until we get around to replacing it import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import imp import datetime import logging import logging.handlers import os import pprint import socket import sys import time import traceback from six.moves import configparser import six.moves.cPickle as pickle from distutils.version import StrictVersion if sys.platform == "win32": import win32serviceutil import win32service import win32event import servicemanager import daemonizer import shotgun_api3 as sg from shotgun_api3.lib.sgtimezone import SgTimezone SG_TIMEZONE = SgTimezone() CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0]) PYTHON_26 = StrictVersion("2.6") PYTHON_27 = StrictVersion("2.7") EMAIL_FORMAT_STRING = """Time: %(asctime)s Logger: %(name)s Path: %(pathname)s Function: %(funcName)s Line: %(lineno)d %(message)s""" def _setFilePathOnLogger(logger, path): # Remove any previous handler. _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler) # Add the file handler handler = logging.handlers.TimedRotatingFileHandler( path, "midnight", backupCount=10 ) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) def _removeHandlersFromLogger(logger, handlerTypes=None): """ Remove all handlers or handlers of a specified type from a logger. @param logger: The logger who's handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses. """ for handler in logger.handlers: if handlerTypes is None or isinstance(handler, handlerTypes): logger.removeHandler(handler) def _addMailHandlerToLogger( logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None, ): """ Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler. 
""" if smtpServer and fromAddr and toAddrs and emailSubject: mailHandler = CustomSMTPHandler( smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure ) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler) class Config(configparser.SafeConfigParser): def __init__(self, path): configparser.SafeConfigParser.__init__(self, os.environ) self.read(path) def getShotgunURL(self): return self.get("shotgun", "server") def getEngineScriptName(self): return self.get("shotgun", "name") def getEngineScriptKey(self): return self.get("shotgun", "key") def getEngineProxyServer(self): try: proxy_server = self.get("shotgun", "proxy_server").strip() if not proxy_server: return None return proxy_server except configparser.NoOptionError: return None def getEventIdFile(self): return self.get("daemon", "eventIdFile") def getEnginePIDFile(self): return self.get("daemon", "pidFile") def getPluginPaths(self): return [s.strip() for s in self.get("plugins", "paths").split(",")] def getSMTPServer(self): return self.get("emails", "server") def getSMTPPort(self): if self.has_option("emails", "port"): return self.getint("emails", "port") return 25 def getFromAddr(self): return self.get("emails", "from") def getToAddrs(self): return [s.strip() for s in self.get("emails", "to").split(",")] def getEmailSubject(self): return self.get("emails", "subject") def getEmailUsername(self): if self.has_option("emails", "username"): return self.get("emails", "username") return None def getEmailPassword(self): if self.has_option("emails", "password"): return self.get("emails", "password") return None def getSecureSMTP(self): if self.has_option("emails", "useTLS"): return self.getboolean("emails", "useTLS") or False return False def getLogMode(self): return self.getint("daemon", "logMode") def getLogLevel(self): return self.getint("daemon", "logging") def getMaxEventBatchSize(self): if self.has_option("daemon", "max_event_batch_size"): return self.getint("daemon", "max_event_batch_size") return 500 def getLogFile(self, filename=None): if filename is None: if self.has_option("daemon", "logFile"): filename = self.get("daemon", "logFile") else: raise ConfigError("The config file has no logFile option.") if self.has_option("daemon", "logPath"): path = self.get("daemon", "logPath") if not os.path.exists(path): os.makedirs(path) elif not os.path.isdir(path): raise ConfigError( "The logPath value in the config should point to a directory." ) path = os.path.join(path, filename) else: path = filename return path def getTimingLogFile(self): if ( not self.has_option("daemon", "timing_log") or self.get("daemon", "timing_log") != "on" ): return None return self.getLogFile() + ".timing" class Engine(object): """ The engine holds the main loop of event processing. 
""" def __init__(self, configPath): """ """ self._continue = True self._eventIdData = {} # Read/parse the config self.config = Config(configPath) # Get config values self._pluginCollections = [ PluginCollection(self, s) for s in self.config.getPluginPaths() ] self._sg = sg.Shotgun( self.config.getShotgunURL(), self.config.getEngineScriptName(), self.config.getEngineScriptKey(), http_proxy=self.config.getEngineProxyServer(), ) self._max_conn_retries = self.config.getint("daemon", "max_conn_retries") self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep") self._fetch_interval = self.config.getint("daemon", "fetch_interval") self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid") # Setup the loggers for the main engine if self.config.getLogMode() == 0: # Set the root logger for file output. rootLogger = logging.getLogger() rootLogger.config = self.config _setFilePathOnLogger(rootLogger, self.config.getLogFile()) print(self.config.getLogFile()) # Set the engine logger for email output. self.log = logging.getLogger("engine") self.setEmailsOnLogger(self.log, True) else: # Set the engine logger for file and email output. self.log = logging.getLogger("engine") self.log.config = self.config _setFilePathOnLogger(self.log, self.config.getLogFile()) self.setEmailsOnLogger(self.log, True) self.log.setLevel(self.config.getLogLevel()) # Setup the timing log file timing_log_filename = self.config.getTimingLogFile() if timing_log_filename: self.timing_logger = logging.getLogger("timing") self.timing_logger.setLevel(self.config.getLogLevel()) _setFilePathOnLogger(self.timing_logger, timing_log_filename) else: self.timing_logger = None super(Engine, self).__init__() def setEmailsOnLogger(self, logger, emails): # Configure the logger for email output _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler) if emails is False: return smtpServer = self.config.getSMTPServer() smtpPort = self.config.getSMTPPort() fromAddr = self.config.getFromAddr() emailSubject = self.config.getEmailSubject() username = self.config.getEmailUsername() password = self.config.getEmailPassword() if self.config.getSecureSMTP(): secure = (None, None) else: secure = None if emails is True: toAddrs = self.config.getToAddrs() elif isinstance(emails, (list, tuple)): toAddrs = emails else: msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s." raise ValueError(msg % type(emails)) _addMailHandlerToLogger( logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure, ) def start(self): """ Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started. """ # TODO: Take value from config socket.setdefaulttimeout(60) # Notify which version of shotgun api we are using self.log.info("Using SG Python API version %s" % sg.__version__) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning("Keyboard interrupt. Cleaning up...") except Exception as err: msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s" self.log.critical(msg, type(err), traceback.format_exc(err)) def _loadEventIdData(self): """ Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. 
In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there. """ eventIdFile = self.config.getEventIdFile() if eventIdFile and os.path.exists(eventIdFile): try: fh = open(eventIdFile, "rb") try: self._eventIdData = pickle.load(fh) # Provide event id info to the plugin collections. Once # they've figured out what to do with it, ask them for their # last processed id. noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) # If we don't have a state it means there's no match # in the id file. First we'll search to see the latest id a # matching plugin name has elsewhere in the id file. We do # this as a fallback in case the plugins directory has been # moved. If there's no match, use the latest event id # in Shotgun. if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for pluginName, pluginState in collection.items(): if pluginName in maxPluginStates.keys(): if pluginState[0] > maxPluginStates[pluginName][0]: maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if pluginName in maxPluginStates.keys(): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() # Backwards compatibility: # Reopen the file to try to read an old-style int fh = open(eventIdFile, "rb") line = fh.readline().strip() if line.isdigit(): # The _loadEventIdData got an old-style id file containing a single # int which is the last id properly processed. lastEventId = int(line) self.log.debug( "Read last event id (%d) from file.", lastEventId ) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError( "Could not load event id from file.\n\n%s" % traceback.format_exc(err) ) else: # No id file? # Get the event data from the database. lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData() def _getLastEventIdFromDatabase(self): conn_attempts = 0 lastEventId = None while lastEventId is None: order = [{"column": "id", "direction": "desc"}] try: result = self._sg.find_one( "EventLogEntry", filters=[], fields=["id"], order=order ) except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) else: lastEventId = result["id"] self.log.info("Last event id (%d) from the SG database.", lastEventId) return lastEventId def _mainLoop(self): """ Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occured during registration), skip it. 
- If a callback is deemed "inactive" (an error occured during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop. """ self.log.debug("Starting the event processing loop.") while self._continue: # Process events events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() # if we're lagging behind Shotgun, we received a full batch of events # skip the sleep() call in this case if len(events) < self.config.getMaxEventBatchSize(): time.sleep(self._fetch_interval) # Reload plugins for collection in self._pluginCollections: collection.load() # Make sure that newly loaded events have proper state. self._loadEventIdData() self.log.debug("Shuting down event processing loop.") def stop(self): self._continue = False def _getNewEvents(self): """ Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries. """ nextEventId = None for newId in [ coll.getNextUnprocessedEventId() for coll in self._pluginCollections ]: if newId is not None and (nextEventId is None or newId < nextEventId): nextEventId = newId if nextEventId is not None: filters = [["id", "greater_than", nextEventId - 1]] fields = [ "id", "event_type", "attribute_name", "meta", "entity", "user", "project", "session_uuid", "created_at", ] order = [{"column": "id", "direction": "asc"}] conn_attempts = 0 while True: try: events = self._sg.find( "EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize(), ) if events: self.log.debug( "Got %d events: %d to %d.", len(events), events[0]["id"], events[-1]["id"], ) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts( conn_attempts, str(err) ) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return [] def _saveEventIdData(self): """ Save an event Id to persistant storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing. """ eventIdFile = self.config.getEventIdFile() if eventIdFile is not None: for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for colPath, state in self._eventIdData.items(): if state: try: with open(eventIdFile, "wb") as fh: # Use protocol 2 so it can also be loaded in Python 2 pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error( "Can not write event id data to %s.\n\n%s", eventIdFile, traceback.format_exc(err), ) break else: self.log.warning("No state was found. Not saving to disk.") def _checkConnectionAttempts(self, conn_attempts, msg): conn_attempts += 1 if conn_attempts == self._max_conn_retries: self.log.error( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) conn_attempts = 0 time.sleep(self._conn_retry_sleep) else: self.log.warning( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) return conn_attempts class PluginCollection(object): """ A group of plugin files in a location on the disk. 
""" def __init__(self, engine, path): if not os.path.isdir(path): raise ValueError("Invalid path: %s" % path) self._engine = engine self.path = path self._plugins = {} self._stateData = {} def setState(self, state): if isinstance(state, int): for plugin in self: plugin.setState(state) self._stateData[plugin.getName()] = plugin.getState() else: self._stateData = state for plugin in self: pluginState = self._stateData.get(plugin.getName()) if pluginState: plugin.setState(pluginState) def getState(self): for plugin in self: self._stateData[plugin.getName()] = plugin.getState() return self._stateData def getNextUnprocessedEventId(self): eId = None for plugin in self: if not plugin.isActive(): continue newId = plugin.getNextUnprocessedEventId() if newId is not None and (eId is None or newId < eId): eId = newId return eId def process(self, event): for plugin in self: if plugin.isActive(): plugin.process(event) else: plugin.logger.debug("Skipping: inactive.") def load(self): """ Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them. """ newPlugins = {} for basename in os.listdir(self.path): if not basename.endswith(".py") or basename.startswith("."): continue if basename in self._plugins: newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin( self._engine, os.path.join(self.path, basename) ) newPlugins[basename].load() self._plugins = newPlugins def __iter__(self): for basename in sorted(self._plugins.keys()): yield self._plugins[basename] class Plugin(object): """ The plugin class represents a file on disk which contains one or more callbacks. """ # MASKED: __init__ function (lines 723-753) def getName(self): return self._pluginName def setState(self, state): if isinstance(state, int): self._lastEventId = state elif isinstance(state, tuple): self._lastEventId, self._backlog = state else: raise ValueError("Unknown state type: %s." % type(state)) def getState(self): return (self._lastEventId, self._backlog) def getNextUnprocessedEventId(self): if self._lastEventId: nextId = self._lastEventId + 1 else: nextId = None now = datetime.datetime.now() for k in list(self._backlog): v = self._backlog[k] if v < now: self.logger.warning("Timeout elapsed on backlog event id %d.", k) del self._backlog[k] elif nextId is None or k < nextId: nextId = k return nextId def isActive(self): """ Is the current plugin active. Should it's callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. @rtype: I{bool} """ return self._active def setEmails(self, *emails): """ Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}. """ self._engine.setEmailsOnLogger(self.logger, emails) def load(self): """ Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return. 
""" # Check file mtime mtime = os.path.getmtime(self._path) if self._mtime is None: self._engine.log.info("Loading plugin at %s" % self._path) elif self._mtime < mtime: self._engine.log.info("Reloading plugin at %s" % self._path) else: # The mtime of file is equal or older. We don't need to do anything. return # Reset values self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error( "Could not load the plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) return regFunc = getattr(plugin, "registerCallbacks", None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical( "Error running register callback function from plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) self._active = False else: self._engine.log.critical( "Did not find a registerCallbacks function in plugin at %s.", self._path ) self._active = False def registerCallback( self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True, ): """ Register a callback in the plugin. """ global sg sgConnection = sg.Shotgun( self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer(), ) self._callbacks.append( Callback( callback, self, self._engine, sgConnection, matchEvents, args, stopOnError, ) ) def process(self, event): if event["id"] in self._backlog: if self._process(event): self.logger.info("Processed id %d from backlog." % event["id"]) del self._backlog[event["id"]] self._updateLastEventId(event) elif self._lastEventId is not None and event["id"] <= self._lastEventId: msg = "Event %d is too old. Last event processed was (%d)." self.logger.debug(msg, event["id"], self._lastEventId) else: if self._process(event): self._updateLastEventId(event) return self._active def _process(self, event): for callback in self: if callback.isActive(): if callback.canProcess(event): msg = "Dispatching event %d to callback %s." self.logger.debug(msg, event["id"], str(callback)) if not callback.process(event): # A callback in the plugin failed. Deactivate the whole # plugin. self._active = False break else: msg = "Skipping inactive callback %s in plugin." 
self.logger.debug(msg, str(callback)) return self._active def _updateLastEventId(self, event): BACKLOG_TIMEOUT = ( 5 # time in minutes after which we consider a pending event won't happen ) if self._lastEventId is not None and event["id"] > self._lastEventId + 1: event_date = event["created_at"].replace(tzinfo=None) if datetime.datetime.now() > ( event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT) ): # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event # with a lower id should have shown up in the EventLog by now if it actually happened if event["id"] == self._lastEventId + 2: self.logger.info( "Event %d never happened - ignoring.", self._lastEventId + 1 ) else: self.logger.info( "Events %d-%d never happened - ignoring.", self._lastEventId + 1, event["id"] - 1, ) else: # in this case, we want to add the missing events to the backlog as they could show up in the # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range # them to show up until they expire expiration = datetime.datetime.now() + datetime.timedelta( minutes=BACKLOG_TIMEOUT ) for skippedId in range(self._lastEventId + 1, event["id"]): self.logger.info("Adding event id %d to backlog.", skippedId) self._backlog[skippedId] = expiration self._lastEventId = event["id"] def __iter__(self): """ A plugin is iterable and will iterate over all its L{Callback} objects. """ return self._callbacks.__iter__() def __str__(self): """ Provide the name of the plugin when it is cast as string. @return: The name of the plugin. @rtype: I{str} """ return self.getName() class Registrar(object): """ See public API docs in docs folder. """ def __init__(self, plugin): """ Wrap a plugin so it can be passed to a user. """ self._plugin = plugin self._allowed = ["logger", "setEmails", "registerCallback"] def getLogger(self): """ Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger} """ # TODO: Fix this ugly protected member access return self.logger def __getattr__(self, name): if name in self._allowed: return getattr(self._plugin, name) raise AttributeError( "type object '%s' has no attribute '%s'" % (type(self).__name__, name) ) class Callback(object): """ A part of a plugin that can be called to process a Shotgun event. """ def __init__( self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True, ): """ @param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any datastructure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object. """ if not callable(callback): raise TypeError( "The callback must be a callable object (function, method or callable class instance)." 
) self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True # Find a name for this object if hasattr(callback, "__name__"): self._name = callback.__name__ elif hasattr(callback, "__class__") and hasattr(callback, "__call__"): self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback))) else: raise ValueError( "registerCallback should be called with a function or a callable object instance as callback argument." ) # TODO: Get rid of this protected member access self._logger = logging.getLogger(plugin.logger.name + "." + self._name) self._logger.config = self._engine.config def canProcess(self, event): if not self._matchEvents: return True if "*" in self._matchEvents: eventType = "*" else: eventType = event["event_type"] if eventType not in self._matchEvents: return False attributes = self._matchEvents[eventType] if attributes is None or "*" in attributes: return True if event["attribute_name"] and event["attribute_name"] in attributes: return True return False def process(self, event): """ Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict} """ # set session_uuid for UI updates if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event["session_uuid"]) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True # Get the local variables of the frame of our plugin tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s" self._logger.critical( msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals) ) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace("plugin.", "") end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat(end_time - start_time) delay = self._prettyTimeDeltaFormat(start_time - event["created_at"]) msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s" data = [ event["id"], event["created_at"].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay, ] self._engine.timing_logger.info(msg_format, *data) return self._active def _prettyTimeDeltaFormat(self, time_delta): days, remainder = divmod(time_delta.total_seconds(), 86400) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) return "%02d:%02d:%02d:%02d.%06d" % ( days, hours, minutes, seconds, time_delta.microseconds, ) def isActive(self): """ Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool} """ return self._active def __str__(self): """ The name of the callback. @return: The name of the callback @rtype: I{str} """ return self._name class CustomSMTPHandler(logging.handlers.SMTPHandler): """ A custom SMTPHandler subclass that will adapt it's subject depending on the error severity. 
""" LEVEL_SUBJECTS = { logging.ERROR: "ERROR - SG event daemon.", logging.CRITICAL: "CRITICAL - SG event daemon.", } def __init__( self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None ): args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials] if credentials: # Python 2.7 implemented the secure argument if CURRENT_PYTHON_VERSION >= PYTHON_27: args.append(secure) else: self.secure = secure logging.handlers.SMTPHandler.__init__(self, *args) def getSubject(self, record): subject = logging.handlers.SMTPHandler.getSubject(self, record) if record.levelno in self.LEVEL_SUBJECTS: return subject + " " + self.LEVEL_SUBJECTS[record.levelno] return subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ # Mostly copied from Python 2.7 implementation. try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg, ) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class EventDaemonError(Exception): """ Base error for the Shotgun event system. """ pass class ConfigError(EventDaemonError): """ Used when an error is detected in the config file. """ pass if sys.platform == "win32": class WindowsService(win32serviceutil.ServiceFramework): """ Windows service wrapper """ _svc_name_ = "ShotgunEventDaemon" _svc_display_name_ = "Shotgun Event Handler" def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self._engine = Engine(_getConfigPath()) def SvcStop(self): """ Stop the Windows service. """ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop() def SvcDoRun(self): """ Start the Windows service. """ servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ""), ) self.main() def main(self): """ Primary Windows entry point """ self._engine.start() class LinuxDaemon(daemonizer.Daemon): """ Linux Daemon wrapper or wrapper used for foreground operation on Windows """ def __init__(self): self._engine = Engine(_getConfigPath()) super(LinuxDaemon, self).__init__( "shotgunEvent", self._engine.config.getEnginePIDFile() ) def start(self, daemonize=True): if not daemonize: # Setup the stdout logger handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(levelname)s:%(name)s:%(message)s") ) logging.getLogger().addHandler(handler) super(LinuxDaemon, self).start(daemonize) def _run(self): """ Start the engine's main loop """ self._engine.start() def _cleanup(self): self._engine.stop() def main(): """ """ if CURRENT_PYTHON_VERSION <= PYTHON_26: print( "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer." 
) return 3 action = None if len(sys.argv) > 1: action = sys.argv[1] if sys.platform == "win32" and action != "foreground": win32serviceutil.HandleCommandLine(WindowsService) return 0 if action: daemon = LinuxDaemon() # Find the function to call on the daemon and call it func = getattr(daemon, action, None) if action[:1] != "_" and func is not None: func() return 0 print("Unknown command: %s" % action) print("usage: %s start|stop|restart|foreground" % sys.argv[0]) return 2 def _getConfigPath(): """ Get the path of the shotgunEventDaemon configuration file. """ paths = ["/etc", os.path.dirname(__file__)] # Get the current path of the daemon script scriptPath = sys.argv[0] if scriptPath != "" and scriptPath != "-c": # Make absolute path and eliminate any symlinks if any. scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) # Add the script's directory to the paths we'll search for the config. paths[:0] = [os.path.dirname(scriptPath)] # Search for a config file. for path in paths: path = os.path.join(path, "shotgunEventDaemon.conf") if os.path.exists(path): return path # No config file was found raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths)) if __name__ == "__main__": sys.exit(main())
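The loader in the masked_code above requires each plugin file to expose a `registerCallbacks(reg)` entry point; the `Registrar` it receives only exposes `logger`, `setEmails` and `registerCallback`, and each registered callback is later invoked as `callback(shotgun, logger, event, args)`. A minimal plugin sketch follows; the script name/key values and the event type and field names are placeholders, not anything mandated by the daemon.

```python
# exampleTaskStatus.py - a minimal plugin sketch for the loader above.
# The script credentials and the Shotgun event/field names are hypothetical.

def registerCallbacks(reg):
    """Entry point looked up by Plugin.load()."""
    reg.logger.debug("Registering callbacks.")
    reg.registerCallback(
        "plugin_script_name",   # sgScriptName (placeholder)
        "plugin_script_key",    # sgScriptKey (placeholder)
        logTaskStatusChange,
        matchEvents={"Shotgun_Task_Change": ["sg_status_list"]},
        args=None,
    )


def logTaskStatusChange(sg, logger, event, args):
    """Invoked as callback(self._shotgun, self._logger, event, self._args)."""
    logger.info("Task %s status changed: %s", event["entity"], event["meta"])
```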
def __init__(self, engine, path):
        """
        @param engine: The engine that instantiated this plugin.
        @type engine: L{Engine}

        @param path: The path of the plugin file to load.
        @type path: I{str}

        @raise ValueError: If the path to the plugin is not a valid file.
        """
        self._engine = engine
        self._path = path

        if not os.path.isfile(path):
            raise ValueError("The path to the plugin is not a valid file - %s." % path)

        self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0]
        self._active = True
        self._callbacks = []
        self._mtime = None
        self._lastEventId = None
        self._backlog = {}

        # Setup the plugin's logger
        self.logger = logging.getLogger("plugin." + self.getName())
        self.logger.config = self._engine.config
        self._engine.setEmailsOnLogger(self.logger, True)
        self.logger.setLevel(self._engine.config.getLogLevel())
        if self._engine.config.getLogMode() == 1:
            _setFilePathOnLogger(
                self.logger, self._engine.config.getLogFile("plugin." + self.getName())
            )
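For debugging, the state persisted by `_saveEventIdData` can be inspected directly: it is a pickled dict keyed by plugin-collection path, mapping each plugin name to its `(lastEventId, backlog)` tuple (see `Plugin.getState`). A small sketch, assuming a hypothetical `eventIdFile` location:

```python
import pickle
import pprint

# Hypothetical path; the real one comes from the eventIdFile option in the
# [daemon] section of shotgunEventDaemon.conf.
EVENT_ID_FILE = "/var/run/shotgunEventDaemon.id"

with open(EVENT_ID_FILE, "rb") as fh:
    # Structure: {collection_path: {plugin_name: (lastEventId, backlog)}}
    pprint.pprint(pickle.load(fh))
```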
723
753
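The `Config` getters in the source imply the shape of `shotgunEventDaemon.conf`: `[shotgun]`, `[daemon]`, `[plugins]` and `[emails]` sections holding the options those getters read. The sketch below parses an illustrative config; every value is a placeholder, and optional settings (`proxy_server`, `port`, `username`, `password`, `useTLS`, `timing_log`) are omitted.

```python
import configparser

# Illustrative shotgunEventDaemon.conf content. Option names are taken from
# the Config getters in the source; all values are placeholders.
CONF = """
[shotgun]
server = https://example.shotgunstudio.com
name = shotgunEventDaemon
key = 0123456789abcdef
use_session_uuid = False

[daemon]
pidFile = /var/run/shotgunEventDaemon.pid
eventIdFile = /var/run/shotgunEventDaemon.id
logMode = 0
logging = 20
logFile = shotgunEventDaemon.log
logPath = /var/log/shotgun
max_conn_retries = 5
conn_retry_sleep = 60
fetch_interval = 5
max_event_batch_size = 500

[plugins]
paths = /usr/local/shotgun/plugins

[emails]
server = smtp.example.com
from = daemon@example.com
to = alerts@example.com
subject = Shotgun event daemon
"""

parser = configparser.ConfigParser()
parser.read_string(CONF)
print(parser.get("shotgun", "server"))
print(parser.getint("daemon", "fetch_interval"))
```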
#!/usr/bin/env python # # Init file for Shotgun event daemon # # chkconfig: 345 99 00 # description: Shotgun event daemon # ### BEGIN INIT INFO # Provides: shotgunEvent # Required-Start: $network # Should-Start: $remote_fs # Required-Stop: $network # Should-Stop: $remote_fs # Default-Start: 2 3 4 5 # Short-Description: Shotgun event daemon # Description: Shotgun event daemon ### END INIT INFO """ For an overview of shotgunEvents, please see raw documentation in the docs folder or an html compiled version at: http://shotgunsoftware.github.com/shotgunEvents """ from __future__ import print_function __version__ = "1.0" __version_info__ = (1, 0) # Suppress the deprecation warning about imp until we get around to replacing it import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import imp import datetime import logging import logging.handlers import os import pprint import socket import sys import time import traceback from six.moves import configparser import six.moves.cPickle as pickle from distutils.version import StrictVersion if sys.platform == "win32": import win32serviceutil import win32service import win32event import servicemanager import daemonizer import shotgun_api3 as sg from shotgun_api3.lib.sgtimezone import SgTimezone SG_TIMEZONE = SgTimezone() CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0]) PYTHON_26 = StrictVersion("2.6") PYTHON_27 = StrictVersion("2.7") EMAIL_FORMAT_STRING = """Time: %(asctime)s Logger: %(name)s Path: %(pathname)s Function: %(funcName)s Line: %(lineno)d %(message)s""" def _setFilePathOnLogger(logger, path): # Remove any previous handler. _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler) # Add the file handler handler = logging.handlers.TimedRotatingFileHandler( path, "midnight", backupCount=10 ) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) def _removeHandlersFromLogger(logger, handlerTypes=None): """ Remove all handlers or handlers of a specified type from a logger. @param logger: The logger who's handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses. """ for handler in logger.handlers: if handlerTypes is None or isinstance(handler, handlerTypes): logger.removeHandler(handler) def _addMailHandlerToLogger( logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None, ): """ Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler. 
""" if smtpServer and fromAddr and toAddrs and emailSubject: mailHandler = CustomSMTPHandler( smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure ) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler) class Config(configparser.SafeConfigParser): def __init__(self, path): configparser.SafeConfigParser.__init__(self, os.environ) self.read(path) def getShotgunURL(self): return self.get("shotgun", "server") def getEngineScriptName(self): return self.get("shotgun", "name") def getEngineScriptKey(self): return self.get("shotgun", "key") def getEngineProxyServer(self): try: proxy_server = self.get("shotgun", "proxy_server").strip() if not proxy_server: return None return proxy_server except configparser.NoOptionError: return None def getEventIdFile(self): return self.get("daemon", "eventIdFile") def getEnginePIDFile(self): return self.get("daemon", "pidFile") def getPluginPaths(self): return [s.strip() for s in self.get("plugins", "paths").split(",")] def getSMTPServer(self): return self.get("emails", "server") def getSMTPPort(self): if self.has_option("emails", "port"): return self.getint("emails", "port") return 25 def getFromAddr(self): return self.get("emails", "from") def getToAddrs(self): return [s.strip() for s in self.get("emails", "to").split(",")] def getEmailSubject(self): return self.get("emails", "subject") def getEmailUsername(self): if self.has_option("emails", "username"): return self.get("emails", "username") return None def getEmailPassword(self): if self.has_option("emails", "password"): return self.get("emails", "password") return None def getSecureSMTP(self): if self.has_option("emails", "useTLS"): return self.getboolean("emails", "useTLS") or False return False def getLogMode(self): return self.getint("daemon", "logMode") def getLogLevel(self): return self.getint("daemon", "logging") def getMaxEventBatchSize(self): if self.has_option("daemon", "max_event_batch_size"): return self.getint("daemon", "max_event_batch_size") return 500 def getLogFile(self, filename=None): if filename is None: if self.has_option("daemon", "logFile"): filename = self.get("daemon", "logFile") else: raise ConfigError("The config file has no logFile option.") if self.has_option("daemon", "logPath"): path = self.get("daemon", "logPath") if not os.path.exists(path): os.makedirs(path) elif not os.path.isdir(path): raise ConfigError( "The logPath value in the config should point to a directory." ) path = os.path.join(path, filename) else: path = filename return path def getTimingLogFile(self): if ( not self.has_option("daemon", "timing_log") or self.get("daemon", "timing_log") != "on" ): return None return self.getLogFile() + ".timing" class Engine(object): """ The engine holds the main loop of event processing. 
""" def __init__(self, configPath): """ """ self._continue = True self._eventIdData = {} # Read/parse the config self.config = Config(configPath) # Get config values self._pluginCollections = [ PluginCollection(self, s) for s in self.config.getPluginPaths() ] self._sg = sg.Shotgun( self.config.getShotgunURL(), self.config.getEngineScriptName(), self.config.getEngineScriptKey(), http_proxy=self.config.getEngineProxyServer(), ) self._max_conn_retries = self.config.getint("daemon", "max_conn_retries") self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep") self._fetch_interval = self.config.getint("daemon", "fetch_interval") self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid") # Setup the loggers for the main engine if self.config.getLogMode() == 0: # Set the root logger for file output. rootLogger = logging.getLogger() rootLogger.config = self.config _setFilePathOnLogger(rootLogger, self.config.getLogFile()) print(self.config.getLogFile()) # Set the engine logger for email output. self.log = logging.getLogger("engine") self.setEmailsOnLogger(self.log, True) else: # Set the engine logger for file and email output. self.log = logging.getLogger("engine") self.log.config = self.config _setFilePathOnLogger(self.log, self.config.getLogFile()) self.setEmailsOnLogger(self.log, True) self.log.setLevel(self.config.getLogLevel()) # Setup the timing log file timing_log_filename = self.config.getTimingLogFile() if timing_log_filename: self.timing_logger = logging.getLogger("timing") self.timing_logger.setLevel(self.config.getLogLevel()) _setFilePathOnLogger(self.timing_logger, timing_log_filename) else: self.timing_logger = None super(Engine, self).__init__() def setEmailsOnLogger(self, logger, emails): # Configure the logger for email output _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler) if emails is False: return smtpServer = self.config.getSMTPServer() smtpPort = self.config.getSMTPPort() fromAddr = self.config.getFromAddr() emailSubject = self.config.getEmailSubject() username = self.config.getEmailUsername() password = self.config.getEmailPassword() if self.config.getSecureSMTP(): secure = (None, None) else: secure = None if emails is True: toAddrs = self.config.getToAddrs() elif isinstance(emails, (list, tuple)): toAddrs = emails else: msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s." raise ValueError(msg % type(emails)) _addMailHandlerToLogger( logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure, ) def start(self): """ Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started. """ # TODO: Take value from config socket.setdefaulttimeout(60) # Notify which version of shotgun api we are using self.log.info("Using SG Python API version %s" % sg.__version__) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning("Keyboard interrupt. Cleaning up...") except Exception as err: msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s" self.log.critical(msg, type(err), traceback.format_exc(err)) def _loadEventIdData(self): """ Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. 
In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there. """ eventIdFile = self.config.getEventIdFile() if eventIdFile and os.path.exists(eventIdFile): try: fh = open(eventIdFile, "rb") try: self._eventIdData = pickle.load(fh) # Provide event id info to the plugin collections. Once # they've figured out what to do with it, ask them for their # last processed id. noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) # If we don't have a state it means there's no match # in the id file. First we'll search to see the latest id a # matching plugin name has elsewhere in the id file. We do # this as a fallback in case the plugins directory has been # moved. If there's no match, use the latest event id # in Shotgun. if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for pluginName, pluginState in collection.items(): if pluginName in maxPluginStates.keys(): if pluginState[0] > maxPluginStates[pluginName][0]: maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if pluginName in maxPluginStates.keys(): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() # Backwards compatibility: # Reopen the file to try to read an old-style int fh = open(eventIdFile, "rb") line = fh.readline().strip() if line.isdigit(): # The _loadEventIdData got an old-style id file containing a single # int which is the last id properly processed. lastEventId = int(line) self.log.debug( "Read last event id (%d) from file.", lastEventId ) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError( "Could not load event id from file.\n\n%s" % traceback.format_exc(err) ) else: # No id file? # Get the event data from the database. lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData() def _getLastEventIdFromDatabase(self): conn_attempts = 0 lastEventId = None while lastEventId is None: order = [{"column": "id", "direction": "desc"}] try: result = self._sg.find_one( "EventLogEntry", filters=[], fields=["id"], order=order ) except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) else: lastEventId = result["id"] self.log.info("Last event id (%d) from the SG database.", lastEventId) return lastEventId def _mainLoop(self): """ Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occured during registration), skip it. 
- If a callback is deemed "inactive" (an error occured during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop. """ self.log.debug("Starting the event processing loop.") while self._continue: # Process events events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() # if we're lagging behind Shotgun, we received a full batch of events # skip the sleep() call in this case if len(events) < self.config.getMaxEventBatchSize(): time.sleep(self._fetch_interval) # Reload plugins for collection in self._pluginCollections: collection.load() # Make sure that newly loaded events have proper state. self._loadEventIdData() self.log.debug("Shuting down event processing loop.") def stop(self): self._continue = False def _getNewEvents(self): """ Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries. """ nextEventId = None for newId in [ coll.getNextUnprocessedEventId() for coll in self._pluginCollections ]: if newId is not None and (nextEventId is None or newId < nextEventId): nextEventId = newId if nextEventId is not None: filters = [["id", "greater_than", nextEventId - 1]] fields = [ "id", "event_type", "attribute_name", "meta", "entity", "user", "project", "session_uuid", "created_at", ] order = [{"column": "id", "direction": "asc"}] conn_attempts = 0 while True: try: events = self._sg.find( "EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize(), ) if events: self.log.debug( "Got %d events: %d to %d.", len(events), events[0]["id"], events[-1]["id"], ) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts( conn_attempts, str(err) ) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return [] def _saveEventIdData(self): """ Save an event Id to persistant storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing. """ eventIdFile = self.config.getEventIdFile() if eventIdFile is not None: for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for colPath, state in self._eventIdData.items(): if state: try: with open(eventIdFile, "wb") as fh: # Use protocol 2 so it can also be loaded in Python 2 pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error( "Can not write event id data to %s.\n\n%s", eventIdFile, traceback.format_exc(err), ) break else: self.log.warning("No state was found. Not saving to disk.") def _checkConnectionAttempts(self, conn_attempts, msg): conn_attempts += 1 if conn_attempts == self._max_conn_retries: self.log.error( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) conn_attempts = 0 time.sleep(self._conn_retry_sleep) else: self.log.warning( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) return conn_attempts class PluginCollection(object): """ A group of plugin files in a location on the disk. 
""" def __init__(self, engine, path): if not os.path.isdir(path): raise ValueError("Invalid path: %s" % path) self._engine = engine self.path = path self._plugins = {} self._stateData = {} def setState(self, state): if isinstance(state, int): for plugin in self: plugin.setState(state) self._stateData[plugin.getName()] = plugin.getState() else: self._stateData = state for plugin in self: pluginState = self._stateData.get(plugin.getName()) if pluginState: plugin.setState(pluginState) def getState(self): for plugin in self: self._stateData[plugin.getName()] = plugin.getState() return self._stateData def getNextUnprocessedEventId(self): eId = None for plugin in self: if not plugin.isActive(): continue newId = plugin.getNextUnprocessedEventId() if newId is not None and (eId is None or newId < eId): eId = newId return eId def process(self, event): for plugin in self: if plugin.isActive(): plugin.process(event) else: plugin.logger.debug("Skipping: inactive.") def load(self): """ Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them. """ newPlugins = {} for basename in os.listdir(self.path): if not basename.endswith(".py") or basename.startswith("."): continue if basename in self._plugins: newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin( self._engine, os.path.join(self.path, basename) ) newPlugins[basename].load() self._plugins = newPlugins def __iter__(self): for basename in sorted(self._plugins.keys()): yield self._plugins[basename] class Plugin(object): """ The plugin class represents a file on disk which contains one or more callbacks. """ def __init__(self, engine, path): """ @param engine: The engine that instanciated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file. """ self._engine = engine self._path = path if not os.path.isfile(path): raise ValueError("The path to the plugin is not a valid file - %s." % path) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} # Setup the plugin's logger self.logger = logging.getLogger("plugin." + self.getName()) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if self._engine.config.getLogMode() == 1: _setFilePathOnLogger( self.logger, self._engine.config.getLogFile("plugin." + self.getName()) ) def getName(self): return self._pluginName def setState(self, state): if isinstance(state, int): self._lastEventId = state elif isinstance(state, tuple): self._lastEventId, self._backlog = state else: raise ValueError("Unknown state type: %s." % type(state)) def getState(self): return (self._lastEventId, self._backlog) def getNextUnprocessedEventId(self): if self._lastEventId: nextId = self._lastEventId + 1 else: nextId = None now = datetime.datetime.now() for k in list(self._backlog): v = self._backlog[k] if v < now: self.logger.warning("Timeout elapsed on backlog event id %d.", k) del self._backlog[k] elif nextId is None or k < nextId: nextId = k return nextId def isActive(self): """ Is the current plugin active. Should it's callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. 
@rtype: I{bool} """ return self._active def setEmails(self, *emails): """ Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}. """ self._engine.setEmailsOnLogger(self.logger, emails) def load(self): """ Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return. """ # Check file mtime mtime = os.path.getmtime(self._path) if self._mtime is None: self._engine.log.info("Loading plugin at %s" % self._path) elif self._mtime < mtime: self._engine.log.info("Reloading plugin at %s" % self._path) else: # The mtime of file is equal or older. We don't need to do anything. return # Reset values self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error( "Could not load the plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) return regFunc = getattr(plugin, "registerCallbacks", None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical( "Error running register callback function from plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) self._active = False else: self._engine.log.critical( "Did not find a registerCallbacks function in plugin at %s.", self._path ) self._active = False def registerCallback( self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True, ): """ Register a callback in the plugin. """ global sg sgConnection = sg.Shotgun( self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer(), ) self._callbacks.append( Callback( callback, self, self._engine, sgConnection, matchEvents, args, stopOnError, ) ) def process(self, event): if event["id"] in self._backlog: if self._process(event): self.logger.info("Processed id %d from backlog." % event["id"]) del self._backlog[event["id"]] self._updateLastEventId(event) elif self._lastEventId is not None and event["id"] <= self._lastEventId: msg = "Event %d is too old. Last event processed was (%d)." self.logger.debug(msg, event["id"], self._lastEventId) else: if self._process(event): self._updateLastEventId(event) return self._active def _process(self, event): for callback in self: if callback.isActive(): if callback.canProcess(event): msg = "Dispatching event %d to callback %s." self.logger.debug(msg, event["id"], str(callback)) if not callback.process(event): # A callback in the plugin failed. Deactivate the whole # plugin. self._active = False break else: msg = "Skipping inactive callback %s in plugin." 
self.logger.debug(msg, str(callback)) return self._active def _updateLastEventId(self, event): BACKLOG_TIMEOUT = ( 5 # time in minutes after which we consider a pending event won't happen ) if self._lastEventId is not None and event["id"] > self._lastEventId + 1: event_date = event["created_at"].replace(tzinfo=None) if datetime.datetime.now() > ( event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT) ): # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event # with a lower id should have shown up in the EventLog by now if it actually happened if event["id"] == self._lastEventId + 2: self.logger.info( "Event %d never happened - ignoring.", self._lastEventId + 1 ) else: self.logger.info( "Events %d-%d never happened - ignoring.", self._lastEventId + 1, event["id"] - 1, ) else: # in this case, we want to add the missing events to the backlog as they could show up in the # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range # them to show up until they expire expiration = datetime.datetime.now() + datetime.timedelta( minutes=BACKLOG_TIMEOUT ) for skippedId in range(self._lastEventId + 1, event["id"]): self.logger.info("Adding event id %d to backlog.", skippedId) self._backlog[skippedId] = expiration self._lastEventId = event["id"] def __iter__(self): """ A plugin is iterable and will iterate over all its L{Callback} objects. """ return self._callbacks.__iter__() def __str__(self): """ Provide the name of the plugin when it is cast as string. @return: The name of the plugin. @rtype: I{str} """ return self.getName() class Registrar(object): """ See public API docs in docs folder. """ def __init__(self, plugin): """ Wrap a plugin so it can be passed to a user. """ self._plugin = plugin self._allowed = ["logger", "setEmails", "registerCallback"] def getLogger(self): """ Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger} """ # TODO: Fix this ugly protected member access return self.logger def __getattr__(self, name): if name in self._allowed: return getattr(self._plugin, name) raise AttributeError( "type object '%s' has no attribute '%s'" % (type(self).__name__, name) ) class Callback(object): """ A part of a plugin that can be called to process a Shotgun event. """ def __init__( self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True, ): """ @param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any datastructure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object. """ if not callable(callback): raise TypeError( "The callback must be a callable object (function, method or callable class instance)." 
) self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True # Find a name for this object if hasattr(callback, "__name__"): self._name = callback.__name__ elif hasattr(callback, "__class__") and hasattr(callback, "__call__"): self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback))) else: raise ValueError( "registerCallback should be called with a function or a callable object instance as callback argument." ) # TODO: Get rid of this protected member access self._logger = logging.getLogger(plugin.logger.name + "." + self._name) self._logger.config = self._engine.config def canProcess(self, event): if not self._matchEvents: return True if "*" in self._matchEvents: eventType = "*" else: eventType = event["event_type"] if eventType not in self._matchEvents: return False attributes = self._matchEvents[eventType] if attributes is None or "*" in attributes: return True if event["attribute_name"] and event["attribute_name"] in attributes: return True return False def process(self, event): """ Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict} """ # set session_uuid for UI updates if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event["session_uuid"]) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True # Get the local variables of the frame of our plugin tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s" self._logger.critical( msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals) ) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace("plugin.", "") end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat(end_time - start_time) delay = self._prettyTimeDeltaFormat(start_time - event["created_at"]) msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s" data = [ event["id"], event["created_at"].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay, ] self._engine.timing_logger.info(msg_format, *data) return self._active def _prettyTimeDeltaFormat(self, time_delta): days, remainder = divmod(time_delta.total_seconds(), 86400) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) return "%02d:%02d:%02d:%02d.%06d" % ( days, hours, minutes, seconds, time_delta.microseconds, ) def isActive(self): """ Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool} """ return self._active def __str__(self): """ The name of the callback. @return: The name of the callback @rtype: I{str} """ return self._name class CustomSMTPHandler(logging.handlers.SMTPHandler): """ A custom SMTPHandler subclass that will adapt it's subject depending on the error severity. 
""" LEVEL_SUBJECTS = { logging.ERROR: "ERROR - SG event daemon.", logging.CRITICAL: "CRITICAL - SG event daemon.", } def __init__( self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None ): args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials] if credentials: # Python 2.7 implemented the secure argument if CURRENT_PYTHON_VERSION >= PYTHON_27: args.append(secure) else: self.secure = secure logging.handlers.SMTPHandler.__init__(self, *args) def getSubject(self, record): subject = logging.handlers.SMTPHandler.getSubject(self, record) if record.levelno in self.LEVEL_SUBJECTS: return subject + " " + self.LEVEL_SUBJECTS[record.levelno] return subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ # Mostly copied from Python 2.7 implementation. try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg, ) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class EventDaemonError(Exception): """ Base error for the Shotgun event system. """ pass class ConfigError(EventDaemonError): """ Used when an error is detected in the config file. """ pass if sys.platform == "win32": class WindowsService(win32serviceutil.ServiceFramework): """ Windows service wrapper """ _svc_name_ = "ShotgunEventDaemon" _svc_display_name_ = "Shotgun Event Handler" def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self._engine = Engine(_getConfigPath()) def SvcStop(self): """ Stop the Windows service. """ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop() def SvcDoRun(self): """ Start the Windows service. """ servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ""), ) self.main() def main(self): """ Primary Windows entry point """ self._engine.start() class LinuxDaemon(daemonizer.Daemon): """ Linux Daemon wrapper or wrapper used for foreground operation on Windows """ def __init__(self): self._engine = Engine(_getConfigPath()) super(LinuxDaemon, self).__init__( "shotgunEvent", self._engine.config.getEnginePIDFile() ) def start(self, daemonize=True): if not daemonize: # Setup the stdout logger handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(levelname)s:%(name)s:%(message)s") ) logging.getLogger().addHandler(handler) super(LinuxDaemon, self).start(daemonize) def _run(self): """ Start the engine's main loop """ self._engine.start() def _cleanup(self): self._engine.stop() def main(): """ """ if CURRENT_PYTHON_VERSION <= PYTHON_26: print( "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer." 
) return 3 action = None if len(sys.argv) > 1: action = sys.argv[1] if sys.platform == "win32" and action != "foreground": win32serviceutil.HandleCommandLine(WindowsService) return 0 if action: daemon = LinuxDaemon() # Find the function to call on the daemon and call it func = getattr(daemon, action, None) if action[:1] != "_" and func is not None: func() return 0 print("Unknown command: %s" % action) print("usage: %s start|stop|restart|foreground" % sys.argv[0]) return 2 def _getConfigPath(): """ Get the path of the shotgunEventDaemon configuration file. """ paths = ["/etc", os.path.dirname(__file__)] # Get the current path of the daemon script scriptPath = sys.argv[0] if scriptPath != "" and scriptPath != "-c": # Make absolute path and eliminate any symlinks if any. scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) # Add the script's directory to the paths we'll search for the config. paths[:0] = [os.path.dirname(scriptPath)] # Search for a config file. for path in paths: path = os.path.join(path, "shotgunEventDaemon.conf") if os.path.exists(path): return path # No config file was found raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths)) if __name__ == "__main__": sys.exit(main())
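A minimal standalone sketch of the matchEvents filter semantics implemented by Callback.canProcess above. The filter dict and the sample event dicts below are hypothetical illustrations, not values used by the daemon.

```python
# Standalone sketch of the matchEvents semantics from Callback.canProcess.
# The filter and the sample events are hypothetical.

def matches(matchEvents, event):
    if not matchEvents:
        return True  # No filter: accept every event.
    # A "*" key acts as a catch-all event type.
    eventType = "*" if "*" in matchEvents else event["event_type"]
    if eventType not in matchEvents:
        return False
    attributes = matchEvents[eventType]
    # None or "*" accepts any attribute change for this event type.
    if attributes is None or "*" in attributes:
        return True
    return bool(event["attribute_name"] and event["attribute_name"] in attributes)


# Only status changes on Tasks pass this filter.
taskStatusFilter = {"Shotgun_Task_Change": ["sg_status_list"]}
print(matches(taskStatusFilter,
              {"event_type": "Shotgun_Task_Change", "attribute_name": "sg_status_list"}))  # True
print(matches(taskStatusFilter,
              {"event_type": "Shotgun_Shot_Change", "attribute_name": "sg_status_list"}))  # False
```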
__init__
@param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param plugin: The plugin this callback belongs to; used to derive the callback's logger. @type plugin: L{Plugin} @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking the callback. @type matchEvents: dict @param args: Any data structure you would like passed to your callback function. Defaults to None. @type args: Any object. @param stopOnError: If True, deactivate this callback after an error occurs while it processes an event. Defaults to True. @type stopOnError: I{bool} @raise TypeError: If the callback is not a callable object.
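In practice these arguments are supplied by the daemon itself: Plugin.load (in the source below) calls a plugin's registerCallbacks entry point with a Registrar, whose registerCallback builds the Callback. A hedged sketch of such a plugin file follows; the script name, key, and callback body are placeholders.

```python
# Hypothetical plugin file sketch. Plugin.load looks for registerCallbacks
# and calls it with a Registrar; the script name and key are placeholders.

def registerCallbacks(reg):
    # Dispatch only Task status changes to the callback.
    matchEvents = {"Shotgun_Task_Change": ["sg_status_list"]}
    reg.registerCallback(
        "plugin_script_name",  # placeholder Shotgun script name
        "plugin_script_key",   # placeholder Shotgun script key
        logStatusChange,
        matchEvents,
        None,  # args, passed through to the callback unchanged
    )


def logStatusChange(sg, logger, event, args):
    # Callback.process invokes this as callback(shotgun, logger, event, args).
    logger.info("Task status changed: %s", event["meta"])
```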
#!/usr/bin/env python # # Init file for Shotgun event daemon # # chkconfig: 345 99 00 # description: Shotgun event daemon # ### BEGIN INIT INFO # Provides: shotgunEvent # Required-Start: $network # Should-Start: $remote_fs # Required-Stop: $network # Should-Stop: $remote_fs # Default-Start: 2 3 4 5 # Short-Description: Shotgun event daemon # Description: Shotgun event daemon ### END INIT INFO """ For an overview of shotgunEvents, please see raw documentation in the docs folder or an html compiled version at: http://shotgunsoftware.github.com/shotgunEvents """ from __future__ import print_function __version__ = "1.0" __version_info__ = (1, 0) # Suppress the deprecation warning about imp until we get around to replacing it import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import imp import datetime import logging import logging.handlers import os import pprint import socket import sys import time import traceback from six.moves import configparser import six.moves.cPickle as pickle from distutils.version import StrictVersion if sys.platform == "win32": import win32serviceutil import win32service import win32event import servicemanager import daemonizer import shotgun_api3 as sg from shotgun_api3.lib.sgtimezone import SgTimezone SG_TIMEZONE = SgTimezone() CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0]) PYTHON_26 = StrictVersion("2.6") PYTHON_27 = StrictVersion("2.7") EMAIL_FORMAT_STRING = """Time: %(asctime)s Logger: %(name)s Path: %(pathname)s Function: %(funcName)s Line: %(lineno)d %(message)s""" def _setFilePathOnLogger(logger, path): # Remove any previous handler. _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler) # Add the file handler handler = logging.handlers.TimedRotatingFileHandler( path, "midnight", backupCount=10 ) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) def _removeHandlersFromLogger(logger, handlerTypes=None): """ Remove all handlers or handlers of a specified type from a logger. @param logger: The logger who's handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses. """ for handler in logger.handlers: if handlerTypes is None or isinstance(handler, handlerTypes): logger.removeHandler(handler) def _addMailHandlerToLogger( logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None, ): """ Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler. 
""" if smtpServer and fromAddr and toAddrs and emailSubject: mailHandler = CustomSMTPHandler( smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure ) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler) class Config(configparser.SafeConfigParser): def __init__(self, path): configparser.SafeConfigParser.__init__(self, os.environ) self.read(path) def getShotgunURL(self): return self.get("shotgun", "server") def getEngineScriptName(self): return self.get("shotgun", "name") def getEngineScriptKey(self): return self.get("shotgun", "key") def getEngineProxyServer(self): try: proxy_server = self.get("shotgun", "proxy_server").strip() if not proxy_server: return None return proxy_server except configparser.NoOptionError: return None def getEventIdFile(self): return self.get("daemon", "eventIdFile") def getEnginePIDFile(self): return self.get("daemon", "pidFile") def getPluginPaths(self): return [s.strip() for s in self.get("plugins", "paths").split(",")] def getSMTPServer(self): return self.get("emails", "server") def getSMTPPort(self): if self.has_option("emails", "port"): return self.getint("emails", "port") return 25 def getFromAddr(self): return self.get("emails", "from") def getToAddrs(self): return [s.strip() for s in self.get("emails", "to").split(",")] def getEmailSubject(self): return self.get("emails", "subject") def getEmailUsername(self): if self.has_option("emails", "username"): return self.get("emails", "username") return None def getEmailPassword(self): if self.has_option("emails", "password"): return self.get("emails", "password") return None def getSecureSMTP(self): if self.has_option("emails", "useTLS"): return self.getboolean("emails", "useTLS") or False return False def getLogMode(self): return self.getint("daemon", "logMode") def getLogLevel(self): return self.getint("daemon", "logging") def getMaxEventBatchSize(self): if self.has_option("daemon", "max_event_batch_size"): return self.getint("daemon", "max_event_batch_size") return 500 def getLogFile(self, filename=None): if filename is None: if self.has_option("daemon", "logFile"): filename = self.get("daemon", "logFile") else: raise ConfigError("The config file has no logFile option.") if self.has_option("daemon", "logPath"): path = self.get("daemon", "logPath") if not os.path.exists(path): os.makedirs(path) elif not os.path.isdir(path): raise ConfigError( "The logPath value in the config should point to a directory." ) path = os.path.join(path, filename) else: path = filename return path def getTimingLogFile(self): if ( not self.has_option("daemon", "timing_log") or self.get("daemon", "timing_log") != "on" ): return None return self.getLogFile() + ".timing" class Engine(object): """ The engine holds the main loop of event processing. 
""" def __init__(self, configPath): """ """ self._continue = True self._eventIdData = {} # Read/parse the config self.config = Config(configPath) # Get config values self._pluginCollections = [ PluginCollection(self, s) for s in self.config.getPluginPaths() ] self._sg = sg.Shotgun( self.config.getShotgunURL(), self.config.getEngineScriptName(), self.config.getEngineScriptKey(), http_proxy=self.config.getEngineProxyServer(), ) self._max_conn_retries = self.config.getint("daemon", "max_conn_retries") self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep") self._fetch_interval = self.config.getint("daemon", "fetch_interval") self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid") # Setup the loggers for the main engine if self.config.getLogMode() == 0: # Set the root logger for file output. rootLogger = logging.getLogger() rootLogger.config = self.config _setFilePathOnLogger(rootLogger, self.config.getLogFile()) print(self.config.getLogFile()) # Set the engine logger for email output. self.log = logging.getLogger("engine") self.setEmailsOnLogger(self.log, True) else: # Set the engine logger for file and email output. self.log = logging.getLogger("engine") self.log.config = self.config _setFilePathOnLogger(self.log, self.config.getLogFile()) self.setEmailsOnLogger(self.log, True) self.log.setLevel(self.config.getLogLevel()) # Setup the timing log file timing_log_filename = self.config.getTimingLogFile() if timing_log_filename: self.timing_logger = logging.getLogger("timing") self.timing_logger.setLevel(self.config.getLogLevel()) _setFilePathOnLogger(self.timing_logger, timing_log_filename) else: self.timing_logger = None super(Engine, self).__init__() def setEmailsOnLogger(self, logger, emails): # Configure the logger for email output _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler) if emails is False: return smtpServer = self.config.getSMTPServer() smtpPort = self.config.getSMTPPort() fromAddr = self.config.getFromAddr() emailSubject = self.config.getEmailSubject() username = self.config.getEmailUsername() password = self.config.getEmailPassword() if self.config.getSecureSMTP(): secure = (None, None) else: secure = None if emails is True: toAddrs = self.config.getToAddrs() elif isinstance(emails, (list, tuple)): toAddrs = emails else: msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s." raise ValueError(msg % type(emails)) _addMailHandlerToLogger( logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure, ) def start(self): """ Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started. """ # TODO: Take value from config socket.setdefaulttimeout(60) # Notify which version of shotgun api we are using self.log.info("Using SG Python API version %s" % sg.__version__) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning("Keyboard interrupt. Cleaning up...") except Exception as err: msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s" self.log.critical(msg, type(err), traceback.format_exc(err)) def _loadEventIdData(self): """ Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. 
In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there. """ eventIdFile = self.config.getEventIdFile() if eventIdFile and os.path.exists(eventIdFile): try: fh = open(eventIdFile, "rb") try: self._eventIdData = pickle.load(fh) # Provide event id info to the plugin collections. Once # they've figured out what to do with it, ask them for their # last processed id. noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) # If we don't have a state it means there's no match # in the id file. First we'll search to see the latest id a # matching plugin name has elsewhere in the id file. We do # this as a fallback in case the plugins directory has been # moved. If there's no match, use the latest event id # in Shotgun. if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for pluginName, pluginState in collection.items(): if pluginName in maxPluginStates.keys(): if pluginState[0] > maxPluginStates[pluginName][0]: maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if pluginName in maxPluginStates.keys(): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() # Backwards compatibility: # Reopen the file to try to read an old-style int fh = open(eventIdFile, "rb") line = fh.readline().strip() if line.isdigit(): # The _loadEventIdData got an old-style id file containing a single # int which is the last id properly processed. lastEventId = int(line) self.log.debug( "Read last event id (%d) from file.", lastEventId ) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError( "Could not load event id from file.\n\n%s" % traceback.format_exc(err) ) else: # No id file? # Get the event data from the database. lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData() def _getLastEventIdFromDatabase(self): conn_attempts = 0 lastEventId = None while lastEventId is None: order = [{"column": "id", "direction": "desc"}] try: result = self._sg.find_one( "EventLogEntry", filters=[], fields=["id"], order=order ) except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) else: lastEventId = result["id"] self.log.info("Last event id (%d) from the SG database.", lastEventId) return lastEventId def _mainLoop(self): """ Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occured during registration), skip it. 
- If a callback is deemed "inactive" (an error occured during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop. """ self.log.debug("Starting the event processing loop.") while self._continue: # Process events events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() # if we're lagging behind Shotgun, we received a full batch of events # skip the sleep() call in this case if len(events) < self.config.getMaxEventBatchSize(): time.sleep(self._fetch_interval) # Reload plugins for collection in self._pluginCollections: collection.load() # Make sure that newly loaded events have proper state. self._loadEventIdData() self.log.debug("Shuting down event processing loop.") def stop(self): self._continue = False def _getNewEvents(self): """ Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries. """ nextEventId = None for newId in [ coll.getNextUnprocessedEventId() for coll in self._pluginCollections ]: if newId is not None and (nextEventId is None or newId < nextEventId): nextEventId = newId if nextEventId is not None: filters = [["id", "greater_than", nextEventId - 1]] fields = [ "id", "event_type", "attribute_name", "meta", "entity", "user", "project", "session_uuid", "created_at", ] order = [{"column": "id", "direction": "asc"}] conn_attempts = 0 while True: try: events = self._sg.find( "EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize(), ) if events: self.log.debug( "Got %d events: %d to %d.", len(events), events[0]["id"], events[-1]["id"], ) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts( conn_attempts, str(err) ) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return [] def _saveEventIdData(self): """ Save an event Id to persistant storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing. """ eventIdFile = self.config.getEventIdFile() if eventIdFile is not None: for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for colPath, state in self._eventIdData.items(): if state: try: with open(eventIdFile, "wb") as fh: # Use protocol 2 so it can also be loaded in Python 2 pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error( "Can not write event id data to %s.\n\n%s", eventIdFile, traceback.format_exc(err), ) break else: self.log.warning("No state was found. Not saving to disk.") def _checkConnectionAttempts(self, conn_attempts, msg): conn_attempts += 1 if conn_attempts == self._max_conn_retries: self.log.error( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) conn_attempts = 0 time.sleep(self._conn_retry_sleep) else: self.log.warning( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) return conn_attempts class PluginCollection(object): """ A group of plugin files in a location on the disk. 
""" def __init__(self, engine, path): if not os.path.isdir(path): raise ValueError("Invalid path: %s" % path) self._engine = engine self.path = path self._plugins = {} self._stateData = {} def setState(self, state): if isinstance(state, int): for plugin in self: plugin.setState(state) self._stateData[plugin.getName()] = plugin.getState() else: self._stateData = state for plugin in self: pluginState = self._stateData.get(plugin.getName()) if pluginState: plugin.setState(pluginState) def getState(self): for plugin in self: self._stateData[plugin.getName()] = plugin.getState() return self._stateData def getNextUnprocessedEventId(self): eId = None for plugin in self: if not plugin.isActive(): continue newId = plugin.getNextUnprocessedEventId() if newId is not None and (eId is None or newId < eId): eId = newId return eId def process(self, event): for plugin in self: if plugin.isActive(): plugin.process(event) else: plugin.logger.debug("Skipping: inactive.") def load(self): """ Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them. """ newPlugins = {} for basename in os.listdir(self.path): if not basename.endswith(".py") or basename.startswith("."): continue if basename in self._plugins: newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin( self._engine, os.path.join(self.path, basename) ) newPlugins[basename].load() self._plugins = newPlugins def __iter__(self): for basename in sorted(self._plugins.keys()): yield self._plugins[basename] class Plugin(object): """ The plugin class represents a file on disk which contains one or more callbacks. """ def __init__(self, engine, path): """ @param engine: The engine that instanciated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file. """ self._engine = engine self._path = path if not os.path.isfile(path): raise ValueError("The path to the plugin is not a valid file - %s." % path) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} # Setup the plugin's logger self.logger = logging.getLogger("plugin." + self.getName()) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if self._engine.config.getLogMode() == 1: _setFilePathOnLogger( self.logger, self._engine.config.getLogFile("plugin." + self.getName()) ) def getName(self): return self._pluginName def setState(self, state): if isinstance(state, int): self._lastEventId = state elif isinstance(state, tuple): self._lastEventId, self._backlog = state else: raise ValueError("Unknown state type: %s." % type(state)) def getState(self): return (self._lastEventId, self._backlog) def getNextUnprocessedEventId(self): if self._lastEventId: nextId = self._lastEventId + 1 else: nextId = None now = datetime.datetime.now() for k in list(self._backlog): v = self._backlog[k] if v < now: self.logger.warning("Timeout elapsed on backlog event id %d.", k) del self._backlog[k] elif nextId is None or k < nextId: nextId = k return nextId def isActive(self): """ Is the current plugin active. Should it's callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. 
@rtype: I{bool} """ return self._active def setEmails(self, *emails): """ Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}. """ self._engine.setEmailsOnLogger(self.logger, emails) def load(self): """ Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return. """ # Check file mtime mtime = os.path.getmtime(self._path) if self._mtime is None: self._engine.log.info("Loading plugin at %s" % self._path) elif self._mtime < mtime: self._engine.log.info("Reloading plugin at %s" % self._path) else: # The mtime of file is equal or older. We don't need to do anything. return # Reset values self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error( "Could not load the plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) return regFunc = getattr(plugin, "registerCallbacks", None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical( "Error running register callback function from plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) self._active = False else: self._engine.log.critical( "Did not find a registerCallbacks function in plugin at %s.", self._path ) self._active = False def registerCallback( self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True, ): """ Register a callback in the plugin. """ global sg sgConnection = sg.Shotgun( self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer(), ) self._callbacks.append( Callback( callback, self, self._engine, sgConnection, matchEvents, args, stopOnError, ) ) def process(self, event): if event["id"] in self._backlog: if self._process(event): self.logger.info("Processed id %d from backlog." % event["id"]) del self._backlog[event["id"]] self._updateLastEventId(event) elif self._lastEventId is not None and event["id"] <= self._lastEventId: msg = "Event %d is too old. Last event processed was (%d)." self.logger.debug(msg, event["id"], self._lastEventId) else: if self._process(event): self._updateLastEventId(event) return self._active def _process(self, event): for callback in self: if callback.isActive(): if callback.canProcess(event): msg = "Dispatching event %d to callback %s." self.logger.debug(msg, event["id"], str(callback)) if not callback.process(event): # A callback in the plugin failed. Deactivate the whole # plugin. self._active = False break else: msg = "Skipping inactive callback %s in plugin." 
self.logger.debug(msg, str(callback)) return self._active def _updateLastEventId(self, event): BACKLOG_TIMEOUT = ( 5 # time in minutes after which we consider a pending event won't happen ) if self._lastEventId is not None and event["id"] > self._lastEventId + 1: event_date = event["created_at"].replace(tzinfo=None) if datetime.datetime.now() > ( event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT) ): # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event # with a lower id should have shown up in the EventLog by now if it actually happened if event["id"] == self._lastEventId + 2: self.logger.info( "Event %d never happened - ignoring.", self._lastEventId + 1 ) else: self.logger.info( "Events %d-%d never happened - ignoring.", self._lastEventId + 1, event["id"] - 1, ) else: # in this case, we want to add the missing events to the backlog as they could show up in the # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range # them to show up until they expire expiration = datetime.datetime.now() + datetime.timedelta( minutes=BACKLOG_TIMEOUT ) for skippedId in range(self._lastEventId + 1, event["id"]): self.logger.info("Adding event id %d to backlog.", skippedId) self._backlog[skippedId] = expiration self._lastEventId = event["id"] def __iter__(self): """ A plugin is iterable and will iterate over all its L{Callback} objects. """ return self._callbacks.__iter__() def __str__(self): """ Provide the name of the plugin when it is cast as string. @return: The name of the plugin. @rtype: I{str} """ return self.getName() class Registrar(object): """ See public API docs in docs folder. """ def __init__(self, plugin): """ Wrap a plugin so it can be passed to a user. """ self._plugin = plugin self._allowed = ["logger", "setEmails", "registerCallback"] def getLogger(self): """ Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger} """ # TODO: Fix this ugly protected member access return self.logger def __getattr__(self, name): if name in self._allowed: return getattr(self._plugin, name) raise AttributeError( "type object '%s' has no attribute '%s'" % (type(self).__name__, name) ) class Callback(object): """ A part of a plugin that can be called to process a Shotgun event. """ # MASKED: __init__ function (lines 1011-1064) def canProcess(self, event): if not self._matchEvents: return True if "*" in self._matchEvents: eventType = "*" else: eventType = event["event_type"] if eventType not in self._matchEvents: return False attributes = self._matchEvents[eventType] if attributes is None or "*" in attributes: return True if event["attribute_name"] and event["attribute_name"] in attributes: return True return False def process(self, event): """ Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. 
@type event: I{dict} """ # set session_uuid for UI updates if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event["session_uuid"]) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True # Get the local variables of the frame of our plugin tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s" self._logger.critical( msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals) ) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace("plugin.", "") end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat(end_time - start_time) delay = self._prettyTimeDeltaFormat(start_time - event["created_at"]) msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s" data = [ event["id"], event["created_at"].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay, ] self._engine.timing_logger.info(msg_format, *data) return self._active def _prettyTimeDeltaFormat(self, time_delta): days, remainder = divmod(time_delta.total_seconds(), 86400) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) return "%02d:%02d:%02d:%02d.%06d" % ( days, hours, minutes, seconds, time_delta.microseconds, ) def isActive(self): """ Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool} """ return self._active def __str__(self): """ The name of the callback. @return: The name of the callback @rtype: I{str} """ return self._name class CustomSMTPHandler(logging.handlers.SMTPHandler): """ A custom SMTPHandler subclass that will adapt it's subject depending on the error severity. """ LEVEL_SUBJECTS = { logging.ERROR: "ERROR - SG event daemon.", logging.CRITICAL: "CRITICAL - SG event daemon.", } def __init__( self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None ): args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials] if credentials: # Python 2.7 implemented the secure argument if CURRENT_PYTHON_VERSION >= PYTHON_27: args.append(secure) else: self.secure = secure logging.handlers.SMTPHandler.__init__(self, *args) def getSubject(self, record): subject = logging.handlers.SMTPHandler.getSubject(self, record) if record.levelno in self.LEVEL_SUBJECTS: return subject + " " + self.LEVEL_SUBJECTS[record.levelno] return subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ # Mostly copied from Python 2.7 implementation. 
try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg, ) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class EventDaemonError(Exception): """ Base error for the Shotgun event system. """ pass class ConfigError(EventDaemonError): """ Used when an error is detected in the config file. """ pass if sys.platform == "win32": class WindowsService(win32serviceutil.ServiceFramework): """ Windows service wrapper """ _svc_name_ = "ShotgunEventDaemon" _svc_display_name_ = "Shotgun Event Handler" def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self._engine = Engine(_getConfigPath()) def SvcStop(self): """ Stop the Windows service. """ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop() def SvcDoRun(self): """ Start the Windows service. """ servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ""), ) self.main() def main(self): """ Primary Windows entry point """ self._engine.start() class LinuxDaemon(daemonizer.Daemon): """ Linux Daemon wrapper or wrapper used for foreground operation on Windows """ def __init__(self): self._engine = Engine(_getConfigPath()) super(LinuxDaemon, self).__init__( "shotgunEvent", self._engine.config.getEnginePIDFile() ) def start(self, daemonize=True): if not daemonize: # Setup the stdout logger handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(levelname)s:%(name)s:%(message)s") ) logging.getLogger().addHandler(handler) super(LinuxDaemon, self).start(daemonize) def _run(self): """ Start the engine's main loop """ self._engine.start() def _cleanup(self): self._engine.stop() def main(): """ """ if CURRENT_PYTHON_VERSION <= PYTHON_26: print( "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer." ) return 3 action = None if len(sys.argv) > 1: action = sys.argv[1] if sys.platform == "win32" and action != "foreground": win32serviceutil.HandleCommandLine(WindowsService) return 0 if action: daemon = LinuxDaemon() # Find the function to call on the daemon and call it func = getattr(daemon, action, None) if action[:1] != "_" and func is not None: func() return 0 print("Unknown command: %s" % action) print("usage: %s start|stop|restart|foreground" % sys.argv[0]) return 2 def _getConfigPath(): """ Get the path of the shotgunEventDaemon configuration file. """ paths = ["/etc", os.path.dirname(__file__)] # Get the current path of the daemon script scriptPath = sys.argv[0] if scriptPath != "" and scriptPath != "-c": # Make absolute path and eliminate any symlinks if any. scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) # Add the script's directory to the paths we'll search for the config. paths[:0] = [os.path.dirname(scriptPath)] # Search for a config file. 
for path in paths: path = os.path.join(path, "shotgunEventDaemon.conf") if os.path.exists(path): return path # No config file was found raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths)) if __name__ == "__main__": sys.exit(main())
def __init__( self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True, ): """ @param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any datastructure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object. """ if not callable(callback): raise TypeError( "The callback must be a callable object (function, method or callable class instance)." ) self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True # Find a name for this object if hasattr(callback, "__name__"): self._name = callback.__name__ elif hasattr(callback, "__class__") and hasattr(callback, "__call__"): self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback))) else: raise ValueError( "registerCallback should be called with a function or a callable object instance as callback argument." ) # TODO: Get rid of this protected member access self._logger = logging.getLogger(plugin.logger.name + "." + self._name) self._logger.config = self._engine.config
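The name-derivation branch in the implementation above, shown in isolation: a plain function contributes its __name__, while a callable class instance falls back to its class name plus id(). A small sketch with hypothetical callbacks:

```python
# Hypothetical callbacks demonstrating the naming fallback above.
def myCallback(sg, logger, event, args):
    pass

class MyCallable(object):
    def __call__(self, sg, logger, event, args):
        pass

for cb in (myCallback, MyCallable()):
    if hasattr(cb, "__name__"):
        name = cb.__name__
    elif hasattr(cb, "__class__") and hasattr(cb, "__call__"):
        name = "%s_%s" % (cb.__class__.__name__, hex(id(cb)))
    print(name)  # "myCallback", then e.g. "MyCallable_0x7f..."
```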
1011
1064
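The masked __init__ spans lines 1011-1064 of the full source below. A related detail: the duration and delay values that Callback.process writes to the timing log use the DD:HH:MM:SS.microseconds layout of _prettyTimeDeltaFormat; a minimal sketch with a made-up delta:

```python
import datetime

# Made-up delta; reproduces the _prettyTimeDeltaFormat layout shown above.
delta = datetime.timedelta(days=1, hours=2, minutes=3, seconds=4, microseconds=500)
days, remainder = divmod(delta.total_seconds(), 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
print("%02d:%02d:%02d:%02d.%06d" % (days, hours, minutes, seconds, delta.microseconds))
# -> 01:02:03:04.000500
```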
#!/usr/bin/env python # # Init file for Shotgun event daemon # # chkconfig: 345 99 00 # description: Shotgun event daemon # ### BEGIN INIT INFO # Provides: shotgunEvent # Required-Start: $network # Should-Start: $remote_fs # Required-Stop: $network # Should-Stop: $remote_fs # Default-Start: 2 3 4 5 # Short-Description: Shotgun event daemon # Description: Shotgun event daemon ### END INIT INFO """ For an overview of shotgunEvents, please see raw documentation in the docs folder or an html compiled version at: http://shotgunsoftware.github.com/shotgunEvents """ from __future__ import print_function __version__ = "1.0" __version_info__ = (1, 0) # Suppress the deprecation warning about imp until we get around to replacing it import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import imp import datetime import logging import logging.handlers import os import pprint import socket import sys import time import traceback from six.moves import configparser import six.moves.cPickle as pickle from distutils.version import StrictVersion if sys.platform == "win32": import win32serviceutil import win32service import win32event import servicemanager import daemonizer import shotgun_api3 as sg from shotgun_api3.lib.sgtimezone import SgTimezone SG_TIMEZONE = SgTimezone() CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0]) PYTHON_26 = StrictVersion("2.6") PYTHON_27 = StrictVersion("2.7") EMAIL_FORMAT_STRING = """Time: %(asctime)s Logger: %(name)s Path: %(pathname)s Function: %(funcName)s Line: %(lineno)d %(message)s""" def _setFilePathOnLogger(logger, path): # Remove any previous handler. _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler) # Add the file handler handler = logging.handlers.TimedRotatingFileHandler( path, "midnight", backupCount=10 ) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) def _removeHandlersFromLogger(logger, handlerTypes=None): """ Remove all handlers or handlers of a specified type from a logger. @param logger: The logger who's handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses. """ for handler in logger.handlers: if handlerTypes is None or isinstance(handler, handlerTypes): logger.removeHandler(handler) def _addMailHandlerToLogger( logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None, ): """ Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler. 
""" if smtpServer and fromAddr and toAddrs and emailSubject: mailHandler = CustomSMTPHandler( smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure ) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler) class Config(configparser.SafeConfigParser): def __init__(self, path): configparser.SafeConfigParser.__init__(self, os.environ) self.read(path) def getShotgunURL(self): return self.get("shotgun", "server") def getEngineScriptName(self): return self.get("shotgun", "name") def getEngineScriptKey(self): return self.get("shotgun", "key") def getEngineProxyServer(self): try: proxy_server = self.get("shotgun", "proxy_server").strip() if not proxy_server: return None return proxy_server except configparser.NoOptionError: return None def getEventIdFile(self): return self.get("daemon", "eventIdFile") def getEnginePIDFile(self): return self.get("daemon", "pidFile") def getPluginPaths(self): return [s.strip() for s in self.get("plugins", "paths").split(",")] def getSMTPServer(self): return self.get("emails", "server") def getSMTPPort(self): if self.has_option("emails", "port"): return self.getint("emails", "port") return 25 def getFromAddr(self): return self.get("emails", "from") def getToAddrs(self): return [s.strip() for s in self.get("emails", "to").split(",")] def getEmailSubject(self): return self.get("emails", "subject") def getEmailUsername(self): if self.has_option("emails", "username"): return self.get("emails", "username") return None def getEmailPassword(self): if self.has_option("emails", "password"): return self.get("emails", "password") return None def getSecureSMTP(self): if self.has_option("emails", "useTLS"): return self.getboolean("emails", "useTLS") or False return False def getLogMode(self): return self.getint("daemon", "logMode") def getLogLevel(self): return self.getint("daemon", "logging") def getMaxEventBatchSize(self): if self.has_option("daemon", "max_event_batch_size"): return self.getint("daemon", "max_event_batch_size") return 500 def getLogFile(self, filename=None): if filename is None: if self.has_option("daemon", "logFile"): filename = self.get("daemon", "logFile") else: raise ConfigError("The config file has no logFile option.") if self.has_option("daemon", "logPath"): path = self.get("daemon", "logPath") if not os.path.exists(path): os.makedirs(path) elif not os.path.isdir(path): raise ConfigError( "The logPath value in the config should point to a directory." ) path = os.path.join(path, filename) else: path = filename return path def getTimingLogFile(self): if ( not self.has_option("daemon", "timing_log") or self.get("daemon", "timing_log") != "on" ): return None return self.getLogFile() + ".timing" class Engine(object): """ The engine holds the main loop of event processing. 
""" def __init__(self, configPath): """ """ self._continue = True self._eventIdData = {} # Read/parse the config self.config = Config(configPath) # Get config values self._pluginCollections = [ PluginCollection(self, s) for s in self.config.getPluginPaths() ] self._sg = sg.Shotgun( self.config.getShotgunURL(), self.config.getEngineScriptName(), self.config.getEngineScriptKey(), http_proxy=self.config.getEngineProxyServer(), ) self._max_conn_retries = self.config.getint("daemon", "max_conn_retries") self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep") self._fetch_interval = self.config.getint("daemon", "fetch_interval") self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid") # Setup the loggers for the main engine if self.config.getLogMode() == 0: # Set the root logger for file output. rootLogger = logging.getLogger() rootLogger.config = self.config _setFilePathOnLogger(rootLogger, self.config.getLogFile()) print(self.config.getLogFile()) # Set the engine logger for email output. self.log = logging.getLogger("engine") self.setEmailsOnLogger(self.log, True) else: # Set the engine logger for file and email output. self.log = logging.getLogger("engine") self.log.config = self.config _setFilePathOnLogger(self.log, self.config.getLogFile()) self.setEmailsOnLogger(self.log, True) self.log.setLevel(self.config.getLogLevel()) # Setup the timing log file timing_log_filename = self.config.getTimingLogFile() if timing_log_filename: self.timing_logger = logging.getLogger("timing") self.timing_logger.setLevel(self.config.getLogLevel()) _setFilePathOnLogger(self.timing_logger, timing_log_filename) else: self.timing_logger = None super(Engine, self).__init__() def setEmailsOnLogger(self, logger, emails): # Configure the logger for email output _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler) if emails is False: return smtpServer = self.config.getSMTPServer() smtpPort = self.config.getSMTPPort() fromAddr = self.config.getFromAddr() emailSubject = self.config.getEmailSubject() username = self.config.getEmailUsername() password = self.config.getEmailPassword() if self.config.getSecureSMTP(): secure = (None, None) else: secure = None if emails is True: toAddrs = self.config.getToAddrs() elif isinstance(emails, (list, tuple)): toAddrs = emails else: msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s." raise ValueError(msg % type(emails)) _addMailHandlerToLogger( logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure, ) def start(self): """ Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started. """ # TODO: Take value from config socket.setdefaulttimeout(60) # Notify which version of shotgun api we are using self.log.info("Using SG Python API version %s" % sg.__version__) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning("Keyboard interrupt. Cleaning up...") except Exception as err: msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s" self.log.critical(msg, type(err), traceback.format_exc(err)) def _loadEventIdData(self): """ Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. 
In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there. """ eventIdFile = self.config.getEventIdFile() if eventIdFile and os.path.exists(eventIdFile): try: fh = open(eventIdFile, "rb") try: self._eventIdData = pickle.load(fh) # Provide event id info to the plugin collections. Once # they've figured out what to do with it, ask them for their # last processed id. noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) # If we don't have a state it means there's no match # in the id file. First we'll search to see the latest id a # matching plugin name has elsewhere in the id file. We do # this as a fallback in case the plugins directory has been # moved. If there's no match, use the latest event id # in Shotgun. if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for pluginName, pluginState in collection.items(): if pluginName in maxPluginStates.keys(): if pluginState[0] > maxPluginStates[pluginName][0]: maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if pluginName in maxPluginStates.keys(): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() # Backwards compatibility: # Reopen the file to try to read an old-style int fh = open(eventIdFile, "rb") line = fh.readline().strip() if line.isdigit(): # The _loadEventIdData got an old-style id file containing a single # int which is the last id properly processed. lastEventId = int(line) self.log.debug( "Read last event id (%d) from file.", lastEventId ) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError( "Could not load event id from file.\n\n%s" % traceback.format_exc(err) ) else: # No id file? # Get the event data from the database. lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData() def _getLastEventIdFromDatabase(self): conn_attempts = 0 lastEventId = None while lastEventId is None: order = [{"column": "id", "direction": "desc"}] try: result = self._sg.find_one( "EventLogEntry", filters=[], fields=["id"], order=order ) except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) else: lastEventId = result["id"] self.log.info("Last event id (%d) from the SG database.", lastEventId) return lastEventId def _mainLoop(self): """ Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occured during registration), skip it. 
- If a callback is deemed "inactive" (an error occured during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop. """ self.log.debug("Starting the event processing loop.") while self._continue: # Process events events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() # if we're lagging behind Shotgun, we received a full batch of events # skip the sleep() call in this case if len(events) < self.config.getMaxEventBatchSize(): time.sleep(self._fetch_interval) # Reload plugins for collection in self._pluginCollections: collection.load() # Make sure that newly loaded events have proper state. self._loadEventIdData() self.log.debug("Shuting down event processing loop.") def stop(self): self._continue = False def _getNewEvents(self): """ Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries. """ nextEventId = None for newId in [ coll.getNextUnprocessedEventId() for coll in self._pluginCollections ]: if newId is not None and (nextEventId is None or newId < nextEventId): nextEventId = newId if nextEventId is not None: filters = [["id", "greater_than", nextEventId - 1]] fields = [ "id", "event_type", "attribute_name", "meta", "entity", "user", "project", "session_uuid", "created_at", ] order = [{"column": "id", "direction": "asc"}] conn_attempts = 0 while True: try: events = self._sg.find( "EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize(), ) if events: self.log.debug( "Got %d events: %d to %d.", len(events), events[0]["id"], events[-1]["id"], ) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts( conn_attempts, str(err) ) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return [] def _saveEventIdData(self): """ Save an event Id to persistant storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing. """ eventIdFile = self.config.getEventIdFile() if eventIdFile is not None: for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for colPath, state in self._eventIdData.items(): if state: try: with open(eventIdFile, "wb") as fh: # Use protocol 2 so it can also be loaded in Python 2 pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error( "Can not write event id data to %s.\n\n%s", eventIdFile, traceback.format_exc(err), ) break else: self.log.warning("No state was found. Not saving to disk.") def _checkConnectionAttempts(self, conn_attempts, msg): conn_attempts += 1 if conn_attempts == self._max_conn_retries: self.log.error( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) conn_attempts = 0 time.sleep(self._conn_retry_sleep) else: self.log.warning( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) return conn_attempts class PluginCollection(object): """ A group of plugin files in a location on the disk. 
""" def __init__(self, engine, path): if not os.path.isdir(path): raise ValueError("Invalid path: %s" % path) self._engine = engine self.path = path self._plugins = {} self._stateData = {} def setState(self, state): if isinstance(state, int): for plugin in self: plugin.setState(state) self._stateData[plugin.getName()] = plugin.getState() else: self._stateData = state for plugin in self: pluginState = self._stateData.get(plugin.getName()) if pluginState: plugin.setState(pluginState) def getState(self): for plugin in self: self._stateData[plugin.getName()] = plugin.getState() return self._stateData def getNextUnprocessedEventId(self): eId = None for plugin in self: if not plugin.isActive(): continue newId = plugin.getNextUnprocessedEventId() if newId is not None and (eId is None or newId < eId): eId = newId return eId def process(self, event): for plugin in self: if plugin.isActive(): plugin.process(event) else: plugin.logger.debug("Skipping: inactive.") def load(self): """ Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them. """ newPlugins = {} for basename in os.listdir(self.path): if not basename.endswith(".py") or basename.startswith("."): continue if basename in self._plugins: newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin( self._engine, os.path.join(self.path, basename) ) newPlugins[basename].load() self._plugins = newPlugins def __iter__(self): for basename in sorted(self._plugins.keys()): yield self._plugins[basename] class Plugin(object): """ The plugin class represents a file on disk which contains one or more callbacks. """ def __init__(self, engine, path): """ @param engine: The engine that instanciated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file. """ self._engine = engine self._path = path if not os.path.isfile(path): raise ValueError("The path to the plugin is not a valid file - %s." % path) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} # Setup the plugin's logger self.logger = logging.getLogger("plugin." + self.getName()) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if self._engine.config.getLogMode() == 1: _setFilePathOnLogger( self.logger, self._engine.config.getLogFile("plugin." + self.getName()) ) def getName(self): return self._pluginName def setState(self, state): if isinstance(state, int): self._lastEventId = state elif isinstance(state, tuple): self._lastEventId, self._backlog = state else: raise ValueError("Unknown state type: %s." % type(state)) def getState(self): return (self._lastEventId, self._backlog) def getNextUnprocessedEventId(self): if self._lastEventId: nextId = self._lastEventId + 1 else: nextId = None now = datetime.datetime.now() for k in list(self._backlog): v = self._backlog[k] if v < now: self.logger.warning("Timeout elapsed on backlog event id %d.", k) del self._backlog[k] elif nextId is None or k < nextId: nextId = k return nextId def isActive(self): """ Is the current plugin active. Should it's callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. 
@rtype: I{bool} """ return self._active def setEmails(self, *emails): """ Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}. """ self._engine.setEmailsOnLogger(self.logger, emails) def load(self): """ Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return. """ # Check file mtime mtime = os.path.getmtime(self._path) if self._mtime is None: self._engine.log.info("Loading plugin at %s" % self._path) elif self._mtime < mtime: self._engine.log.info("Reloading plugin at %s" % self._path) else: # The mtime of file is equal or older. We don't need to do anything. return # Reset values self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error( "Could not load the plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) return regFunc = getattr(plugin, "registerCallbacks", None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical( "Error running register callback function from plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) self._active = False else: self._engine.log.critical( "Did not find a registerCallbacks function in plugin at %s.", self._path ) self._active = False def registerCallback( self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True, ): """ Register a callback in the plugin. """ global sg sgConnection = sg.Shotgun( self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer(), ) self._callbacks.append( Callback( callback, self, self._engine, sgConnection, matchEvents, args, stopOnError, ) ) def process(self, event): if event["id"] in self._backlog: if self._process(event): self.logger.info("Processed id %d from backlog." % event["id"]) del self._backlog[event["id"]] self._updateLastEventId(event) elif self._lastEventId is not None and event["id"] <= self._lastEventId: msg = "Event %d is too old. Last event processed was (%d)." self.logger.debug(msg, event["id"], self._lastEventId) else: if self._process(event): self._updateLastEventId(event) return self._active def _process(self, event): for callback in self: if callback.isActive(): if callback.canProcess(event): msg = "Dispatching event %d to callback %s." self.logger.debug(msg, event["id"], str(callback)) if not callback.process(event): # A callback in the plugin failed. Deactivate the whole # plugin. self._active = False break else: msg = "Skipping inactive callback %s in plugin." 
self.logger.debug(msg, str(callback)) return self._active def _updateLastEventId(self, event): BACKLOG_TIMEOUT = ( 5 # time in minutes after which we consider a pending event won't happen ) if self._lastEventId is not None and event["id"] > self._lastEventId + 1: event_date = event["created_at"].replace(tzinfo=None) if datetime.datetime.now() > ( event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT) ): # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event # with a lower id should have shown up in the EventLog by now if it actually happened if event["id"] == self._lastEventId + 2: self.logger.info( "Event %d never happened - ignoring.", self._lastEventId + 1 ) else: self.logger.info( "Events %d-%d never happened - ignoring.", self._lastEventId + 1, event["id"] - 1, ) else: # in this case, we want to add the missing events to the backlog as they could show up in the # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range # them to show up until they expire expiration = datetime.datetime.now() + datetime.timedelta( minutes=BACKLOG_TIMEOUT ) for skippedId in range(self._lastEventId + 1, event["id"]): self.logger.info("Adding event id %d to backlog.", skippedId) self._backlog[skippedId] = expiration self._lastEventId = event["id"] def __iter__(self): """ A plugin is iterable and will iterate over all its L{Callback} objects. """ return self._callbacks.__iter__() def __str__(self): """ Provide the name of the plugin when it is cast as string. @return: The name of the plugin. @rtype: I{str} """ return self.getName() class Registrar(object): """ See public API docs in docs folder. """ def __init__(self, plugin): """ Wrap a plugin so it can be passed to a user. """ self._plugin = plugin self._allowed = ["logger", "setEmails", "registerCallback"] def getLogger(self): """ Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger} """ # TODO: Fix this ugly protected member access return self.logger def __getattr__(self, name): if name in self._allowed: return getattr(self._plugin, name) raise AttributeError( "type object '%s' has no attribute '%s'" % (type(self).__name__, name) ) class Callback(object): """ A part of a plugin that can be called to process a Shotgun event. """ def __init__( self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True, ): """ @param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any datastructure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object. """ if not callable(callback): raise TypeError( "The callback must be a callable object (function, method or callable class instance)." 
) self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True # Find a name for this object if hasattr(callback, "__name__"): self._name = callback.__name__ elif hasattr(callback, "__class__") and hasattr(callback, "__call__"): self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback))) else: raise ValueError( "registerCallback should be called with a function or a callable object instance as callback argument." ) # TODO: Get rid of this protected member access self._logger = logging.getLogger(plugin.logger.name + "." + self._name) self._logger.config = self._engine.config def canProcess(self, event): if not self._matchEvents: return True if "*" in self._matchEvents: eventType = "*" else: eventType = event["event_type"] if eventType not in self._matchEvents: return False attributes = self._matchEvents[eventType] if attributes is None or "*" in attributes: return True if event["attribute_name"] and event["attribute_name"] in attributes: return True return False def process(self, event): """ Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict} """ # set session_uuid for UI updates if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event["session_uuid"]) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True # Get the local variables of the frame of our plugin tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s" self._logger.critical( msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals) ) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace("plugin.", "") end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat(end_time - start_time) delay = self._prettyTimeDeltaFormat(start_time - event["created_at"]) msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s" data = [ event["id"], event["created_at"].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay, ] self._engine.timing_logger.info(msg_format, *data) return self._active def _prettyTimeDeltaFormat(self, time_delta): days, remainder = divmod(time_delta.total_seconds(), 86400) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) return "%02d:%02d:%02d:%02d.%06d" % ( days, hours, minutes, seconds, time_delta.microseconds, ) def isActive(self): """ Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool} """ return self._active def __str__(self): """ The name of the callback. @return: The name of the callback @rtype: I{str} """ return self._name class CustomSMTPHandler(logging.handlers.SMTPHandler): """ A custom SMTPHandler subclass that will adapt it's subject depending on the error severity. 
""" LEVEL_SUBJECTS = { logging.ERROR: "ERROR - SG event daemon.", logging.CRITICAL: "CRITICAL - SG event daemon.", } def __init__( self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None ): args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials] if credentials: # Python 2.7 implemented the secure argument if CURRENT_PYTHON_VERSION >= PYTHON_27: args.append(secure) else: self.secure = secure logging.handlers.SMTPHandler.__init__(self, *args) def getSubject(self, record): subject = logging.handlers.SMTPHandler.getSubject(self, record) if record.levelno in self.LEVEL_SUBJECTS: return subject + " " + self.LEVEL_SUBJECTS[record.levelno] return subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ # Mostly copied from Python 2.7 implementation. try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg, ) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class EventDaemonError(Exception): """ Base error for the Shotgun event system. """ pass class ConfigError(EventDaemonError): """ Used when an error is detected in the config file. """ pass if sys.platform == "win32": class WindowsService(win32serviceutil.ServiceFramework): """ Windows service wrapper """ _svc_name_ = "ShotgunEventDaemon" _svc_display_name_ = "Shotgun Event Handler" def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self._engine = Engine(_getConfigPath()) def SvcStop(self): """ Stop the Windows service. """ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop() def SvcDoRun(self): """ Start the Windows service. """ servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ""), ) self.main() def main(self): """ Primary Windows entry point """ self._engine.start() class LinuxDaemon(daemonizer.Daemon): """ Linux Daemon wrapper or wrapper used for foreground operation on Windows """ def __init__(self): self._engine = Engine(_getConfigPath()) super(LinuxDaemon, self).__init__( "shotgunEvent", self._engine.config.getEnginePIDFile() ) def start(self, daemonize=True): if not daemonize: # Setup the stdout logger handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(levelname)s:%(name)s:%(message)s") ) logging.getLogger().addHandler(handler) super(LinuxDaemon, self).start(daemonize) def _run(self): """ Start the engine's main loop """ self._engine.start() def _cleanup(self): self._engine.stop() def main(): """ """ if CURRENT_PYTHON_VERSION <= PYTHON_26: print( "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer." 
) return 3 action = None if len(sys.argv) > 1: action = sys.argv[1] if sys.platform == "win32" and action != "foreground": win32serviceutil.HandleCommandLine(WindowsService) return 0 if action: daemon = LinuxDaemon() # Find the function to call on the daemon and call it func = getattr(daemon, action, None) if action[:1] != "_" and func is not None: func() return 0 print("Unknown command: %s" % action) print("usage: %s start|stop|restart|foreground" % sys.argv[0]) return 2 def _getConfigPath(): """ Get the path of the shotgunEventDaemon configuration file. """ paths = ["/etc", os.path.dirname(__file__)] # Get the current path of the daemon script scriptPath = sys.argv[0] if scriptPath != "" and scriptPath != "-c": # Make absolute path and eliminate any symlinks if any. scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) # Add the script's directory to the paths we'll search for the config. paths[:0] = [os.path.dirname(scriptPath)] # Search for a config file. for path in paths: path = os.path.join(path, "shotgunEventDaemon.conf") if os.path.exists(path): return path # No config file was found raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths)) if __name__ == "__main__": sys.exit(main())
process
Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict}
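Callbacks reach this `process` method only after being registered through a plugin's `registerCallbacks` entry point, visible in the source below. The following sketch shows what such a plugin file could look like; the script name/key strings, the event type, and the callback body are illustrative placeholders, not values taken from this dataset.

```python
# Minimal shotgunEvents plugin sketch. The daemon imports this file,
# calls registerCallbacks(reg) with a Registrar, and later dispatches
# matching events to the callback via Callback.process(event).
def registerCallbacks(reg):
    reg.registerCallback(
        "my_script_name",                             # sgScriptName (placeholder)
        "my_script_key",                              # sgScriptKey (placeholder)
        logStatusChange,                              # callable invoked by process()
        {"Shotgun_Shot_Change": ["sg_status_list"]},  # matchEvents filter (placeholder)
        None,                                         # args handed back to the callback
    )


def logStatusChange(sg, logger, event, args):
    # Signature mirrors the dispatch call inside Callback.process:
    # self._callback(self._shotgun, self._logger, event, self._args)
    logger.info("Status changed in event %d.", event["id"])
```

The `matchEvents` dict mirrors the filter consulted by `Callback.canProcess`: each event type maps to a list of attribute names, with `"*"` acting as a wildcard at either level.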
#!/usr/bin/env python # # Init file for Shotgun event daemon # # chkconfig: 345 99 00 # description: Shotgun event daemon # ### BEGIN INIT INFO # Provides: shotgunEvent # Required-Start: $network # Should-Start: $remote_fs # Required-Stop: $network # Should-Stop: $remote_fs # Default-Start: 2 3 4 5 # Short-Description: Shotgun event daemon # Description: Shotgun event daemon ### END INIT INFO """ For an overview of shotgunEvents, please see raw documentation in the docs folder or an html compiled version at: http://shotgunsoftware.github.com/shotgunEvents """ from __future__ import print_function __version__ = "1.0" __version_info__ = (1, 0) # Suppress the deprecation warning about imp until we get around to replacing it import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import imp import datetime import logging import logging.handlers import os import pprint import socket import sys import time import traceback from six.moves import configparser import six.moves.cPickle as pickle from distutils.version import StrictVersion if sys.platform == "win32": import win32serviceutil import win32service import win32event import servicemanager import daemonizer import shotgun_api3 as sg from shotgun_api3.lib.sgtimezone import SgTimezone SG_TIMEZONE = SgTimezone() CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0]) PYTHON_26 = StrictVersion("2.6") PYTHON_27 = StrictVersion("2.7") EMAIL_FORMAT_STRING = """Time: %(asctime)s Logger: %(name)s Path: %(pathname)s Function: %(funcName)s Line: %(lineno)d %(message)s""" def _setFilePathOnLogger(logger, path): # Remove any previous handler. _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler) # Add the file handler handler = logging.handlers.TimedRotatingFileHandler( path, "midnight", backupCount=10 ) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) def _removeHandlersFromLogger(logger, handlerTypes=None): """ Remove all handlers or handlers of a specified type from a logger. @param logger: The logger who's handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses. """ for handler in logger.handlers: if handlerTypes is None or isinstance(handler, handlerTypes): logger.removeHandler(handler) def _addMailHandlerToLogger( logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None, ): """ Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler. 
""" if smtpServer and fromAddr and toAddrs and emailSubject: mailHandler = CustomSMTPHandler( smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure ) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler) class Config(configparser.SafeConfigParser): def __init__(self, path): configparser.SafeConfigParser.__init__(self, os.environ) self.read(path) def getShotgunURL(self): return self.get("shotgun", "server") def getEngineScriptName(self): return self.get("shotgun", "name") def getEngineScriptKey(self): return self.get("shotgun", "key") def getEngineProxyServer(self): try: proxy_server = self.get("shotgun", "proxy_server").strip() if not proxy_server: return None return proxy_server except configparser.NoOptionError: return None def getEventIdFile(self): return self.get("daemon", "eventIdFile") def getEnginePIDFile(self): return self.get("daemon", "pidFile") def getPluginPaths(self): return [s.strip() for s in self.get("plugins", "paths").split(",")] def getSMTPServer(self): return self.get("emails", "server") def getSMTPPort(self): if self.has_option("emails", "port"): return self.getint("emails", "port") return 25 def getFromAddr(self): return self.get("emails", "from") def getToAddrs(self): return [s.strip() for s in self.get("emails", "to").split(",")] def getEmailSubject(self): return self.get("emails", "subject") def getEmailUsername(self): if self.has_option("emails", "username"): return self.get("emails", "username") return None def getEmailPassword(self): if self.has_option("emails", "password"): return self.get("emails", "password") return None def getSecureSMTP(self): if self.has_option("emails", "useTLS"): return self.getboolean("emails", "useTLS") or False return False def getLogMode(self): return self.getint("daemon", "logMode") def getLogLevel(self): return self.getint("daemon", "logging") def getMaxEventBatchSize(self): if self.has_option("daemon", "max_event_batch_size"): return self.getint("daemon", "max_event_batch_size") return 500 def getLogFile(self, filename=None): if filename is None: if self.has_option("daemon", "logFile"): filename = self.get("daemon", "logFile") else: raise ConfigError("The config file has no logFile option.") if self.has_option("daemon", "logPath"): path = self.get("daemon", "logPath") if not os.path.exists(path): os.makedirs(path) elif not os.path.isdir(path): raise ConfigError( "The logPath value in the config should point to a directory." ) path = os.path.join(path, filename) else: path = filename return path def getTimingLogFile(self): if ( not self.has_option("daemon", "timing_log") or self.get("daemon", "timing_log") != "on" ): return None return self.getLogFile() + ".timing" class Engine(object): """ The engine holds the main loop of event processing. 
""" def __init__(self, configPath): """ """ self._continue = True self._eventIdData = {} # Read/parse the config self.config = Config(configPath) # Get config values self._pluginCollections = [ PluginCollection(self, s) for s in self.config.getPluginPaths() ] self._sg = sg.Shotgun( self.config.getShotgunURL(), self.config.getEngineScriptName(), self.config.getEngineScriptKey(), http_proxy=self.config.getEngineProxyServer(), ) self._max_conn_retries = self.config.getint("daemon", "max_conn_retries") self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep") self._fetch_interval = self.config.getint("daemon", "fetch_interval") self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid") # Setup the loggers for the main engine if self.config.getLogMode() == 0: # Set the root logger for file output. rootLogger = logging.getLogger() rootLogger.config = self.config _setFilePathOnLogger(rootLogger, self.config.getLogFile()) print(self.config.getLogFile()) # Set the engine logger for email output. self.log = logging.getLogger("engine") self.setEmailsOnLogger(self.log, True) else: # Set the engine logger for file and email output. self.log = logging.getLogger("engine") self.log.config = self.config _setFilePathOnLogger(self.log, self.config.getLogFile()) self.setEmailsOnLogger(self.log, True) self.log.setLevel(self.config.getLogLevel()) # Setup the timing log file timing_log_filename = self.config.getTimingLogFile() if timing_log_filename: self.timing_logger = logging.getLogger("timing") self.timing_logger.setLevel(self.config.getLogLevel()) _setFilePathOnLogger(self.timing_logger, timing_log_filename) else: self.timing_logger = None super(Engine, self).__init__() def setEmailsOnLogger(self, logger, emails): # Configure the logger for email output _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler) if emails is False: return smtpServer = self.config.getSMTPServer() smtpPort = self.config.getSMTPPort() fromAddr = self.config.getFromAddr() emailSubject = self.config.getEmailSubject() username = self.config.getEmailUsername() password = self.config.getEmailPassword() if self.config.getSecureSMTP(): secure = (None, None) else: secure = None if emails is True: toAddrs = self.config.getToAddrs() elif isinstance(emails, (list, tuple)): toAddrs = emails else: msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s." raise ValueError(msg % type(emails)) _addMailHandlerToLogger( logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure, ) def start(self): """ Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started. """ # TODO: Take value from config socket.setdefaulttimeout(60) # Notify which version of shotgun api we are using self.log.info("Using SG Python API version %s" % sg.__version__) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning("Keyboard interrupt. Cleaning up...") except Exception as err: msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s" self.log.critical(msg, type(err), traceback.format_exc(err)) def _loadEventIdData(self): """ Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. 
In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there. """ eventIdFile = self.config.getEventIdFile() if eventIdFile and os.path.exists(eventIdFile): try: fh = open(eventIdFile, "rb") try: self._eventIdData = pickle.load(fh) # Provide event id info to the plugin collections. Once # they've figured out what to do with it, ask them for their # last processed id. noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) # If we don't have a state it means there's no match # in the id file. First we'll search to see the latest id a # matching plugin name has elsewhere in the id file. We do # this as a fallback in case the plugins directory has been # moved. If there's no match, use the latest event id # in Shotgun. if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for pluginName, pluginState in collection.items(): if pluginName in maxPluginStates.keys(): if pluginState[0] > maxPluginStates[pluginName][0]: maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if pluginName in maxPluginStates.keys(): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() # Backwards compatibility: # Reopen the file to try to read an old-style int fh = open(eventIdFile, "rb") line = fh.readline().strip() if line.isdigit(): # The _loadEventIdData got an old-style id file containing a single # int which is the last id properly processed. lastEventId = int(line) self.log.debug( "Read last event id (%d) from file.", lastEventId ) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError( "Could not load event id from file.\n\n%s" % traceback.format_exc(err) ) else: # No id file? # Get the event data from the database. lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData() def _getLastEventIdFromDatabase(self): conn_attempts = 0 lastEventId = None while lastEventId is None: order = [{"column": "id", "direction": "desc"}] try: result = self._sg.find_one( "EventLogEntry", filters=[], fields=["id"], order=order ) except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) else: lastEventId = result["id"] self.log.info("Last event id (%d) from the SG database.", lastEventId) return lastEventId def _mainLoop(self): """ Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occured during registration), skip it. 
- If a callback is deemed "inactive" (an error occured during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop. """ self.log.debug("Starting the event processing loop.") while self._continue: # Process events events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() # if we're lagging behind Shotgun, we received a full batch of events # skip the sleep() call in this case if len(events) < self.config.getMaxEventBatchSize(): time.sleep(self._fetch_interval) # Reload plugins for collection in self._pluginCollections: collection.load() # Make sure that newly loaded events have proper state. self._loadEventIdData() self.log.debug("Shuting down event processing loop.") def stop(self): self._continue = False def _getNewEvents(self): """ Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries. """ nextEventId = None for newId in [ coll.getNextUnprocessedEventId() for coll in self._pluginCollections ]: if newId is not None and (nextEventId is None or newId < nextEventId): nextEventId = newId if nextEventId is not None: filters = [["id", "greater_than", nextEventId - 1]] fields = [ "id", "event_type", "attribute_name", "meta", "entity", "user", "project", "session_uuid", "created_at", ] order = [{"column": "id", "direction": "asc"}] conn_attempts = 0 while True: try: events = self._sg.find( "EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize(), ) if events: self.log.debug( "Got %d events: %d to %d.", len(events), events[0]["id"], events[-1]["id"], ) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts( conn_attempts, str(err) ) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return [] def _saveEventIdData(self): """ Save an event Id to persistant storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing. """ eventIdFile = self.config.getEventIdFile() if eventIdFile is not None: for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for colPath, state in self._eventIdData.items(): if state: try: with open(eventIdFile, "wb") as fh: # Use protocol 2 so it can also be loaded in Python 2 pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error( "Can not write event id data to %s.\n\n%s", eventIdFile, traceback.format_exc(err), ) break else: self.log.warning("No state was found. Not saving to disk.") def _checkConnectionAttempts(self, conn_attempts, msg): conn_attempts += 1 if conn_attempts == self._max_conn_retries: self.log.error( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) conn_attempts = 0 time.sleep(self._conn_retry_sleep) else: self.log.warning( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) return conn_attempts class PluginCollection(object): """ A group of plugin files in a location on the disk. 
""" def __init__(self, engine, path): if not os.path.isdir(path): raise ValueError("Invalid path: %s" % path) self._engine = engine self.path = path self._plugins = {} self._stateData = {} def setState(self, state): if isinstance(state, int): for plugin in self: plugin.setState(state) self._stateData[plugin.getName()] = plugin.getState() else: self._stateData = state for plugin in self: pluginState = self._stateData.get(plugin.getName()) if pluginState: plugin.setState(pluginState) def getState(self): for plugin in self: self._stateData[plugin.getName()] = plugin.getState() return self._stateData def getNextUnprocessedEventId(self): eId = None for plugin in self: if not plugin.isActive(): continue newId = plugin.getNextUnprocessedEventId() if newId is not None and (eId is None or newId < eId): eId = newId return eId def process(self, event): for plugin in self: if plugin.isActive(): plugin.process(event) else: plugin.logger.debug("Skipping: inactive.") def load(self): """ Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them. """ newPlugins = {} for basename in os.listdir(self.path): if not basename.endswith(".py") or basename.startswith("."): continue if basename in self._plugins: newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin( self._engine, os.path.join(self.path, basename) ) newPlugins[basename].load() self._plugins = newPlugins def __iter__(self): for basename in sorted(self._plugins.keys()): yield self._plugins[basename] class Plugin(object): """ The plugin class represents a file on disk which contains one or more callbacks. """ def __init__(self, engine, path): """ @param engine: The engine that instanciated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file. """ self._engine = engine self._path = path if not os.path.isfile(path): raise ValueError("The path to the plugin is not a valid file - %s." % path) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} # Setup the plugin's logger self.logger = logging.getLogger("plugin." + self.getName()) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if self._engine.config.getLogMode() == 1: _setFilePathOnLogger( self.logger, self._engine.config.getLogFile("plugin." + self.getName()) ) def getName(self): return self._pluginName def setState(self, state): if isinstance(state, int): self._lastEventId = state elif isinstance(state, tuple): self._lastEventId, self._backlog = state else: raise ValueError("Unknown state type: %s." % type(state)) def getState(self): return (self._lastEventId, self._backlog) def getNextUnprocessedEventId(self): if self._lastEventId: nextId = self._lastEventId + 1 else: nextId = None now = datetime.datetime.now() for k in list(self._backlog): v = self._backlog[k] if v < now: self.logger.warning("Timeout elapsed on backlog event id %d.", k) del self._backlog[k] elif nextId is None or k < nextId: nextId = k return nextId def isActive(self): """ Is the current plugin active. Should it's callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. 
@rtype: I{bool} """ return self._active def setEmails(self, *emails): """ Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}. """ self._engine.setEmailsOnLogger(self.logger, emails) def load(self): """ Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return. """ # Check file mtime mtime = os.path.getmtime(self._path) if self._mtime is None: self._engine.log.info("Loading plugin at %s" % self._path) elif self._mtime < mtime: self._engine.log.info("Reloading plugin at %s" % self._path) else: # The mtime of file is equal or older. We don't need to do anything. return # Reset values self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error( "Could not load the plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) return regFunc = getattr(plugin, "registerCallbacks", None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical( "Error running register callback function from plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) self._active = False else: self._engine.log.critical( "Did not find a registerCallbacks function in plugin at %s.", self._path ) self._active = False def registerCallback( self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True, ): """ Register a callback in the plugin. """ global sg sgConnection = sg.Shotgun( self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer(), ) self._callbacks.append( Callback( callback, self, self._engine, sgConnection, matchEvents, args, stopOnError, ) ) def process(self, event): if event["id"] in self._backlog: if self._process(event): self.logger.info("Processed id %d from backlog." % event["id"]) del self._backlog[event["id"]] self._updateLastEventId(event) elif self._lastEventId is not None and event["id"] <= self._lastEventId: msg = "Event %d is too old. Last event processed was (%d)." self.logger.debug(msg, event["id"], self._lastEventId) else: if self._process(event): self._updateLastEventId(event) return self._active def _process(self, event): for callback in self: if callback.isActive(): if callback.canProcess(event): msg = "Dispatching event %d to callback %s." self.logger.debug(msg, event["id"], str(callback)) if not callback.process(event): # A callback in the plugin failed. Deactivate the whole # plugin. self._active = False break else: msg = "Skipping inactive callback %s in plugin." 
self.logger.debug(msg, str(callback)) return self._active def _updateLastEventId(self, event): BACKLOG_TIMEOUT = ( 5 # time in minutes after which we consider a pending event won't happen ) if self._lastEventId is not None and event["id"] > self._lastEventId + 1: event_date = event["created_at"].replace(tzinfo=None) if datetime.datetime.now() > ( event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT) ): # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event # with a lower id should have shown up in the EventLog by now if it actually happened if event["id"] == self._lastEventId + 2: self.logger.info( "Event %d never happened - ignoring.", self._lastEventId + 1 ) else: self.logger.info( "Events %d-%d never happened - ignoring.", self._lastEventId + 1, event["id"] - 1, ) else: # in this case, we want to add the missing events to the backlog as they could show up in the # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range # them to show up until they expire expiration = datetime.datetime.now() + datetime.timedelta( minutes=BACKLOG_TIMEOUT ) for skippedId in range(self._lastEventId + 1, event["id"]): self.logger.info("Adding event id %d to backlog.", skippedId) self._backlog[skippedId] = expiration self._lastEventId = event["id"] def __iter__(self): """ A plugin is iterable and will iterate over all its L{Callback} objects. """ return self._callbacks.__iter__() def __str__(self): """ Provide the name of the plugin when it is cast as string. @return: The name of the plugin. @rtype: I{str} """ return self.getName() class Registrar(object): """ See public API docs in docs folder. """ def __init__(self, plugin): """ Wrap a plugin so it can be passed to a user. """ self._plugin = plugin self._allowed = ["logger", "setEmails", "registerCallback"] def getLogger(self): """ Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger} """ # TODO: Fix this ugly protected member access return self.logger def __getattr__(self, name): if name in self._allowed: return getattr(self._plugin, name) raise AttributeError( "type object '%s' has no attribute '%s'" % (type(self).__name__, name) ) class Callback(object): """ A part of a plugin that can be called to process a Shotgun event. """ def __init__( self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True, ): """ @param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any datastructure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object. """ if not callable(callback): raise TypeError( "The callback must be a callable object (function, method or callable class instance)." 
) self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True # Find a name for this object if hasattr(callback, "__name__"): self._name = callback.__name__ elif hasattr(callback, "__class__") and hasattr(callback, "__call__"): self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback))) else: raise ValueError( "registerCallback should be called with a function or a callable object instance as callback argument." ) # TODO: Get rid of this protected member access self._logger = logging.getLogger(plugin.logger.name + "." + self._name) self._logger.config = self._engine.config def canProcess(self, event): if not self._matchEvents: return True if "*" in self._matchEvents: eventType = "*" else: eventType = event["event_type"] if eventType not in self._matchEvents: return False attributes = self._matchEvents[eventType] if attributes is None or "*" in attributes: return True if event["attribute_name"] and event["attribute_name"] in attributes: return True return False # MASKED: process function (lines 1087-1142) def _prettyTimeDeltaFormat(self, time_delta): days, remainder = divmod(time_delta.total_seconds(), 86400) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) return "%02d:%02d:%02d:%02d.%06d" % ( days, hours, minutes, seconds, time_delta.microseconds, ) def isActive(self): """ Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool} """ return self._active def __str__(self): """ The name of the callback. @return: The name of the callback @rtype: I{str} """ return self._name class CustomSMTPHandler(logging.handlers.SMTPHandler): """ A custom SMTPHandler subclass that will adapt it's subject depending on the error severity. """ LEVEL_SUBJECTS = { logging.ERROR: "ERROR - SG event daemon.", logging.CRITICAL: "CRITICAL - SG event daemon.", } def __init__( self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None ): args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials] if credentials: # Python 2.7 implemented the secure argument if CURRENT_PYTHON_VERSION >= PYTHON_27: args.append(secure) else: self.secure = secure logging.handlers.SMTPHandler.__init__(self, *args) def getSubject(self, record): subject = logging.handlers.SMTPHandler.getSubject(self, record) if record.levelno in self.LEVEL_SUBJECTS: return subject + " " + self.LEVEL_SUBJECTS[record.levelno] return subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ # Mostly copied from Python 2.7 implementation. try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg, ) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class EventDaemonError(Exception): """ Base error for the Shotgun event system. 
""" pass class ConfigError(EventDaemonError): """ Used when an error is detected in the config file. """ pass if sys.platform == "win32": class WindowsService(win32serviceutil.ServiceFramework): """ Windows service wrapper """ _svc_name_ = "ShotgunEventDaemon" _svc_display_name_ = "Shotgun Event Handler" def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self._engine = Engine(_getConfigPath()) def SvcStop(self): """ Stop the Windows service. """ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop() def SvcDoRun(self): """ Start the Windows service. """ servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ""), ) self.main() def main(self): """ Primary Windows entry point """ self._engine.start() class LinuxDaemon(daemonizer.Daemon): """ Linux Daemon wrapper or wrapper used for foreground operation on Windows """ def __init__(self): self._engine = Engine(_getConfigPath()) super(LinuxDaemon, self).__init__( "shotgunEvent", self._engine.config.getEnginePIDFile() ) def start(self, daemonize=True): if not daemonize: # Setup the stdout logger handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(levelname)s:%(name)s:%(message)s") ) logging.getLogger().addHandler(handler) super(LinuxDaemon, self).start(daemonize) def _run(self): """ Start the engine's main loop """ self._engine.start() def _cleanup(self): self._engine.stop() def main(): """ """ if CURRENT_PYTHON_VERSION <= PYTHON_26: print( "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer." ) return 3 action = None if len(sys.argv) > 1: action = sys.argv[1] if sys.platform == "win32" and action != "foreground": win32serviceutil.HandleCommandLine(WindowsService) return 0 if action: daemon = LinuxDaemon() # Find the function to call on the daemon and call it func = getattr(daemon, action, None) if action[:1] != "_" and func is not None: func() return 0 print("Unknown command: %s" % action) print("usage: %s start|stop|restart|foreground" % sys.argv[0]) return 2 def _getConfigPath(): """ Get the path of the shotgunEventDaemon configuration file. """ paths = ["/etc", os.path.dirname(__file__)] # Get the current path of the daemon script scriptPath = sys.argv[0] if scriptPath != "" and scriptPath != "-c": # Make absolute path and eliminate any symlinks if any. scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) # Add the script's directory to the paths we'll search for the config. paths[:0] = [os.path.dirname(scriptPath)] # Search for a config file. for path in paths: path = os.path.join(path, "shotgunEventDaemon.conf") if os.path.exists(path): return path # No config file was found raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths)) if __name__ == "__main__": sys.exit(main())
def process(self, event): """ Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict} """ # set session_uuid for UI updates if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event["session_uuid"]) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True # Get the local variables of the frame of our plugin tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s" self._logger.critical( msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals) ) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace("plugin.", "") end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat(end_time - start_time) delay = self._prettyTimeDeltaFormat(start_time - event["created_at"]) msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s" data = [ event["id"], event["created_at"].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay, ] self._engine.timing_logger.info(msg_format, *data) return self._active
1087
1142
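One detail of the implementation above worth spelling out: the `duration` and `delay` fields in the timing log are rendered by `_prettyTimeDeltaFormat` (defined elsewhere in the file) as a days:hours:minutes:seconds.microseconds string. A minimal, self-contained sketch of that arithmetic, reproduced here for illustration:

```python
import datetime


def prettyTimeDeltaFormat(time_delta):
    # Same arithmetic as Callback._prettyTimeDeltaFormat: split the total
    # seconds into days/hours/minutes/seconds, then append the timedelta's
    # microseconds field.
    days, remainder = divmod(time_delta.total_seconds(), 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%02d:%02d:%02d:%02d.%06d" % (
        days, hours, minutes, seconds, time_delta.microseconds
    )


delta = datetime.timedelta(hours=1, minutes=2, seconds=3, microseconds=40)
print(prettyTimeDeltaFormat(delta))  # -> 00:01:02:03.000040
```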
#!/usr/bin/env python # # Init file for Shotgun event daemon # # chkconfig: 345 99 00 # description: Shotgun event daemon # ### BEGIN INIT INFO # Provides: shotgunEvent # Required-Start: $network # Should-Start: $remote_fs # Required-Stop: $network # Should-Stop: $remote_fs # Default-Start: 2 3 4 5 # Short-Description: Shotgun event daemon # Description: Shotgun event daemon ### END INIT INFO """ For an overview of shotgunEvents, please see raw documentation in the docs folder or an html compiled version at: http://shotgunsoftware.github.com/shotgunEvents """ from __future__ import print_function __version__ = "1.0" __version_info__ = (1, 0) # Suppress the deprecation warning about imp until we get around to replacing it import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import imp import datetime import logging import logging.handlers import os import pprint import socket import sys import time import traceback from six.moves import configparser import six.moves.cPickle as pickle from distutils.version import StrictVersion if sys.platform == "win32": import win32serviceutil import win32service import win32event import servicemanager import daemonizer import shotgun_api3 as sg from shotgun_api3.lib.sgtimezone import SgTimezone SG_TIMEZONE = SgTimezone() CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0]) PYTHON_26 = StrictVersion("2.6") PYTHON_27 = StrictVersion("2.7") EMAIL_FORMAT_STRING = """Time: %(asctime)s Logger: %(name)s Path: %(pathname)s Function: %(funcName)s Line: %(lineno)d %(message)s""" def _setFilePathOnLogger(logger, path): # Remove any previous handler. _removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler) # Add the file handler handler = logging.handlers.TimedRotatingFileHandler( path, "midnight", backupCount=10 ) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) def _removeHandlersFromLogger(logger, handlerTypes=None): """ Remove all handlers or handlers of a specified type from a logger. @param logger: The logger who's handlers should be processed. @type logger: A logging.Logger object @param handlerTypes: A type of handler or list/tuple of types of handlers that should be removed from the logger. If I{None}, all handlers are removed. @type handlerTypes: L{None}, a logging.Handler subclass or I{list}/I{tuple} of logging.Handler subclasses. """ for handler in logger.handlers: if handlerTypes is None or isinstance(handler, handlerTypes): logger.removeHandler(handler) def _addMailHandlerToLogger( logger, smtpServer, fromAddr, toAddrs, emailSubject, username=None, password=None, secure=None, ): """ Configure a logger with a handler that sends emails to specified addresses. The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}. @note: Any SMTPHandler already connected to the logger will be removed. @param logger: The logger to configure @type logger: A logging.Logger instance @param toAddrs: The addresses to send the email to. @type toAddrs: A list of email addresses that will be passed on to the SMTPHandler. 
""" if smtpServer and fromAddr and toAddrs and emailSubject: mailHandler = CustomSMTPHandler( smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure ) mailHandler.setLevel(logging.ERROR) mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING) mailHandler.setFormatter(mailFormatter) logger.addHandler(mailHandler) class Config(configparser.SafeConfigParser): def __init__(self, path): configparser.SafeConfigParser.__init__(self, os.environ) self.read(path) def getShotgunURL(self): return self.get("shotgun", "server") def getEngineScriptName(self): return self.get("shotgun", "name") def getEngineScriptKey(self): return self.get("shotgun", "key") def getEngineProxyServer(self): try: proxy_server = self.get("shotgun", "proxy_server").strip() if not proxy_server: return None return proxy_server except configparser.NoOptionError: return None def getEventIdFile(self): return self.get("daemon", "eventIdFile") def getEnginePIDFile(self): return self.get("daemon", "pidFile") def getPluginPaths(self): return [s.strip() for s in self.get("plugins", "paths").split(",")] def getSMTPServer(self): return self.get("emails", "server") def getSMTPPort(self): if self.has_option("emails", "port"): return self.getint("emails", "port") return 25 def getFromAddr(self): return self.get("emails", "from") def getToAddrs(self): return [s.strip() for s in self.get("emails", "to").split(",")] def getEmailSubject(self): return self.get("emails", "subject") def getEmailUsername(self): if self.has_option("emails", "username"): return self.get("emails", "username") return None def getEmailPassword(self): if self.has_option("emails", "password"): return self.get("emails", "password") return None def getSecureSMTP(self): if self.has_option("emails", "useTLS"): return self.getboolean("emails", "useTLS") or False return False def getLogMode(self): return self.getint("daemon", "logMode") def getLogLevel(self): return self.getint("daemon", "logging") def getMaxEventBatchSize(self): if self.has_option("daemon", "max_event_batch_size"): return self.getint("daemon", "max_event_batch_size") return 500 def getLogFile(self, filename=None): if filename is None: if self.has_option("daemon", "logFile"): filename = self.get("daemon", "logFile") else: raise ConfigError("The config file has no logFile option.") if self.has_option("daemon", "logPath"): path = self.get("daemon", "logPath") if not os.path.exists(path): os.makedirs(path) elif not os.path.isdir(path): raise ConfigError( "The logPath value in the config should point to a directory." ) path = os.path.join(path, filename) else: path = filename return path def getTimingLogFile(self): if ( not self.has_option("daemon", "timing_log") or self.get("daemon", "timing_log") != "on" ): return None return self.getLogFile() + ".timing" class Engine(object): """ The engine holds the main loop of event processing. 
""" def __init__(self, configPath): """ """ self._continue = True self._eventIdData = {} # Read/parse the config self.config = Config(configPath) # Get config values self._pluginCollections = [ PluginCollection(self, s) for s in self.config.getPluginPaths() ] self._sg = sg.Shotgun( self.config.getShotgunURL(), self.config.getEngineScriptName(), self.config.getEngineScriptKey(), http_proxy=self.config.getEngineProxyServer(), ) self._max_conn_retries = self.config.getint("daemon", "max_conn_retries") self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep") self._fetch_interval = self.config.getint("daemon", "fetch_interval") self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid") # Setup the loggers for the main engine if self.config.getLogMode() == 0: # Set the root logger for file output. rootLogger = logging.getLogger() rootLogger.config = self.config _setFilePathOnLogger(rootLogger, self.config.getLogFile()) print(self.config.getLogFile()) # Set the engine logger for email output. self.log = logging.getLogger("engine") self.setEmailsOnLogger(self.log, True) else: # Set the engine logger for file and email output. self.log = logging.getLogger("engine") self.log.config = self.config _setFilePathOnLogger(self.log, self.config.getLogFile()) self.setEmailsOnLogger(self.log, True) self.log.setLevel(self.config.getLogLevel()) # Setup the timing log file timing_log_filename = self.config.getTimingLogFile() if timing_log_filename: self.timing_logger = logging.getLogger("timing") self.timing_logger.setLevel(self.config.getLogLevel()) _setFilePathOnLogger(self.timing_logger, timing_log_filename) else: self.timing_logger = None super(Engine, self).__init__() def setEmailsOnLogger(self, logger, emails): # Configure the logger for email output _removeHandlersFromLogger(logger, logging.handlers.SMTPHandler) if emails is False: return smtpServer = self.config.getSMTPServer() smtpPort = self.config.getSMTPPort() fromAddr = self.config.getFromAddr() emailSubject = self.config.getEmailSubject() username = self.config.getEmailUsername() password = self.config.getEmailPassword() if self.config.getSecureSMTP(): secure = (None, None) else: secure = None if emails is True: toAddrs = self.config.getToAddrs() elif isinstance(emails, (list, tuple)): toAddrs = emails else: msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s." raise ValueError(msg % type(emails)) _addMailHandlerToLogger( logger, (smtpServer, smtpPort), fromAddr, toAddrs, emailSubject, username, password, secure, ) def start(self): """ Start the processing of events. The last processed id is loaded up from persistent storage on disk and the main loop is started. """ # TODO: Take value from config socket.setdefaulttimeout(60) # Notify which version of shotgun api we are using self.log.info("Using SG Python API version %s" % sg.__version__) try: for collection in self._pluginCollections: collection.load() self._loadEventIdData() self._mainLoop() except KeyboardInterrupt: self.log.warning("Keyboard interrupt. Cleaning up...") except Exception as err: msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s" self.log.critical(msg, type(err), traceback.format_exc(err)) def _loadEventIdData(self): """ Load the last processed event id from the disk If no event has ever been processed or if the eventIdFile has been deleted from disk, no id will be recoverable. 
In this case, we will try contacting Shotgun to get the latest event's id and we'll start processing from there. """ eventIdFile = self.config.getEventIdFile() if eventIdFile and os.path.exists(eventIdFile): try: fh = open(eventIdFile, "rb") try: self._eventIdData = pickle.load(fh) # Provide event id info to the plugin collections. Once # they've figured out what to do with it, ask them for their # last processed id. noStateCollections = [] for collection in self._pluginCollections: state = self._eventIdData.get(collection.path) if state: collection.setState(state) else: noStateCollections.append(collection) # If we don't have a state it means there's no match # in the id file. First we'll search to see the latest id a # matching plugin name has elsewhere in the id file. We do # this as a fallback in case the plugins directory has been # moved. If there's no match, use the latest event id # in Shotgun. if noStateCollections: maxPluginStates = {} for collection in self._eventIdData.values(): for pluginName, pluginState in collection.items(): if pluginName in maxPluginStates.keys(): if pluginState[0] > maxPluginStates[pluginName][0]: maxPluginStates[pluginName] = pluginState else: maxPluginStates[pluginName] = pluginState lastEventId = self._getLastEventIdFromDatabase() for collection in noStateCollections: state = collection.getState() for pluginName in state.keys(): if pluginName in maxPluginStates.keys(): state[pluginName] = maxPluginStates[pluginName] else: state[pluginName] = lastEventId collection.setState(state) except pickle.UnpicklingError: fh.close() # Backwards compatibility: # Reopen the file to try to read an old-style int fh = open(eventIdFile, "rb") line = fh.readline().strip() if line.isdigit(): # The _loadEventIdData got an old-style id file containing a single # int which is the last id properly processed. lastEventId = int(line) self.log.debug( "Read last event id (%d) from file.", lastEventId ) for collection in self._pluginCollections: collection.setState(lastEventId) fh.close() except OSError as err: raise EventDaemonError( "Could not load event id from file.\n\n%s" % traceback.format_exc(err) ) else: # No id file? # Get the event data from the database. lastEventId = self._getLastEventIdFromDatabase() if lastEventId: for collection in self._pluginCollections: collection.setState(lastEventId) self._saveEventIdData() def _getLastEventIdFromDatabase(self): conn_attempts = 0 lastEventId = None while lastEventId is None: order = [{"column": "id", "direction": "desc"}] try: result = self._sg.find_one( "EventLogEntry", filters=[], fields=["id"], order=order ) except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err)) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) else: lastEventId = result["id"] self.log.info("Last event id (%d) from the SG database.", lastEventId) return lastEventId def _mainLoop(self): """ Run the event processing loop. General behavior: - Load plugins from disk - see L{load} method. - Get new events from Shotgun - Loop through events - Loop through each plugin - Loop through each callback - Send the callback an event - Once all callbacks are done in all plugins, save the eventId - Go to the next event - Once all events are processed, wait for the defined fetch interval time and start over. Caveats: - If a plugin is deemed "inactive" (an error occured during registration), skip it. 
- If a callback is deemed "inactive" (an error occured during callback execution), skip it. - Each time through the loop, if the pidFile is gone, stop. """ self.log.debug("Starting the event processing loop.") while self._continue: # Process events events = self._getNewEvents() for event in events: for collection in self._pluginCollections: collection.process(event) self._saveEventIdData() # if we're lagging behind Shotgun, we received a full batch of events # skip the sleep() call in this case if len(events) < self.config.getMaxEventBatchSize(): time.sleep(self._fetch_interval) # Reload plugins for collection in self._pluginCollections: collection.load() # Make sure that newly loaded events have proper state. self._loadEventIdData() self.log.debug("Shuting down event processing loop.") def stop(self): self._continue = False def _getNewEvents(self): """ Fetch new events from Shotgun. @return: Recent events that need to be processed by the engine. @rtype: I{list} of Shotgun event dictionaries. """ nextEventId = None for newId in [ coll.getNextUnprocessedEventId() for coll in self._pluginCollections ]: if newId is not None and (nextEventId is None or newId < nextEventId): nextEventId = newId if nextEventId is not None: filters = [["id", "greater_than", nextEventId - 1]] fields = [ "id", "event_type", "attribute_name", "meta", "entity", "user", "project", "session_uuid", "created_at", ] order = [{"column": "id", "direction": "asc"}] conn_attempts = 0 while True: try: events = self._sg.find( "EventLogEntry", filters, fields, order, limit=self.config.getMaxEventBatchSize(), ) if events: self.log.debug( "Got %d events: %d to %d.", len(events), events[0]["id"], events[-1]["id"], ) return events except (sg.ProtocolError, sg.ResponseError, socket.error) as err: conn_attempts = self._checkConnectionAttempts( conn_attempts, str(err) ) except Exception as err: msg = "Unknown error: %s" % str(err) conn_attempts = self._checkConnectionAttempts(conn_attempts, msg) return [] def _saveEventIdData(self): """ Save an event Id to persistant storage. Next time the engine is started it will try to read the event id from this location to know at which event it should start processing. """ eventIdFile = self.config.getEventIdFile() if eventIdFile is not None: for collection in self._pluginCollections: self._eventIdData[collection.path] = collection.getState() for colPath, state in self._eventIdData.items(): if state: try: with open(eventIdFile, "wb") as fh: # Use protocol 2 so it can also be loaded in Python 2 pickle.dump(self._eventIdData, fh, protocol=2) except OSError as err: self.log.error( "Can not write event id data to %s.\n\n%s", eventIdFile, traceback.format_exc(err), ) break else: self.log.warning("No state was found. Not saving to disk.") def _checkConnectionAttempts(self, conn_attempts, msg): conn_attempts += 1 if conn_attempts == self._max_conn_retries: self.log.error( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) conn_attempts = 0 time.sleep(self._conn_retry_sleep) else: self.log.warning( "Unable to connect to SG (attempt %s of %s): %s", conn_attempts, self._max_conn_retries, msg, ) return conn_attempts class PluginCollection(object): """ A group of plugin files in a location on the disk. 
""" def __init__(self, engine, path): if not os.path.isdir(path): raise ValueError("Invalid path: %s" % path) self._engine = engine self.path = path self._plugins = {} self._stateData = {} def setState(self, state): if isinstance(state, int): for plugin in self: plugin.setState(state) self._stateData[plugin.getName()] = plugin.getState() else: self._stateData = state for plugin in self: pluginState = self._stateData.get(plugin.getName()) if pluginState: plugin.setState(pluginState) def getState(self): for plugin in self: self._stateData[plugin.getName()] = plugin.getState() return self._stateData def getNextUnprocessedEventId(self): eId = None for plugin in self: if not plugin.isActive(): continue newId = plugin.getNextUnprocessedEventId() if newId is not None and (eId is None or newId < eId): eId = newId return eId def process(self, event): for plugin in self: if plugin.isActive(): plugin.process(event) else: plugin.logger.debug("Skipping: inactive.") def load(self): """ Load plugins from disk. General behavior: - Loop on all paths. - Find all valid .py plugin files. - Loop on all plugin files. - For any new plugins, load them, otherwise, refresh them. """ newPlugins = {} for basename in os.listdir(self.path): if not basename.endswith(".py") or basename.startswith("."): continue if basename in self._plugins: newPlugins[basename] = self._plugins[basename] else: newPlugins[basename] = Plugin( self._engine, os.path.join(self.path, basename) ) newPlugins[basename].load() self._plugins = newPlugins def __iter__(self): for basename in sorted(self._plugins.keys()): yield self._plugins[basename] class Plugin(object): """ The plugin class represents a file on disk which contains one or more callbacks. """ def __init__(self, engine, path): """ @param engine: The engine that instanciated this plugin. @type engine: L{Engine} @param path: The path of the plugin file to load. @type path: I{str} @raise ValueError: If the path to the plugin is not a valid file. """ self._engine = engine self._path = path if not os.path.isfile(path): raise ValueError("The path to the plugin is not a valid file - %s." % path) self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0] self._active = True self._callbacks = [] self._mtime = None self._lastEventId = None self._backlog = {} # Setup the plugin's logger self.logger = logging.getLogger("plugin." + self.getName()) self.logger.config = self._engine.config self._engine.setEmailsOnLogger(self.logger, True) self.logger.setLevel(self._engine.config.getLogLevel()) if self._engine.config.getLogMode() == 1: _setFilePathOnLogger( self.logger, self._engine.config.getLogFile("plugin." + self.getName()) ) def getName(self): return self._pluginName def setState(self, state): if isinstance(state, int): self._lastEventId = state elif isinstance(state, tuple): self._lastEventId, self._backlog = state else: raise ValueError("Unknown state type: %s." % type(state)) def getState(self): return (self._lastEventId, self._backlog) def getNextUnprocessedEventId(self): if self._lastEventId: nextId = self._lastEventId + 1 else: nextId = None now = datetime.datetime.now() for k in list(self._backlog): v = self._backlog[k] if v < now: self.logger.warning("Timeout elapsed on backlog event id %d.", k) del self._backlog[k] elif nextId is None or k < nextId: nextId = k return nextId def isActive(self): """ Is the current plugin active. Should it's callbacks be run? @return: True if this plugin's callbacks should be run, False otherwise. 
@rtype: I{bool} """ return self._active def setEmails(self, *emails): """ Set the email addresses to whom this plugin should send errors. @param emails: See L{LogFactory.getLogger}'s emails argument for info. @type emails: A I{list}/I{tuple} of email addresses or I{bool}. """ self._engine.setEmailsOnLogger(self.logger, emails) def load(self): """ Load/Reload the plugin and all its callbacks. If a plugin has never been loaded it will be loaded normally. If the plugin has been loaded before it will be reloaded only if the file has been modified on disk. In this event callbacks will all be cleared and reloaded. General behavior: - Try to load the source of the plugin. - Try to find a function called registerCallbacks in the file. - Try to run the registration function. At every step along the way, if any error occurs the whole plugin will be deactivated and the function will return. """ # Check file mtime mtime = os.path.getmtime(self._path) if self._mtime is None: self._engine.log.info("Loading plugin at %s" % self._path) elif self._mtime < mtime: self._engine.log.info("Reloading plugin at %s" % self._path) else: # The mtime of file is equal or older. We don't need to do anything. return # Reset values self._mtime = mtime self._callbacks = [] self._active = True try: plugin = imp.load_source(self._pluginName, self._path) except: self._active = False self.logger.error( "Could not load the plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) return regFunc = getattr(plugin, "registerCallbacks", None) if callable(regFunc): try: regFunc(Registrar(self)) except: self._engine.log.critical( "Error running register callback function from plugin at %s.\n\n%s", self._path, traceback.format_exc(), ) self._active = False else: self._engine.log.critical( "Did not find a registerCallbacks function in plugin at %s.", self._path ) self._active = False def registerCallback( self, sgScriptName, sgScriptKey, callback, matchEvents=None, args=None, stopOnError=True, ): """ Register a callback in the plugin. """ global sg sgConnection = sg.Shotgun( self._engine.config.getShotgunURL(), sgScriptName, sgScriptKey, http_proxy=self._engine.config.getEngineProxyServer(), ) self._callbacks.append( Callback( callback, self, self._engine, sgConnection, matchEvents, args, stopOnError, ) ) def process(self, event): if event["id"] in self._backlog: if self._process(event): self.logger.info("Processed id %d from backlog." % event["id"]) del self._backlog[event["id"]] self._updateLastEventId(event) elif self._lastEventId is not None and event["id"] <= self._lastEventId: msg = "Event %d is too old. Last event processed was (%d)." self.logger.debug(msg, event["id"], self._lastEventId) else: if self._process(event): self._updateLastEventId(event) return self._active def _process(self, event): for callback in self: if callback.isActive(): if callback.canProcess(event): msg = "Dispatching event %d to callback %s." self.logger.debug(msg, event["id"], str(callback)) if not callback.process(event): # A callback in the plugin failed. Deactivate the whole # plugin. self._active = False break else: msg = "Skipping inactive callback %s in plugin." 
self.logger.debug(msg, str(callback)) return self._active def _updateLastEventId(self, event): BACKLOG_TIMEOUT = ( 5 # time in minutes after which we consider a pending event won't happen ) if self._lastEventId is not None and event["id"] > self._lastEventId + 1: event_date = event["created_at"].replace(tzinfo=None) if datetime.datetime.now() > ( event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT) ): # the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event # with a lower id should have shown up in the EventLog by now if it actually happened if event["id"] == self._lastEventId + 2: self.logger.info( "Event %d never happened - ignoring.", self._lastEventId + 1 ) else: self.logger.info( "Events %d-%d never happened - ignoring.", self._lastEventId + 1, event["id"] - 1, ) else: # in this case, we want to add the missing events to the backlog as they could show up in the # EventLog within BACKLOG_TIMEOUT minutes, during which we'll keep asking for the same range # them to show up until they expire expiration = datetime.datetime.now() + datetime.timedelta( minutes=BACKLOG_TIMEOUT ) for skippedId in range(self._lastEventId + 1, event["id"]): self.logger.info("Adding event id %d to backlog.", skippedId) self._backlog[skippedId] = expiration self._lastEventId = event["id"] def __iter__(self): """ A plugin is iterable and will iterate over all its L{Callback} objects. """ return self._callbacks.__iter__() def __str__(self): """ Provide the name of the plugin when it is cast as string. @return: The name of the plugin. @rtype: I{str} """ return self.getName() class Registrar(object): """ See public API docs in docs folder. """ def __init__(self, plugin): """ Wrap a plugin so it can be passed to a user. """ self._plugin = plugin self._allowed = ["logger", "setEmails", "registerCallback"] def getLogger(self): """ Get the logger for this plugin. @return: The logger configured for this plugin. @rtype: L{logging.Logger} """ # TODO: Fix this ugly protected member access return self.logger def __getattr__(self, name): if name in self._allowed: return getattr(self._plugin, name) raise AttributeError( "type object '%s' has no attribute '%s'" % (type(self).__name__, name) ) class Callback(object): """ A part of a plugin that can be called to process a Shotgun event. """ def __init__( self, callback, plugin, engine, shotgun, matchEvents=None, args=None, stopOnError=True, ): """ @param callback: The function to run when a Shotgun event occurs. @type callback: A function object. @param engine: The engine that will dispatch to this callback. @type engine: L{Engine}. @param shotgun: The Shotgun instance that will be used to communicate with your Shotgun server. @type shotgun: L{sg.Shotgun} @param matchEvents: The event filter to match events against before invoking callback. @type matchEvents: dict @param args: Any datastructure you would like to be passed to your callback function. Defaults to None. @type args: Any object. @raise TypeError: If the callback is not a callable object. """ if not callable(callback): raise TypeError( "The callback must be a callable object (function, method or callable class instance)." 
) self._name = None self._shotgun = shotgun self._callback = callback self._engine = engine self._logger = None self._matchEvents = matchEvents self._args = args self._stopOnError = stopOnError self._active = True # Find a name for this object if hasattr(callback, "__name__"): self._name = callback.__name__ elif hasattr(callback, "__class__") and hasattr(callback, "__call__"): self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback))) else: raise ValueError( "registerCallback should be called with a function or a callable object instance as callback argument." ) # TODO: Get rid of this protected member access self._logger = logging.getLogger(plugin.logger.name + "." + self._name) self._logger.config = self._engine.config def canProcess(self, event): if not self._matchEvents: return True if "*" in self._matchEvents: eventType = "*" else: eventType = event["event_type"] if eventType not in self._matchEvents: return False attributes = self._matchEvents[eventType] if attributes is None or "*" in attributes: return True if event["attribute_name"] and event["attribute_name"] in attributes: return True return False def process(self, event): """ Process an event with the callback object supplied on initialization. If an error occurs, it will be logged appropriately and the callback will be deactivated. @param event: The Shotgun event to process. @type event: I{dict} """ # set session_uuid for UI updates if self._engine._use_session_uuid: self._shotgun.set_session_uuid(event["session_uuid"]) if self._engine.timing_logger: start_time = datetime.datetime.now(SG_TIMEZONE.local) try: self._callback(self._shotgun, self._logger, event, self._args) error = False except: error = True # Get the local variables of the frame of our plugin tb = sys.exc_info()[2] stack = [] while tb: stack.append(tb.tb_frame) tb = tb.tb_next msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s" self._logger.critical( msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals) ) if self._stopOnError: self._active = False if self._engine.timing_logger: callback_name = self._logger.name.replace("plugin.", "") end_time = datetime.datetime.now(SG_TIMEZONE.local) duration = self._prettyTimeDeltaFormat(end_time - start_time) delay = self._prettyTimeDeltaFormat(start_time - event["created_at"]) msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s" data = [ event["id"], event["created_at"].isoformat(), callback_name, start_time.isoformat(), end_time.isoformat(), duration, str(error), delay, ] self._engine.timing_logger.info(msg_format, *data) return self._active def _prettyTimeDeltaFormat(self, time_delta): days, remainder = divmod(time_delta.total_seconds(), 86400) hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) return "%02d:%02d:%02d:%02d.%06d" % ( days, hours, minutes, seconds, time_delta.microseconds, ) def isActive(self): """ Check if this callback is active, i.e. if events should be passed to it for processing. @return: True if this callback should process events, False otherwise. @rtype: I{bool} """ return self._active def __str__(self): """ The name of the callback. @return: The name of the callback @rtype: I{str} """ return self._name class CustomSMTPHandler(logging.handlers.SMTPHandler): """ A custom SMTPHandler subclass that will adapt it's subject depending on the error severity. 
""" LEVEL_SUBJECTS = { logging.ERROR: "ERROR - SG event daemon.", logging.CRITICAL: "CRITICAL - SG event daemon.", } def __init__( self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None ): args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials] if credentials: # Python 2.7 implemented the secure argument if CURRENT_PYTHON_VERSION >= PYTHON_27: args.append(secure) else: self.secure = secure logging.handlers.SMTPHandler.__init__(self, *args) def getSubject(self, record): subject = logging.handlers.SMTPHandler.getSubject(self, record) if record.levelno in self.LEVEL_SUBJECTS: return subject + " " + self.LEVEL_SUBJECTS[record.levelno] return subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ # Mostly copied from Python 2.7 implementation. try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg, ) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.close() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class EventDaemonError(Exception): """ Base error for the Shotgun event system. """ pass class ConfigError(EventDaemonError): """ Used when an error is detected in the config file. """ pass if sys.platform == "win32": class WindowsService(win32serviceutil.ServiceFramework): """ Windows service wrapper """ _svc_name_ = "ShotgunEventDaemon" _svc_display_name_ = "Shotgun Event Handler" def __init__(self, args): win32serviceutil.ServiceFramework.__init__(self, args) self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) self._engine = Engine(_getConfigPath()) def SvcStop(self): """ Stop the Windows service. """ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) win32event.SetEvent(self.hWaitStop) self._engine.stop() def SvcDoRun(self): """ Start the Windows service. """ servicemanager.LogMsg( servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ""), ) self.main() def main(self): """ Primary Windows entry point """ self._engine.start() class LinuxDaemon(daemonizer.Daemon): """ Linux Daemon wrapper or wrapper used for foreground operation on Windows """ def __init__(self): self._engine = Engine(_getConfigPath()) super(LinuxDaemon, self).__init__( "shotgunEvent", self._engine.config.getEnginePIDFile() ) def start(self, daemonize=True): if not daemonize: # Setup the stdout logger handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(levelname)s:%(name)s:%(message)s") ) logging.getLogger().addHandler(handler) super(LinuxDaemon, self).start(daemonize) def _run(self): """ Start the engine's main loop """ self._engine.start() def _cleanup(self): self._engine.stop() def main(): """ """ if CURRENT_PYTHON_VERSION <= PYTHON_26: print( "Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer." 
) return 3 action = None if len(sys.argv) > 1: action = sys.argv[1] if sys.platform == "win32" and action != "foreground": win32serviceutil.HandleCommandLine(WindowsService) return 0 if action: daemon = LinuxDaemon() # Find the function to call on the daemon and call it func = getattr(daemon, action, None) if action[:1] != "_" and func is not None: func() return 0 print("Unknown command: %s" % action) print("usage: %s start|stop|restart|foreground" % sys.argv[0]) return 2 def _getConfigPath(): """ Get the path of the shotgunEventDaemon configuration file. """ paths = ["/etc", os.path.dirname(__file__)] # Get the current path of the daemon script scriptPath = sys.argv[0] if scriptPath != "" and scriptPath != "-c": # Make absolute path and eliminate any symlinks if any. scriptPath = os.path.abspath(scriptPath) scriptPath = os.path.realpath(scriptPath) # Add the script's directory to the paths we'll search for the config. paths[:0] = [os.path.dirname(scriptPath)] # Search for a config file. for path in paths: path = os.path.join(path, "shotgunEventDaemon.conf") if os.path.exists(path): return path # No config file was found raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths)) if __name__ == "__main__": sys.exit(main())
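To make the daemon's persistence scheme concrete, here is a minimal, self-contained sketch of the event-id round-trip performed by `_saveEventIdData` and `_loadEventIdData` above. The file path and the exact state contents are illustrative assumptions; in the real daemon the outer dict is keyed by plugin-collection path and each plugin's state is a `(lastEventId, backlog)` tuple.

```python
import os
import pickle
import tempfile

# Illustrative state shape: {collection_path: {plugin_name: (last_id, backlog)}}
state = {"/usr/local/shotgun/plugins": {"myPlugin": (12345, {})}}

event_id_file = os.path.join(tempfile.gettempdir(), "shotgunEventDaemon.id")

# Save with protocol 2 so the file stays readable from Python 2,
# mirroring the pickle.dump call in _saveEventIdData().
with open(event_id_file, "wb") as fh:
    pickle.dump(state, fh, protocol=2)

# Reload it, as _loadEventIdData() does on startup.
with open(event_id_file, "rb") as fh:
    restored = pickle.load(fh)

assert restored == state
os.remove(event_id_file)
```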
get_schema
A record of an event made for purposes of maintaining a security log. Typical uses include detection of intrusion attempts and monitoring for inappropriate usage. id: unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. identifier: Identifies a specific instance of the entity. The reference should always be version specific. reference: Identifies a specific instance of the entity. The reference should be version specific. type: The type of the object that was involved in this audit event. role: Code representing the role the entity played in the event being audited. lifecycle: Identifier for the data life-cycle stage for the entity. securityLabel: Security labels for the identified entity. name: A name of the entity in the audit event. description: Text that describes the entity in more detail. query: The query parameters for query-type entities. detail: Tagged value pairs for conveying additional information about the entity.
from typing import Union, List, Optional from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType # This file is auto-generated by generate_schema so do not edit manually # noinspection PyPep8Naming class AuditEvent_EntitySchema: """ A record of an event made for purposes of maintaining a security log. Typical uses include detection of intrusion attempts and monitoring for inappropriate usage. """ # noinspection PyDefaultArgument # MASKED: get_schema function (lines 16-249)
@staticmethod def get_schema( max_nesting_depth: Optional[int] = 6, nesting_depth: int = 0, nesting_list: List[str] = [], max_recursion_limit: Optional[int] = 2, include_extension: Optional[bool] = False, extension_fields: Optional[List[str]] = [ "valueBoolean", "valueCode", "valueDate", "valueDateTime", "valueDecimal", "valueId", "valueInteger", "valuePositiveInt", "valueString", "valueTime", "valueUnsignedInt", "valueUri", "valueQuantity", ], extension_depth: int = 0, max_extension_depth: Optional[int] = 2, ) -> Union[StructType, DataType]: """ A record of an event made for purposes of maintaining a security log. Typical uses include detection of intrusion attempts and monitoring for inappropriate usage. id: unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. identifier: Identifies a specific instance of the entity. The reference should always be version specific. reference: Identifies a specific instance of the entity. The reference should be version specific. type: The type of the object that was involved in this audit event. role: Code representing the role the entity played in the event being audited. lifecycle: Identifier for the data life-cycle stage for the entity. securityLabel: Security labels for the identified entity. name: A name of the entity in the audit event. description: Text that describes the entity in more detail. query: The query parameters for a query-type entities. detail: Tagged value pairs for conveying additional information about the entity. """ from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema from spark_fhir_schemas.stu3.complex_types.auditevent_detail import ( AuditEvent_DetailSchema, ) if ( max_recursion_limit and nesting_list.count("AuditEvent_Entity") >= max_recursion_limit ) or (max_nesting_depth and nesting_depth >= max_nesting_depth): return StructType([StructField("id", StringType(), True)]) # add my name to recursion list for later my_nesting_list: List[str] = nesting_list + ["AuditEvent_Entity"] schema = StructType( [ # unique id for the element within a resource (for internal references). This # may be any string value that does not contain spaces. StructField("id", StringType(), True), # May be used to represent additional information that is not part of the basic # definition of the element. In order to make the use of extensions safe and # manageable, there is a strict set of governance applied to the definition and # use of extensions. Though any implementer is allowed to define an extension, # there is a set of requirements that SHALL be met as part of the definition of # the extension. 
StructField( "extension", ArrayType( ExtensionSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, ) ), True, ), # Identifies a specific instance of the entity. The reference should always be # version specific. StructField( "identifier", IdentifierSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Identifies a specific instance of the entity. The reference should be version # specific. StructField( "reference", ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # The type of the object that was involved in this audit event. StructField( "type", CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Code representing the role the entity played in the event being audited. StructField( "role", CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Identifier for the data life-cycle stage for the entity. StructField( "lifecycle", CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Security labels for the identified entity. StructField( "securityLabel", ArrayType( CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, ) ), True, ), # A name of the entity in the audit event. StructField("name", StringType(), True), # Text that describes the entity in more detail. StructField("description", StringType(), True), # The query parameters for a query-type entities. StructField("query", StringType(), True), # Tagged value pairs for conveying additional information about the entity. 
StructField( "detail", ArrayType( AuditEvent_DetailSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, ) ), True, ), ] ) if not include_extension: schema.fields = [ c if c.name != "extension" else StructField("extension", StringType(), True) for c in schema.fields ] return schema
16
249
from typing import Union, List, Optional from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType # This file is auto-generated by generate_schema so do not edit manually # noinspection PyPep8Naming class AuditEvent_EntitySchema: """ A record of an event made for purposes of maintaining a security log. Typical uses include detection of intrusion attempts and monitoring for inappropriate usage. """ # noinspection PyDefaultArgument @staticmethod def get_schema( max_nesting_depth: Optional[int] = 6, nesting_depth: int = 0, nesting_list: List[str] = [], max_recursion_limit: Optional[int] = 2, include_extension: Optional[bool] = False, extension_fields: Optional[List[str]] = [ "valueBoolean", "valueCode", "valueDate", "valueDateTime", "valueDecimal", "valueId", "valueInteger", "valuePositiveInt", "valueString", "valueTime", "valueUnsignedInt", "valueUri", "valueQuantity", ], extension_depth: int = 0, max_extension_depth: Optional[int] = 2, ) -> Union[StructType, DataType]: """ A record of an event made for purposes of maintaining a security log. Typical uses include detection of intrusion attempts and monitoring for inappropriate usage. id: unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. identifier: Identifies a specific instance of the entity. The reference should always be version specific. reference: Identifies a specific instance of the entity. The reference should be version specific. type: The type of the object that was involved in this audit event. role: Code representing the role the entity played in the event being audited. lifecycle: Identifier for the data life-cycle stage for the entity. securityLabel: Security labels for the identified entity. name: A name of the entity in the audit event. description: Text that describes the entity in more detail. query: The query parameters for a query-type entities. detail: Tagged value pairs for conveying additional information about the entity. """ from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema from spark_fhir_schemas.stu3.complex_types.auditevent_detail import ( AuditEvent_DetailSchema, ) if ( max_recursion_limit and nesting_list.count("AuditEvent_Entity") >= max_recursion_limit ) or (max_nesting_depth and nesting_depth >= max_nesting_depth): return StructType([StructField("id", StringType(), True)]) # add my name to recursion list for later my_nesting_list: List[str] = nesting_list + ["AuditEvent_Entity"] schema = StructType( [ # unique id for the element within a resource (for internal references). This # may be any string value that does not contain spaces. StructField("id", StringType(), True), # May be used to represent additional information that is not part of the basic # definition of the element. 
In order to make the use of extensions safe and # manageable, there is a strict set of governance applied to the definition and # use of extensions. Though any implementer is allowed to define an extension, # there is a set of requirements that SHALL be met as part of the definition of # the extension. StructField( "extension", ArrayType( ExtensionSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, ) ), True, ), # Identifies a specific instance of the entity. The reference should always be # version specific. StructField( "identifier", IdentifierSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Identifies a specific instance of the entity. The reference should be version # specific. StructField( "reference", ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # The type of the object that was involved in this audit event. StructField( "type", CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Code representing the role the entity played in the event being audited. StructField( "role", CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Identifier for the data life-cycle stage for the entity. StructField( "lifecycle", CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, ), True, ), # Security labels for the identified entity. StructField( "securityLabel", ArrayType( CodingSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, ) ), True, ), # A name of the entity in the audit event. StructField("name", StringType(), True), # Text that describes the entity in more detail. StructField("description", StringType(), True), # The query parameters for a query-type entities. 
StructField("query", StringType(), True), # Tagged value pairs for conveying additional information about the entity. StructField( "detail", ArrayType( AuditEvent_DetailSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, ) ), True, ), ] ) if not include_extension: schema.fields = [ c if c.name != "extension" else StructField("extension", StringType(), True) for c in schema.fields ] return schema
autocorrelate
Gives the correlation coefficient for the signal's correlation with itself. Args: signal: The signal on which to compute the autocorrelation. Can be a list. lag: The offset at which to correlate the signal with itself. E.g. if lag is 1, this will compute the correlation between the signal and itself 1 beat later. Returns: Correlation coefficient.
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions to support the RLTuner and NoteRNNLoader classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random # internal imports import numpy as np from six.moves import range # pylint: disable=redefined-builtin import tensorflow as tf LSTM_STATE_NAME = 'lstm' # Number of output note classes. This is a property of the dataset. NUM_CLASSES = 38 # Default batch size. BATCH_SIZE = 128 # Music-related constants. INITIAL_MIDI_VALUE = 48 NUM_SPECIAL_EVENTS = 2 MIN_NOTE = 48 # Inclusive MAX_NOTE = 84 # Exclusive TRANSPOSE_TO_KEY = 0 # C Major DEFAULT_QPM = 80.0 # Music theory constants used in defining reward functions. # Note that action 2 = midi note 48. C_MAJOR_SCALE = [2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26] C_MAJOR_KEY = [0, 1, 2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26, 28, 30, 31, 33, 35, 37] C_MAJOR_TONIC = 14 A_MINOR_TONIC = 23 # The number of half-steps in musical intervals, in order of dissonance OCTAVE = 12 FIFTH = 7 THIRD = 4 SIXTH = 9 SECOND = 2 FOURTH = 5 SEVENTH = 11 HALFSTEP = 1 # Special intervals that have unique rewards REST_INTERVAL = -1 HOLD_INTERVAL = -1.5 REST_INTERVAL_AFTER_THIRD_OR_FIFTH = -2 HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH = -2.5 IN_KEY_THIRD = -3 IN_KEY_FIFTH = -5 # Indicate melody direction ASCENDING = 1 DESCENDING = -1 # Indicate whether a melodic leap has been resolved or if another leap was made LEAP_RESOLVED = 1 LEAP_DOUBLED = -1 def default_hparams(): """Generates the hparams used to train note rnn used in paper.""" return tf.contrib.training.HParams(use_dynamic_rnn=True, batch_size=BATCH_SIZE, lr=0.0002, l2_reg=2.5e-5, clip_norm=5, initial_learning_rate=0.5, decay_steps=1000, decay_rate=0.85, rnn_layer_sizes=[100], skip_first_n_losses=32, one_hot_length=NUM_CLASSES, exponentially_decay_learning_rate=True) def basic_rnn_hparams(): """Generates the hparams used to train a basic_rnn. These are the hparams used in the .mag file found at https://github.com/tensorflow/magenta/tree/master/magenta/models/ melody_rnn#pre-trained Returns: Hyperparameters of the downloadable basic_rnn pre-trained model. """ # TODO(natashajaques): ability to restore basic_rnn from any .mag file. return tf.contrib.training.HParams(batch_size=128, rnn_layer_sizes=[512, 512], one_hot_length=NUM_CLASSES) def default_dqn_hparams(): """Generates the default hparams for RLTuner DQN model.""" return tf.contrib.training.HParams(random_action_probability=0.1, store_every_nth=1, train_every_nth=5, minibatch_size=32, discount_rate=0.95, max_experience=100000, target_network_update_rate=0.01) # MASKED: autocorrelate function (lines 125-140) def linear_annealing(n, total, p_initial, p_final): """Linearly interpolates a probability between p_initial and p_final. Current probability is based on the current step, n. Used to linearly anneal the exploration probability of the RLTuner. 
Args: n: The current step. total: The total number of steps that will be taken (usually the length of the exploration period). p_initial: The initial probability. p_final: The final probability. Returns: The current probability (between p_initial and p_final). """ if n >= total: return p_final else: return p_initial - (n * (p_initial - p_final)) / (total) def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum(axis=0) def sample_softmax(softmax_vect): """Samples a note from an array of softmax probabilities. Tries to do this with numpy, which requires that the probabilities add to 1.0 with extreme precision. If this fails, uses a manual implementation. Args: softmax_vect: An array of probabilities. Returns: The index of the note that was chosen/sampled. """ try: sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect)) return sample except: # pylint: disable=bare-except r = random.uniform(0, np.sum(softmax_vect)) upto = 0 for i in range(len(softmax_vect)): if upto + softmax_vect[i] >= r: return i upto += softmax_vect[i] tf.logging.warn("Error! sample softmax function shouldn't get here") print("Error! sample softmax function shouldn't get here") return len(softmax_vect) - 1 def decoder(event_list, transpose_amount): """Translates a sequence generated by RLTuner to MonophonicMelody form. Args: event_list: Integer list of encoded notes. transpose_amount: Key to transpose to. Returns: Integer list of MIDI values. """ return [e - NUM_SPECIAL_EVENTS if e < NUM_SPECIAL_EVENTS else e + INITIAL_MIDI_VALUE - transpose_amount for e in event_list] def make_onehot(int_list, one_hot_length): """Convert each int to a one-hot vector. A one-hot vector is 0 everywhere except at the index equal to the encoded value. For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...] Args: int_list: A list of ints, each of which will get a one-hot encoding. one_hot_length: The length of the one-hot vector to be created. Returns: A list of one-hot encodings of the ints. """ return [[1.0 if j == i else 0.0 for j in range(one_hot_length)] for i in int_list] def get_inner_scope(scope_str): """Takes a tensorflow scope string and finds the inner scope. Inner scope is one layer more internal. Args: scope_str: Tensorflow variable scope string. Returns: Scope string with outer scope stripped off. """ idx = scope_str.find('/') return scope_str[idx + 1:] def trim_variable_postfixes(scope_str): """Trims any extra numbers added to a tensorflow scope string. Necessary to align variables in graph and checkpoint Args: scope_str: Tensorflow variable scope string. Returns: Scope string with extra numbers trimmed off. """ idx = scope_str.find(':') return scope_str[:idx] def get_variable_names(graph, scope): """Finds all the variable names in a graph that begin with a given scope. Args: graph: A tensorflow graph. scope: A string scope. Returns: List of variables. """ with graph.as_default(): return [v.name for v in tf.global_variables() if v.name.startswith(scope)] def get_next_file_name(directory, prefix, extension): """Finds next available filename in directory by appending numbers to prefix. E.g. If prefix is 'myfile', extenstion is '.png', and 'directory' already contains 'myfile.png' and 'myfile1.png', this function will return 'myfile2.png'. Args: directory: Path to the relevant directory. prefix: The filename prefix to use. extension: String extension of the file, eg. '.mid'. Returns: String name of the file. 
""" name = directory + '/' + prefix + '.' + extension i = 0 while os.path.isfile(name): i += 1 name = directory + '/' + prefix + str(i) + '.' + extension return name def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False): """Makes a default LSTM cell for use in the NoteRNNLoader graph. This model is only to be used for loading the checkpoint from the research paper. In general, events_rnn_graph.make_rnn_cell should be used instead. Args: rnn_layer_sizes: A list of integer sizes (in units) for each layer of the RNN. state_is_tuple: A boolean specifying whether to use tuple of hidden matrix and cell matrix as a state instead of a concatenated matrix. Returns: A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters. """ cells = [] for num_units in rnn_layer_sizes: cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple) cells.append(cell) cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple) return cell def log_sum_exp(xs): """Computes the log sum exp value of a tensor.""" maxes = tf.reduce_max(xs, keep_dims=True) xs -= maxes return tf.squeeze(maxes, [-1]) + tf.log(tf.reduce_sum(tf.exp(xs), -1))
def autocorrelate(signal, lag=1): """Gives the correlation coefficient for the signal's correlation with itself. Args: signal: The signal on which to compute the autocorrelation. Can be a list. lag: The offset at which to correlate the signal with itself. E.g. if lag is 1, will compute the correlation between the signal and itself 1 beat later. Returns: Correlation coefficient. """ n = len(signal) x = np.asarray(signal) - np.mean(signal) c0 = np.var(signal) return (x[lag:] * x[:n - lag]).sum() / float(n) / c0
125
140
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions to support the RLTuner and NoteRNNLoader classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random # internal imports import numpy as np from six.moves import range # pylint: disable=redefined-builtin import tensorflow as tf LSTM_STATE_NAME = 'lstm' # Number of output note classes. This is a property of the dataset. NUM_CLASSES = 38 # Default batch size. BATCH_SIZE = 128 # Music-related constants. INITIAL_MIDI_VALUE = 48 NUM_SPECIAL_EVENTS = 2 MIN_NOTE = 48 # Inclusive MAX_NOTE = 84 # Exclusive TRANSPOSE_TO_KEY = 0 # C Major DEFAULT_QPM = 80.0 # Music theory constants used in defining reward functions. # Note that action 2 = midi note 48. C_MAJOR_SCALE = [2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26] C_MAJOR_KEY = [0, 1, 2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26, 28, 30, 31, 33, 35, 37] C_MAJOR_TONIC = 14 A_MINOR_TONIC = 23 # The number of half-steps in musical intervals, in order of dissonance OCTAVE = 12 FIFTH = 7 THIRD = 4 SIXTH = 9 SECOND = 2 FOURTH = 5 SEVENTH = 11 HALFSTEP = 1 # Special intervals that have unique rewards REST_INTERVAL = -1 HOLD_INTERVAL = -1.5 REST_INTERVAL_AFTER_THIRD_OR_FIFTH = -2 HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH = -2.5 IN_KEY_THIRD = -3 IN_KEY_FIFTH = -5 # Indicate melody direction ASCENDING = 1 DESCENDING = -1 # Indicate whether a melodic leap has been resolved or if another leap was made LEAP_RESOLVED = 1 LEAP_DOUBLED = -1 def default_hparams(): """Generates the hparams used to train note rnn used in paper.""" return tf.contrib.training.HParams(use_dynamic_rnn=True, batch_size=BATCH_SIZE, lr=0.0002, l2_reg=2.5e-5, clip_norm=5, initial_learning_rate=0.5, decay_steps=1000, decay_rate=0.85, rnn_layer_sizes=[100], skip_first_n_losses=32, one_hot_length=NUM_CLASSES, exponentially_decay_learning_rate=True) def basic_rnn_hparams(): """Generates the hparams used to train a basic_rnn. These are the hparams used in the .mag file found at https://github.com/tensorflow/magenta/tree/master/magenta/models/ melody_rnn#pre-trained Returns: Hyperparameters of the downloadable basic_rnn pre-trained model. """ # TODO(natashajaques): ability to restore basic_rnn from any .mag file. return tf.contrib.training.HParams(batch_size=128, rnn_layer_sizes=[512, 512], one_hot_length=NUM_CLASSES) def default_dqn_hparams(): """Generates the default hparams for RLTuner DQN model.""" return tf.contrib.training.HParams(random_action_probability=0.1, store_every_nth=1, train_every_nth=5, minibatch_size=32, discount_rate=0.95, max_experience=100000, target_network_update_rate=0.01) def autocorrelate(signal, lag=1): """Gives the correlation coefficient for the signal's correlation with itself. Args: signal: The signal on which to compute the autocorrelation. Can be a list. lag: The offset at which to correlate the signal with itself. E.g. 
if lag is 1, will compute the correlation between the signal and itself 1 beat later. Returns: Correlation coefficient. """ n = len(signal) x = np.asarray(signal) - np.mean(signal) c0 = np.var(signal) return (x[lag:] * x[:n - lag]).sum() / float(n) / c0 def linear_annealing(n, total, p_initial, p_final): """Linearly interpolates a probability between p_initial and p_final. Current probability is based on the current step, n. Used to linearly anneal the exploration probability of the RLTuner. Args: n: The current step. total: The total number of steps that will be taken (usually the length of the exploration period). p_initial: The initial probability. p_final: The final probability. Returns: The current probability (between p_initial and p_final). """ if n >= total: return p_final else: return p_initial - (n * (p_initial - p_final)) / (total) def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum(axis=0) def sample_softmax(softmax_vect): """Samples a note from an array of softmax probabilities. Tries to do this with numpy, which requires that the probabilities add to 1.0 with extreme precision. If this fails, uses a manual implementation. Args: softmax_vect: An array of probabilities. Returns: The index of the note that was chosen/sampled. """ try: sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect)) return sample except: # pylint: disable=bare-except r = random.uniform(0, np.sum(softmax_vect)) upto = 0 for i in range(len(softmax_vect)): if upto + softmax_vect[i] >= r: return i upto += softmax_vect[i] tf.logging.warn("Error! sample softmax function shouldn't get here") print("Error! sample softmax function shouldn't get here") return len(softmax_vect) - 1 def decoder(event_list, transpose_amount): """Translates a sequence generated by RLTuner to MonophonicMelody form. Args: event_list: Integer list of encoded notes. transpose_amount: Key to transpose to. Returns: Integer list of MIDI values. """ return [e - NUM_SPECIAL_EVENTS if e < NUM_SPECIAL_EVENTS else e + INITIAL_MIDI_VALUE - transpose_amount for e in event_list] def make_onehot(int_list, one_hot_length): """Convert each int to a one-hot vector. A one-hot vector is 0 everywhere except at the index equal to the encoded value. For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...] Args: int_list: A list of ints, each of which will get a one-hot encoding. one_hot_length: The length of the one-hot vector to be created. Returns: A list of one-hot encodings of the ints. """ return [[1.0 if j == i else 0.0 for j in range(one_hot_length)] for i in int_list] def get_inner_scope(scope_str): """Takes a tensorflow scope string and finds the inner scope. Inner scope is one layer more internal. Args: scope_str: Tensorflow variable scope string. Returns: Scope string with outer scope stripped off. """ idx = scope_str.find('/') return scope_str[idx + 1:] def trim_variable_postfixes(scope_str): """Trims any extra numbers added to a tensorflow scope string. Necessary to align variables in graph and checkpoint Args: scope_str: Tensorflow variable scope string. Returns: Scope string with extra numbers trimmed off. """ idx = scope_str.find(':') return scope_str[:idx] def get_variable_names(graph, scope): """Finds all the variable names in a graph that begin with a given scope. Args: graph: A tensorflow graph. scope: A string scope. Returns: List of variables. 
""" with graph.as_default(): return [v.name for v in tf.global_variables() if v.name.startswith(scope)] def get_next_file_name(directory, prefix, extension): """Finds next available filename in directory by appending numbers to prefix. E.g. If prefix is 'myfile', extenstion is '.png', and 'directory' already contains 'myfile.png' and 'myfile1.png', this function will return 'myfile2.png'. Args: directory: Path to the relevant directory. prefix: The filename prefix to use. extension: String extension of the file, eg. '.mid'. Returns: String name of the file. """ name = directory + '/' + prefix + '.' + extension i = 0 while os.path.isfile(name): i += 1 name = directory + '/' + prefix + str(i) + '.' + extension return name def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False): """Makes a default LSTM cell for use in the NoteRNNLoader graph. This model is only to be used for loading the checkpoint from the research paper. In general, events_rnn_graph.make_rnn_cell should be used instead. Args: rnn_layer_sizes: A list of integer sizes (in units) for each layer of the RNN. state_is_tuple: A boolean specifying whether to use tuple of hidden matrix and cell matrix as a state instead of a concatenated matrix. Returns: A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters. """ cells = [] for num_units in rnn_layer_sizes: cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple) cells.append(cell) cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple) return cell def log_sum_exp(xs): """Computes the log sum exp value of a tensor.""" maxes = tf.reduce_max(xs, keep_dims=True) xs -= maxes return tf.squeeze(maxes, [-1]) + tf.log(tf.reduce_sum(tf.exp(xs), -1))
get_next_file_name
Finds the next available filename in a directory by appending numbers to the prefix. E.g. if prefix is 'myfile', extension is 'png', and 'directory' already contains 'myfile.png' and 'myfile1.png', this function will return 'myfile2.png'. Args: directory: Path to the relevant directory. prefix: The filename prefix to use. extension: String extension of the file, without the leading dot, e.g. 'mid'. Returns: String name of the file.
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions to support the RLTuner and NoteRNNLoader classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random # internal imports import numpy as np from six.moves import range # pylint: disable=redefined-builtin import tensorflow as tf LSTM_STATE_NAME = 'lstm' # Number of output note classes. This is a property of the dataset. NUM_CLASSES = 38 # Default batch size. BATCH_SIZE = 128 # Music-related constants. INITIAL_MIDI_VALUE = 48 NUM_SPECIAL_EVENTS = 2 MIN_NOTE = 48 # Inclusive MAX_NOTE = 84 # Exclusive TRANSPOSE_TO_KEY = 0 # C Major DEFAULT_QPM = 80.0 # Music theory constants used in defining reward functions. # Note that action 2 = midi note 48. C_MAJOR_SCALE = [2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26] C_MAJOR_KEY = [0, 1, 2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26, 28, 30, 31, 33, 35, 37] C_MAJOR_TONIC = 14 A_MINOR_TONIC = 23 # The number of half-steps in musical intervals, in order of dissonance OCTAVE = 12 FIFTH = 7 THIRD = 4 SIXTH = 9 SECOND = 2 FOURTH = 5 SEVENTH = 11 HALFSTEP = 1 # Special intervals that have unique rewards REST_INTERVAL = -1 HOLD_INTERVAL = -1.5 REST_INTERVAL_AFTER_THIRD_OR_FIFTH = -2 HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH = -2.5 IN_KEY_THIRD = -3 IN_KEY_FIFTH = -5 # Indicate melody direction ASCENDING = 1 DESCENDING = -1 # Indicate whether a melodic leap has been resolved or if another leap was made LEAP_RESOLVED = 1 LEAP_DOUBLED = -1 def default_hparams(): """Generates the hparams used to train note rnn used in paper.""" return tf.contrib.training.HParams(use_dynamic_rnn=True, batch_size=BATCH_SIZE, lr=0.0002, l2_reg=2.5e-5, clip_norm=5, initial_learning_rate=0.5, decay_steps=1000, decay_rate=0.85, rnn_layer_sizes=[100], skip_first_n_losses=32, one_hot_length=NUM_CLASSES, exponentially_decay_learning_rate=True) def basic_rnn_hparams(): """Generates the hparams used to train a basic_rnn. These are the hparams used in the .mag file found at https://github.com/tensorflow/magenta/tree/master/magenta/models/ melody_rnn#pre-trained Returns: Hyperparameters of the downloadable basic_rnn pre-trained model. """ # TODO(natashajaques): ability to restore basic_rnn from any .mag file. return tf.contrib.training.HParams(batch_size=128, rnn_layer_sizes=[512, 512], one_hot_length=NUM_CLASSES) def default_dqn_hparams(): """Generates the default hparams for RLTuner DQN model.""" return tf.contrib.training.HParams(random_action_probability=0.1, store_every_nth=1, train_every_nth=5, minibatch_size=32, discount_rate=0.95, max_experience=100000, target_network_update_rate=0.01) def autocorrelate(signal, lag=1): """Gives the correlation coefficient for the signal's correlation with itself. Args: signal: The signal on which to compute the autocorrelation. Can be a list. lag: The offset at which to correlate the signal with itself. E.g. 
if lag is 1, will compute the correlation between the signal and itself 1 beat later. Returns: Correlation coefficient. """ n = len(signal) x = np.asarray(signal) - np.mean(signal) c0 = np.var(signal) return (x[lag:] * x[:n - lag]).sum() / float(n) / c0 def linear_annealing(n, total, p_initial, p_final): """Linearly interpolates a probability between p_initial and p_final. Current probability is based on the current step, n. Used to linearly anneal the exploration probability of the RLTuner. Args: n: The current step. total: The total number of steps that will be taken (usually the length of the exploration period). p_initial: The initial probability. p_final: The final probability. Returns: The current probability (between p_initial and p_final). """ if n >= total: return p_final else: return p_initial - (n * (p_initial - p_final)) / (total) def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum(axis=0) def sample_softmax(softmax_vect): """Samples a note from an array of softmax probabilities. Tries to do this with numpy, which requires that the probabilities add to 1.0 with extreme precision. If this fails, uses a manual implementation. Args: softmax_vect: An array of probabilities. Returns: The index of the note that was chosen/sampled. """ try: sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect)) return sample except: # pylint: disable=bare-except r = random.uniform(0, np.sum(softmax_vect)) upto = 0 for i in range(len(softmax_vect)): if upto + softmax_vect[i] >= r: return i upto += softmax_vect[i] tf.logging.warn("Error! sample softmax function shouldn't get here") print("Error! sample softmax function shouldn't get here") return len(softmax_vect) - 1 def decoder(event_list, transpose_amount): """Translates a sequence generated by RLTuner to MonophonicMelody form. Args: event_list: Integer list of encoded notes. transpose_amount: Key to transpose to. Returns: Integer list of MIDI values. """ return [e - NUM_SPECIAL_EVENTS if e < NUM_SPECIAL_EVENTS else e + INITIAL_MIDI_VALUE - transpose_amount for e in event_list] def make_onehot(int_list, one_hot_length): """Convert each int to a one-hot vector. A one-hot vector is 0 everywhere except at the index equal to the encoded value. For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...] Args: int_list: A list of ints, each of which will get a one-hot encoding. one_hot_length: The length of the one-hot vector to be created. Returns: A list of one-hot encodings of the ints. """ return [[1.0 if j == i else 0.0 for j in range(one_hot_length)] for i in int_list] def get_inner_scope(scope_str): """Takes a tensorflow scope string and finds the inner scope. Inner scope is one layer more internal. Args: scope_str: Tensorflow variable scope string. Returns: Scope string with outer scope stripped off. """ idx = scope_str.find('/') return scope_str[idx + 1:] def trim_variable_postfixes(scope_str): """Trims any extra numbers added to a tensorflow scope string. Necessary to align variables in graph and checkpoint Args: scope_str: Tensorflow variable scope string. Returns: Scope string with extra numbers trimmed off. """ idx = scope_str.find(':') return scope_str[:idx] def get_variable_names(graph, scope): """Finds all the variable names in a graph that begin with a given scope. Args: graph: A tensorflow graph. scope: A string scope. Returns: List of variables. 
""" with graph.as_default(): return [v.name for v in tf.global_variables() if v.name.startswith(scope)] # MASKED: get_next_file_name function (lines 269-288) def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False): """Makes a default LSTM cell for use in the NoteRNNLoader graph. This model is only to be used for loading the checkpoint from the research paper. In general, events_rnn_graph.make_rnn_cell should be used instead. Args: rnn_layer_sizes: A list of integer sizes (in units) for each layer of the RNN. state_is_tuple: A boolean specifying whether to use tuple of hidden matrix and cell matrix as a state instead of a concatenated matrix. Returns: A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters. """ cells = [] for num_units in rnn_layer_sizes: cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple) cells.append(cell) cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple) return cell def log_sum_exp(xs): """Computes the log sum exp value of a tensor.""" maxes = tf.reduce_max(xs, keep_dims=True) xs -= maxes return tf.squeeze(maxes, [-1]) + tf.log(tf.reduce_sum(tf.exp(xs), -1))
def get_next_file_name(directory, prefix, extension):
  """Finds next available filename in directory by appending numbers to prefix.

  E.g. if prefix is 'myfile', extension is 'png', and 'directory' already
  contains 'myfile.png' and 'myfile1.png', this function will return
  'myfile2.png'.

  Args:
    directory: Path to the relevant directory.
    prefix: The filename prefix to use.
    extension: String extension of the file, e.g. 'mid' (it is joined to the
      prefix with a '.').
  Returns:
    String name of the file.
  """
  name = directory + '/' + prefix + '.' + extension
  i = 0
  while os.path.isfile(name):
    i += 1
    name = directory + '/' + prefix + str(i) + '.' + extension
  return name
269
288
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions to support the RLTuner and NoteRNNLoader classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random # internal imports import numpy as np from six.moves import range # pylint: disable=redefined-builtin import tensorflow as tf LSTM_STATE_NAME = 'lstm' # Number of output note classes. This is a property of the dataset. NUM_CLASSES = 38 # Default batch size. BATCH_SIZE = 128 # Music-related constants. INITIAL_MIDI_VALUE = 48 NUM_SPECIAL_EVENTS = 2 MIN_NOTE = 48 # Inclusive MAX_NOTE = 84 # Exclusive TRANSPOSE_TO_KEY = 0 # C Major DEFAULT_QPM = 80.0 # Music theory constants used in defining reward functions. # Note that action 2 = midi note 48. C_MAJOR_SCALE = [2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26] C_MAJOR_KEY = [0, 1, 2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26, 28, 30, 31, 33, 35, 37] C_MAJOR_TONIC = 14 A_MINOR_TONIC = 23 # The number of half-steps in musical intervals, in order of dissonance OCTAVE = 12 FIFTH = 7 THIRD = 4 SIXTH = 9 SECOND = 2 FOURTH = 5 SEVENTH = 11 HALFSTEP = 1 # Special intervals that have unique rewards REST_INTERVAL = -1 HOLD_INTERVAL = -1.5 REST_INTERVAL_AFTER_THIRD_OR_FIFTH = -2 HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH = -2.5 IN_KEY_THIRD = -3 IN_KEY_FIFTH = -5 # Indicate melody direction ASCENDING = 1 DESCENDING = -1 # Indicate whether a melodic leap has been resolved or if another leap was made LEAP_RESOLVED = 1 LEAP_DOUBLED = -1 def default_hparams(): """Generates the hparams used to train note rnn used in paper.""" return tf.contrib.training.HParams(use_dynamic_rnn=True, batch_size=BATCH_SIZE, lr=0.0002, l2_reg=2.5e-5, clip_norm=5, initial_learning_rate=0.5, decay_steps=1000, decay_rate=0.85, rnn_layer_sizes=[100], skip_first_n_losses=32, one_hot_length=NUM_CLASSES, exponentially_decay_learning_rate=True) def basic_rnn_hparams(): """Generates the hparams used to train a basic_rnn. These are the hparams used in the .mag file found at https://github.com/tensorflow/magenta/tree/master/magenta/models/ melody_rnn#pre-trained Returns: Hyperparameters of the downloadable basic_rnn pre-trained model. """ # TODO(natashajaques): ability to restore basic_rnn from any .mag file. return tf.contrib.training.HParams(batch_size=128, rnn_layer_sizes=[512, 512], one_hot_length=NUM_CLASSES) def default_dqn_hparams(): """Generates the default hparams for RLTuner DQN model.""" return tf.contrib.training.HParams(random_action_probability=0.1, store_every_nth=1, train_every_nth=5, minibatch_size=32, discount_rate=0.95, max_experience=100000, target_network_update_rate=0.01) def autocorrelate(signal, lag=1): """Gives the correlation coefficient for the signal's correlation with itself. Args: signal: The signal on which to compute the autocorrelation. Can be a list. lag: The offset at which to correlate the signal with itself. E.g. 
if lag is 1, will compute the correlation between the signal and itself 1 beat later. Returns: Correlation coefficient. """ n = len(signal) x = np.asarray(signal) - np.mean(signal) c0 = np.var(signal) return (x[lag:] * x[:n - lag]).sum() / float(n) / c0 def linear_annealing(n, total, p_initial, p_final): """Linearly interpolates a probability between p_initial and p_final. Current probability is based on the current step, n. Used to linearly anneal the exploration probability of the RLTuner. Args: n: The current step. total: The total number of steps that will be taken (usually the length of the exploration period). p_initial: The initial probability. p_final: The final probability. Returns: The current probability (between p_initial and p_final). """ if n >= total: return p_final else: return p_initial - (n * (p_initial - p_final)) / (total) def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum(axis=0) def sample_softmax(softmax_vect): """Samples a note from an array of softmax probabilities. Tries to do this with numpy, which requires that the probabilities add to 1.0 with extreme precision. If this fails, uses a manual implementation. Args: softmax_vect: An array of probabilities. Returns: The index of the note that was chosen/sampled. """ try: sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect)) return sample except: # pylint: disable=bare-except r = random.uniform(0, np.sum(softmax_vect)) upto = 0 for i in range(len(softmax_vect)): if upto + softmax_vect[i] >= r: return i upto += softmax_vect[i] tf.logging.warn("Error! sample softmax function shouldn't get here") print("Error! sample softmax function shouldn't get here") return len(softmax_vect) - 1 def decoder(event_list, transpose_amount): """Translates a sequence generated by RLTuner to MonophonicMelody form. Args: event_list: Integer list of encoded notes. transpose_amount: Key to transpose to. Returns: Integer list of MIDI values. """ return [e - NUM_SPECIAL_EVENTS if e < NUM_SPECIAL_EVENTS else e + INITIAL_MIDI_VALUE - transpose_amount for e in event_list] def make_onehot(int_list, one_hot_length): """Convert each int to a one-hot vector. A one-hot vector is 0 everywhere except at the index equal to the encoded value. For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...] Args: int_list: A list of ints, each of which will get a one-hot encoding. one_hot_length: The length of the one-hot vector to be created. Returns: A list of one-hot encodings of the ints. """ return [[1.0 if j == i else 0.0 for j in range(one_hot_length)] for i in int_list] def get_inner_scope(scope_str): """Takes a tensorflow scope string and finds the inner scope. Inner scope is one layer more internal. Args: scope_str: Tensorflow variable scope string. Returns: Scope string with outer scope stripped off. """ idx = scope_str.find('/') return scope_str[idx + 1:] def trim_variable_postfixes(scope_str): """Trims any extra numbers added to a tensorflow scope string. Necessary to align variables in graph and checkpoint Args: scope_str: Tensorflow variable scope string. Returns: Scope string with extra numbers trimmed off. """ idx = scope_str.find(':') return scope_str[:idx] def get_variable_names(graph, scope): """Finds all the variable names in a graph that begin with a given scope. Args: graph: A tensorflow graph. scope: A string scope. Returns: List of variables. 
""" with graph.as_default(): return [v.name for v in tf.global_variables() if v.name.startswith(scope)] def get_next_file_name(directory, prefix, extension): """Finds next available filename in directory by appending numbers to prefix. E.g. If prefix is 'myfile', extenstion is '.png', and 'directory' already contains 'myfile.png' and 'myfile1.png', this function will return 'myfile2.png'. Args: directory: Path to the relevant directory. prefix: The filename prefix to use. extension: String extension of the file, eg. '.mid'. Returns: String name of the file. """ name = directory + '/' + prefix + '.' + extension i = 0 while os.path.isfile(name): i += 1 name = directory + '/' + prefix + str(i) + '.' + extension return name def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False): """Makes a default LSTM cell for use in the NoteRNNLoader graph. This model is only to be used for loading the checkpoint from the research paper. In general, events_rnn_graph.make_rnn_cell should be used instead. Args: rnn_layer_sizes: A list of integer sizes (in units) for each layer of the RNN. state_is_tuple: A boolean specifying whether to use tuple of hidden matrix and cell matrix as a state instead of a concatenated matrix. Returns: A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters. """ cells = [] for num_units in rnn_layer_sizes: cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple) cells.append(cell) cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple) return cell def log_sum_exp(xs): """Computes the log sum exp value of a tensor.""" maxes = tf.reduce_max(xs, keep_dims=True) xs -= maxes return tf.squeeze(maxes, [-1]) + tf.log(tf.reduce_sum(tf.exp(xs), -1))
set_logging_level
Set up logging for the CLI. We either set up global logging based on the verbosity or, if `logger` is specified, we limit output to a single sqlfluff logger. Verbosity is applied in the same way. Implementation: If `logger` is not specified, the handler is attached to the `sqlfluff` logger. If it is specified then it attaches to the logger in question. In addition, if `logger` is specified, then that logger will also not propagate.
"""Contains the CLI.""" import sys import json import logging import oyaml as yaml import click # For the profiler import pstats from io import StringIO # To enable colour cross platform import colorama from sqlfluff.cli.formatters import ( format_rules, format_violation, format_linting_result_header, format_linting_stats, colorize, format_dialect_warning, format_dialects, CallbackFormatter, ) from sqlfluff.cli.helpers import cli_table, get_package_version # Import from sqlfluff core. from sqlfluff.core import ( Linter, FluffConfig, SQLLintError, dialect_selector, dialect_readout, TimingSummary, ) class RedWarningsFilter(logging.Filter): """This filter makes all warnings or above red.""" def filter(self, record): """Filter any warnings (or above) to turn them red.""" if record.levelno >= logging.WARNING: record.msg = colorize(record.msg, "red") + " " return True # MASKED: set_logging_level function (lines 51-100) def common_options(f): """Add common options to commands via a decorator. These are applied to all of the cli commands. """ f = click.version_option()(f) f = click.option( "-v", "--verbose", count=True, help=( "Verbosity, how detailed should the output be. This is *stackable*, so `-vv`" " is more verbose than `-v`. For the most verbose option try `-vvvv` or `-vvvvv`." ), )(f) f = click.option( "-n", "--nocolor", is_flag=True, help="No color - if this is set then the output will be without ANSI color codes.", )(f) return f def core_options(f): """Add core operation options to commands via a decorator. These are applied to the main (but not all) cli commands like `parse`, `lint` and `fix`. """ f = click.option( "--dialect", default=None, help="The dialect of SQL to lint (default=ansi)" )(f) f = click.option( "--templater", default=None, help="The templater to use (default=jinja)" )(f) f = click.option( "--rules", default=None, # short_help='Specify a particular rule, or comma separated rules, to check', help=( "Narrow the search to only specific rules. For example " "specifying `--rules L001` will only search for rule `L001` (Unnecessary " "trailing whitespace). Multiple rules can be specified with commas e.g. " "`--rules L001,L002` will specify only looking for violations of rule " "`L001` and rule `L002`." ), )(f) f = click.option( "--exclude-rules", default=None, # short_help='Specify a particular rule, or comma separated rules to exclude', help=( "Exclude specific rules. For example " "specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary " "trailing whitespace) from the set of considered rules. This could either " "be the whitelist, or the general set if there is no specific whitelist. " "Multiple rules can be specified with commas e.g. " "`--exclude-rules L001,L002` will exclude violations of rule " "`L001` and rule `L002`." ), )(f) f = click.option( "--ignore", default=None, help=( "Ignore particular families of errors so that they don't cause a failed " "run. For example `--ignore parsing` would mean that any parsing errors " "are ignored and don't influence the success or fail of a run. Multiple " "options are possible if comma separated e.g. `--ignore parsing,templating`." 
), )(f) f = click.option( "--bench", is_flag=True, help="Set this flag to engage the benchmarking tool output.", )(f) f = click.option( "--logger", type=click.Choice(["parser", "linter", "rules"], case_sensitive=False), help="Choose to limit the logging to one of the loggers.", )(f) return f def get_config(**kwargs): """Get a config object from kwargs.""" if kwargs.get("dialect", None): try: # We're just making sure it exists at this stage - it will be fetched properly in the linter dialect_selector(kwargs["dialect"]) except KeyError: click.echo("Error: Unknown dialect {0!r}".format(kwargs["dialect"])) sys.exit(66) # Instantiate a config object (filtering out the nulls) overrides = {k: kwargs[k] for k in kwargs if kwargs[k] is not None} return FluffConfig.from_root(overrides=overrides) def get_linter_and_formatter(cfg, silent=False): """Get a linter object given a config.""" try: # We're just making sure it exists at this stage - it will be fetched properly in the linter dialect_selector(cfg.get("dialect")) except KeyError: click.echo("Error: Unknown dialect {0!r}".format(cfg.get("dialect"))) sys.exit(66) if not silent: # Instantiate the linter and return (with an output function) formatter = CallbackFormatter( callback=lambda m: click.echo(m, color=cfg.get("color")), verbosity=cfg.get("verbose"), output_line_length=cfg.get("output_line_length"), ) return Linter(config=cfg, formatter=formatter), formatter else: # Instantiate the linter and return. NB: No formatter # in the Linter and a black formatter otherwise. formatter = CallbackFormatter(callback=lambda m: None, verbosity=0) return Linter(config=cfg), formatter @click.group() @click.version_option() def cli(): """Sqlfluff is a modular sql linter for humans.""" @cli.command() @common_options def version(**kwargs): """Show the version of sqlfluff.""" c = get_config(**kwargs) if c.get("verbose") > 0: # Instantiate the linter lnt, formatter = get_linter_and_formatter(c) # Dispatch the detailed config from the linter. formatter.dispatch_config(lnt) else: # Otherwise just output the package version. click.echo(get_package_version(), color=c.get("color")) @cli.command() @common_options def rules(**kwargs): """Show the current rules in use.""" c = get_config(**kwargs) lnt, _ = get_linter_and_formatter(c) click.echo(format_rules(lnt), color=c.get("color")) @cli.command() @common_options def dialects(**kwargs): """Show the current dialects available.""" c = get_config(**kwargs) click.echo(format_dialects(dialect_readout), color=c.get("color")) @cli.command() @common_options @core_options @click.option( "-f", "--format", "format", default="human", type=click.Choice(["human", "json", "yaml"], case_sensitive=False), help="What format to return the lint result in.", ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." ), ) @click.option( "--disregard-sqlfluffignores", is_flag=True, help=("Perform the operation regardless of .sqlfluffignore configurations"), ) @click.option( "-p", "--parallel", type=int, default=1, help="If set to a value higher than 1, run SQLFluff in parallel, " "speeding up processing.", ) @click.argument("paths", nargs=-1) def lint( paths, parallel, format, nofail, disregard_sqlfluffignores, logger=None, bench=False, **kwargs, ): """Lint SQL files via passing a list of files or using stdin. PATH is the path to a sql file or directory to lint. 
This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. Linting SQL files: sqlfluff lint path/to/file.sql sqlfluff lint directory/of/sql/files Linting a file via stdin (note the lone '-' character): cat path/to/file.sql | sqlfluff lint - echo 'select col from tbl' | sqlfluff lint - """ c = get_config(**kwargs) non_human_output = format in ("json", "yaml") lnt, formatter = get_linter_and_formatter(c, silent=non_human_output) verbose = c.get("verbose") formatter.dispatch_config(lnt) # Set up logging. set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output) # add stdin if specified via lone '-' if ("-",) == paths: result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin") else: # Output the results as we go if verbose >= 1: click.echo(format_linting_result_header()) try: result = lnt.lint_paths( paths, ignore_non_existent_files=False, ignore_files=not disregard_sqlfluffignores, parallel=parallel, ) except IOError: click.echo( colorize( "The path(s) {0!r} could not be accessed. Check it/they exist(s).".format( paths ), "red", ) ) sys.exit(1) # Output the final stats if verbose >= 1: click.echo(format_linting_stats(result, verbose=verbose)) if format == "json": click.echo(json.dumps(result.as_records())) elif format == "yaml": click.echo(yaml.dump(result.as_records())) if bench: click.echo("==== overall timings ====") timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo(cli_table(timing_summary[step].items())) if not nofail: if not non_human_output: click.echo("All Finished 📜 🎉!") sys.exit(result.stats()["exit code"]) else: sys.exit(0) def do_fixes(lnt, result, formatter=None, **kwargs): """Actually do the fixes.""" click.echo("Persisting Changes...") res = result.persist_changes(formatter=formatter, **kwargs) if all(res.values()): click.echo("Done. Please check your files to confirm.") return True # If some failed then return false click.echo("Done. Some operations failed. Please check your files to confirm.") click.echo("Some errors cannot be fixed or there is another error blocking it.") return False @cli.command() @common_options @core_options @click.option( "-f", "--force", is_flag=True, help=( "skip the confirmation prompt and go straight to applying " "fixes. **Use this with caution.**" ), ) @click.option( "--fixed-suffix", default=None, help="An optional suffix to add to fixed files." ) @click.option( "--parallel", type=int, default=1, help="If set to a value higher than 1, run SQLFluff in parallel, " "speeding up processing.", ) @click.argument("paths", nargs=-1) def fix(force, paths, parallel, bench=False, fixed_suffix="", logger=None, **kwargs): """Fix SQL files. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ # some quick checks fixing_stdin = ("-",) == paths c = get_config(**kwargs) lnt, formatter = get_linter_and_formatter(c, silent=fixing_stdin) verbose = c.get("verbose") formatter.dispatch_config(lnt) # Set up logging. set_logging_level(verbosity=verbose, logger=logger, stderr_output=fixing_stdin) # handle stdin case. 
should output formatted sql to stdout and nothing else. if fixing_stdin: stdin = sys.stdin.read() result = lnt.lint_string_wrapped(stdin, fname="stdin", fix=True) stdout = result.paths[0].files[0].fix_string()[0] click.echo(stdout, nl=False) sys.exit() # Lint the paths (not with the fix argument at this stage), outputting as we go. click.echo("==== finding fixable violations ====") try: result = lnt.lint_paths( paths, fix=True, ignore_non_existent_files=False, parallel=parallel ) except IOError: click.echo( colorize( "The path(s) {0!r} could not be accessed. Check it/they exist(s).".format( paths ), "red", ) ) sys.exit(1) # NB: We filter to linting violations here, because they're # the only ones which can be potentially fixed. if result.num_violations(types=SQLLintError, fixable=True) > 0: click.echo("==== fixing violations ====") click.echo( "{0} fixable linting violations found".format( result.num_violations(types=SQLLintError, fixable=True) ) ) if force: click.echo(colorize("FORCE MODE", "red") + ": Attempting fixes...") success = do_fixes( lnt, result, formatter, types=SQLLintError, fixed_file_suffix=fixed_suffix, ) if not success: sys.exit(1) else: click.echo( "Are you sure you wish to attempt to fix these? [Y/n] ", nl=False ) c = click.getchar().lower() click.echo("...") if c in ("y", "\r", "\n"): click.echo("Attempting fixes...") success = do_fixes( lnt, result, formatter, types=SQLLintError, fixed_file_suffix=fixed_suffix, ) if not success: sys.exit(1) else: click.echo("All Finished 📜 🎉!") elif c == "n": click.echo("Aborting...") else: click.echo("Invalid input, please enter 'Y' or 'N'") click.echo("Aborting...") else: click.echo("==== no fixable linting violations found ====") if result.num_violations(types=SQLLintError, fixable=False) > 0: click.echo( " [{0} unfixable linting violations found]".format( result.num_violations(types=SQLLintError, fixable=False) ) ) click.echo("All Finished 📜 🎉!") if bench: click.echo("==== overall timings ====") timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo(cli_table(timing_summary[step].items())) sys.exit(0) def quoted_presenter(dumper, data): """Re-presenter which always double quotes string values needing escapes.""" if "\n" in data or "\t" in data or "'" in data: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"') else: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="") @cli.command() @common_options @core_options @click.argument("path", nargs=1) @click.option( "--recurse", default=0, help="The depth to recursively parse to (0 for unlimited)" ) @click.option( "-c", "--code-only", is_flag=True, help="Output only the code elements of the parse tree.", ) @click.option( "-f", "--format", default="human", type=click.Choice(["human", "json", "yaml"], case_sensitive=False), help="What format to return the parse result in.", ) @click.option( "--profiler", is_flag=True, help="Set this flag to engage the python profiler." ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." ), ) def parse(path, code_only, format, profiler, bench, nofail, logger=None, **kwargs): """Parse SQL files and just spit out the result. PATH is the path to a sql file or directory to lint. 
This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ c = get_config(**kwargs) # We don't want anything else to be logged if we want json or yaml output non_human_output = format in ("json", "yaml") lnt, formatter = get_linter_and_formatter(c, silent=non_human_output) verbose = c.get("verbose") recurse = c.get("recurse") formatter.dispatch_config(lnt) # Set up logging. set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output) # TODO: do this better nv = 0 if profiler: # Set up the profiler if required try: import cProfile except ImportError: click.echo("The cProfiler is not available on your platform.") sys.exit(1) pr = cProfile.Profile() pr.enable() try: # handle stdin if specified via lone '-' if "-" == path: # put the parser result in a list to iterate later result = [ lnt.parse_string( sys.stdin.read(), "stdin", recurse=recurse, config=lnt.config ), ] else: # A single path must be specified for this command result = lnt.parse_path(path, recurse=recurse) # iterative print for human readout if format == "human": timing = TimingSummary() for parsed_string in result: timing.add(parsed_string.time_dict) if parsed_string.tree: click.echo(parsed_string.tree.stringify(code_only=code_only)) else: # TODO: Make this prettier click.echo("...Failed to Parse...") nv += len(parsed_string.violations) if parsed_string.violations: click.echo("==== parsing violations ====") for v in parsed_string.violations: click.echo(format_violation(v)) if ( parsed_string.violations and parsed_string.config.get("dialect") == "ansi" ): click.echo(format_dialect_warning()) if verbose >= 2: click.echo("==== timings ====") click.echo(cli_table(parsed_string.time_dict.items())) if verbose >= 2 or bench: click.echo("==== overall timings ====") timing_summary = timing.summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo(cli_table(timing_summary[step].items())) else: # collect result and print as single payload # will need to zip in the file paths filepaths = ["stdin"] if "-" == path else lnt.paths_from_path(path) result = [ dict( filepath=filepath, segments=parsed.as_record(code_only=code_only, show_raw=True) if parsed else None, ) for filepath, (parsed, _, _, _, _) in zip(filepaths, result) ] if format == "yaml": # For yaml dumping always dump double quoted strings if they contain tabs or newlines. yaml.add_representer(str, quoted_presenter) click.echo(yaml.dump(result)) elif format == "json": click.echo(json.dumps(result)) except IOError: click.echo( colorize( "The path {0!r} could not be accessed. Check it exists.".format(path), "red", ) ) sys.exit(1) if profiler: pr.disable() profiler_buffer = StringIO() ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats("cumulative") ps.print_stats() click.echo("==== profiler stats ====") # Only print the first 50 lines of it click.echo("\n".join(profiler_buffer.getvalue().split("\n")[:50])) if nv > 0 and not nofail: sys.exit(66) else: sys.exit(0) # This "__main__" handler allows invoking SQLFluff using "python -m", which # simplifies the use of cProfile, e.g.: # python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql if __name__ == "__main__": cli.main(sys.argv[1:])
def set_logging_level(verbosity, logger=None, stderr_output=False):
    """Set up logging for the CLI.

    We either set up global logging based on the verbosity or, if `logger` is
    specified, we limit output to a single sqlfluff logger. Verbosity is
    applied in the same way.

    Implementation: If `logger` is not specified, the handler is attached to
    the `sqlfluff` logger. If it is specified then it attaches to the logger
    in question. In addition, if `logger` is specified, then that logger will
    also not propagate.
    """
    fluff_logger = logging.getLogger("sqlfluff")
    # Don't propagate logging
    fluff_logger.propagate = False

    # Enable colorama
    colorama.init()

    # Set up the log handler to log to stdout (or stderr if requested).
    handler = logging.StreamHandler(stream=sys.stderr if stderr_output else sys.stdout)
    # NB: the unicode character at the beginning is to squash any badly
    # tamed ANSI colour statements, and return us to normality.
    handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s"))
    # Set up a handler to colour warnings red.
    handler.addFilter(RedWarningsFilter())
    if logger:
        focus_logger = logging.getLogger("sqlfluff.{0}".format(logger))
        focus_logger.addHandler(handler)
    else:
        fluff_logger.addHandler(handler)

    # NB: We treat the parser logger slightly differently because it's noisier.
    # It's important that we set levels for all of them each time so that we
    # don't break tests by changing the granularity between tests.
    parser_logger = logging.getLogger("sqlfluff.parser")
    if verbosity < 3:
        fluff_logger.setLevel(logging.WARNING)
        parser_logger.setLevel(logging.NOTSET)
    elif verbosity == 3:
        fluff_logger.setLevel(logging.INFO)
        parser_logger.setLevel(logging.WARNING)
    elif verbosity == 4:
        fluff_logger.setLevel(logging.DEBUG)
        parser_logger.setLevel(logging.INFO)
    elif verbosity > 4:
        fluff_logger.setLevel(logging.DEBUG)
        parser_logger.setLevel(logging.DEBUG)
51
100
"""Contains the CLI.""" import sys import json import logging import oyaml as yaml import click # For the profiler import pstats from io import StringIO # To enable colour cross platform import colorama from sqlfluff.cli.formatters import ( format_rules, format_violation, format_linting_result_header, format_linting_stats, colorize, format_dialect_warning, format_dialects, CallbackFormatter, ) from sqlfluff.cli.helpers import cli_table, get_package_version # Import from sqlfluff core. from sqlfluff.core import ( Linter, FluffConfig, SQLLintError, dialect_selector, dialect_readout, TimingSummary, ) class RedWarningsFilter(logging.Filter): """This filter makes all warnings or above red.""" def filter(self, record): """Filter any warnings (or above) to turn them red.""" if record.levelno >= logging.WARNING: record.msg = colorize(record.msg, "red") + " " return True def set_logging_level(verbosity, logger=None, stderr_output=False): """Set up logging for the CLI. We either set up global logging based on the verbosity or, if `logger` is specified, we only limit to a single sqlfluff logger. Verbosity is applied in the same way. Implementation: If `logger` is not specified, the handler is attached to the `sqlfluff` logger. If it is specified then it attaches the the logger in question. In addition if `logger` is specified, then that logger will also not propagate. """ fluff_logger = logging.getLogger("sqlfluff") # Don't propagate logging fluff_logger.propagate = False # Enable colorama colorama.init() # Set up the log handler to log to stdout handler = logging.StreamHandler(stream=sys.stderr if stderr_output else sys.stdout) # NB: the unicode character at the beginning is to squash any badly # tamed ANSI colour statements, and return us to normality. handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s")) # Set up a handler to colour warnings red. handler.addFilter(RedWarningsFilter()) if logger: focus_logger = logging.getLogger("sqlfluff.{0}".format(logger)) focus_logger.addHandler(handler) else: fluff_logger.addHandler(handler) # NB: We treat the parser logger slightly differently because it's noisier. # It's important that we set levels for all each time so # that we don't break tests by changing the granularity # between tests. parser_logger = logging.getLogger("sqlfluff.parser") if verbosity < 3: fluff_logger.setLevel(logging.WARNING) parser_logger.setLevel(logging.NOTSET) elif verbosity == 3: fluff_logger.setLevel(logging.INFO) parser_logger.setLevel(logging.WARNING) elif verbosity == 4: fluff_logger.setLevel(logging.DEBUG) parser_logger.setLevel(logging.INFO) elif verbosity > 4: fluff_logger.setLevel(logging.DEBUG) parser_logger.setLevel(logging.DEBUG) def common_options(f): """Add common options to commands via a decorator. These are applied to all of the cli commands. """ f = click.version_option()(f) f = click.option( "-v", "--verbose", count=True, help=( "Verbosity, how detailed should the output be. This is *stackable*, so `-vv`" " is more verbose than `-v`. For the most verbose option try `-vvvv` or `-vvvvv`." ), )(f) f = click.option( "-n", "--nocolor", is_flag=True, help="No color - if this is set then the output will be without ANSI color codes.", )(f) return f def core_options(f): """Add core operation options to commands via a decorator. These are applied to the main (but not all) cli commands like `parse`, `lint` and `fix`. 
""" f = click.option( "--dialect", default=None, help="The dialect of SQL to lint (default=ansi)" )(f) f = click.option( "--templater", default=None, help="The templater to use (default=jinja)" )(f) f = click.option( "--rules", default=None, # short_help='Specify a particular rule, or comma separated rules, to check', help=( "Narrow the search to only specific rules. For example " "specifying `--rules L001` will only search for rule `L001` (Unnecessary " "trailing whitespace). Multiple rules can be specified with commas e.g. " "`--rules L001,L002` will specify only looking for violations of rule " "`L001` and rule `L002`." ), )(f) f = click.option( "--exclude-rules", default=None, # short_help='Specify a particular rule, or comma separated rules to exclude', help=( "Exclude specific rules. For example " "specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary " "trailing whitespace) from the set of considered rules. This could either " "be the whitelist, or the general set if there is no specific whitelist. " "Multiple rules can be specified with commas e.g. " "`--exclude-rules L001,L002` will exclude violations of rule " "`L001` and rule `L002`." ), )(f) f = click.option( "--ignore", default=None, help=( "Ignore particular families of errors so that they don't cause a failed " "run. For example `--ignore parsing` would mean that any parsing errors " "are ignored and don't influence the success or fail of a run. Multiple " "options are possible if comma separated e.g. `--ignore parsing,templating`." ), )(f) f = click.option( "--bench", is_flag=True, help="Set this flag to engage the benchmarking tool output.", )(f) f = click.option( "--logger", type=click.Choice(["parser", "linter", "rules"], case_sensitive=False), help="Choose to limit the logging to one of the loggers.", )(f) return f def get_config(**kwargs): """Get a config object from kwargs.""" if kwargs.get("dialect", None): try: # We're just making sure it exists at this stage - it will be fetched properly in the linter dialect_selector(kwargs["dialect"]) except KeyError: click.echo("Error: Unknown dialect {0!r}".format(kwargs["dialect"])) sys.exit(66) # Instantiate a config object (filtering out the nulls) overrides = {k: kwargs[k] for k in kwargs if kwargs[k] is not None} return FluffConfig.from_root(overrides=overrides) def get_linter_and_formatter(cfg, silent=False): """Get a linter object given a config.""" try: # We're just making sure it exists at this stage - it will be fetched properly in the linter dialect_selector(cfg.get("dialect")) except KeyError: click.echo("Error: Unknown dialect {0!r}".format(cfg.get("dialect"))) sys.exit(66) if not silent: # Instantiate the linter and return (with an output function) formatter = CallbackFormatter( callback=lambda m: click.echo(m, color=cfg.get("color")), verbosity=cfg.get("verbose"), output_line_length=cfg.get("output_line_length"), ) return Linter(config=cfg, formatter=formatter), formatter else: # Instantiate the linter and return. NB: No formatter # in the Linter and a black formatter otherwise. formatter = CallbackFormatter(callback=lambda m: None, verbosity=0) return Linter(config=cfg), formatter @click.group() @click.version_option() def cli(): """Sqlfluff is a modular sql linter for humans.""" @cli.command() @common_options def version(**kwargs): """Show the version of sqlfluff.""" c = get_config(**kwargs) if c.get("verbose") > 0: # Instantiate the linter lnt, formatter = get_linter_and_formatter(c) # Dispatch the detailed config from the linter. 
formatter.dispatch_config(lnt) else: # Otherwise just output the package version. click.echo(get_package_version(), color=c.get("color")) @cli.command() @common_options def rules(**kwargs): """Show the current rules in use.""" c = get_config(**kwargs) lnt, _ = get_linter_and_formatter(c) click.echo(format_rules(lnt), color=c.get("color")) @cli.command() @common_options def dialects(**kwargs): """Show the current dialects available.""" c = get_config(**kwargs) click.echo(format_dialects(dialect_readout), color=c.get("color")) @cli.command() @common_options @core_options @click.option( "-f", "--format", "format", default="human", type=click.Choice(["human", "json", "yaml"], case_sensitive=False), help="What format to return the lint result in.", ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." ), ) @click.option( "--disregard-sqlfluffignores", is_flag=True, help=("Perform the operation regardless of .sqlfluffignore configurations"), ) @click.option( "-p", "--parallel", type=int, default=1, help="If set to a value higher than 1, run SQLFluff in parallel, " "speeding up processing.", ) @click.argument("paths", nargs=-1) def lint( paths, parallel, format, nofail, disregard_sqlfluffignores, logger=None, bench=False, **kwargs, ): """Lint SQL files via passing a list of files or using stdin. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. Linting SQL files: sqlfluff lint path/to/file.sql sqlfluff lint directory/of/sql/files Linting a file via stdin (note the lone '-' character): cat path/to/file.sql | sqlfluff lint - echo 'select col from tbl' | sqlfluff lint - """ c = get_config(**kwargs) non_human_output = format in ("json", "yaml") lnt, formatter = get_linter_and_formatter(c, silent=non_human_output) verbose = c.get("verbose") formatter.dispatch_config(lnt) # Set up logging. set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output) # add stdin if specified via lone '-' if ("-",) == paths: result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin") else: # Output the results as we go if verbose >= 1: click.echo(format_linting_result_header()) try: result = lnt.lint_paths( paths, ignore_non_existent_files=False, ignore_files=not disregard_sqlfluffignores, parallel=parallel, ) except IOError: click.echo( colorize( "The path(s) {0!r} could not be accessed. Check it/they exist(s).".format( paths ), "red", ) ) sys.exit(1) # Output the final stats if verbose >= 1: click.echo(format_linting_stats(result, verbose=verbose)) if format == "json": click.echo(json.dumps(result.as_records())) elif format == "yaml": click.echo(yaml.dump(result.as_records())) if bench: click.echo("==== overall timings ====") timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo(cli_table(timing_summary[step].items())) if not nofail: if not non_human_output: click.echo("All Finished 📜 🎉!") sys.exit(result.stats()["exit code"]) else: sys.exit(0) def do_fixes(lnt, result, formatter=None, **kwargs): """Actually do the fixes.""" click.echo("Persisting Changes...") res = result.persist_changes(formatter=formatter, **kwargs) if all(res.values()): click.echo("Done. 
Please check your files to confirm.") return True # If some failed then return false click.echo("Done. Some operations failed. Please check your files to confirm.") click.echo("Some errors cannot be fixed or there is another error blocking it.") return False @cli.command() @common_options @core_options @click.option( "-f", "--force", is_flag=True, help=( "skip the confirmation prompt and go straight to applying " "fixes. **Use this with caution.**" ), ) @click.option( "--fixed-suffix", default=None, help="An optional suffix to add to fixed files." ) @click.option( "--parallel", type=int, default=1, help="If set to a value higher than 1, run SQLFluff in parallel, " "speeding up processing.", ) @click.argument("paths", nargs=-1) def fix(force, paths, parallel, bench=False, fixed_suffix="", logger=None, **kwargs): """Fix SQL files. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ # some quick checks fixing_stdin = ("-",) == paths c = get_config(**kwargs) lnt, formatter = get_linter_and_formatter(c, silent=fixing_stdin) verbose = c.get("verbose") formatter.dispatch_config(lnt) # Set up logging. set_logging_level(verbosity=verbose, logger=logger, stderr_output=fixing_stdin) # handle stdin case. should output formatted sql to stdout and nothing else. if fixing_stdin: stdin = sys.stdin.read() result = lnt.lint_string_wrapped(stdin, fname="stdin", fix=True) stdout = result.paths[0].files[0].fix_string()[0] click.echo(stdout, nl=False) sys.exit() # Lint the paths (not with the fix argument at this stage), outputting as we go. click.echo("==== finding fixable violations ====") try: result = lnt.lint_paths( paths, fix=True, ignore_non_existent_files=False, parallel=parallel ) except IOError: click.echo( colorize( "The path(s) {0!r} could not be accessed. Check it/they exist(s).".format( paths ), "red", ) ) sys.exit(1) # NB: We filter to linting violations here, because they're # the only ones which can be potentially fixed. if result.num_violations(types=SQLLintError, fixable=True) > 0: click.echo("==== fixing violations ====") click.echo( "{0} fixable linting violations found".format( result.num_violations(types=SQLLintError, fixable=True) ) ) if force: click.echo(colorize("FORCE MODE", "red") + ": Attempting fixes...") success = do_fixes( lnt, result, formatter, types=SQLLintError, fixed_file_suffix=fixed_suffix, ) if not success: sys.exit(1) else: click.echo( "Are you sure you wish to attempt to fix these? 
[Y/n] ", nl=False ) c = click.getchar().lower() click.echo("...") if c in ("y", "\r", "\n"): click.echo("Attempting fixes...") success = do_fixes( lnt, result, formatter, types=SQLLintError, fixed_file_suffix=fixed_suffix, ) if not success: sys.exit(1) else: click.echo("All Finished 📜 🎉!") elif c == "n": click.echo("Aborting...") else: click.echo("Invalid input, please enter 'Y' or 'N'") click.echo("Aborting...") else: click.echo("==== no fixable linting violations found ====") if result.num_violations(types=SQLLintError, fixable=False) > 0: click.echo( " [{0} unfixable linting violations found]".format( result.num_violations(types=SQLLintError, fixable=False) ) ) click.echo("All Finished 📜 🎉!") if bench: click.echo("==== overall timings ====") timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo(cli_table(timing_summary[step].items())) sys.exit(0) def quoted_presenter(dumper, data): """Re-presenter which always double quotes string values needing escapes.""" if "\n" in data or "\t" in data or "'" in data: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"') else: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="") @cli.command() @common_options @core_options @click.argument("path", nargs=1) @click.option( "--recurse", default=0, help="The depth to recursively parse to (0 for unlimited)" ) @click.option( "-c", "--code-only", is_flag=True, help="Output only the code elements of the parse tree.", ) @click.option( "-f", "--format", default="human", type=click.Choice(["human", "json", "yaml"], case_sensitive=False), help="What format to return the parse result in.", ) @click.option( "--profiler", is_flag=True, help="Set this flag to engage the python profiler." ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." ), ) def parse(path, code_only, format, profiler, bench, nofail, logger=None, **kwargs): """Parse SQL files and just spit out the result. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ c = get_config(**kwargs) # We don't want anything else to be logged if we want json or yaml output non_human_output = format in ("json", "yaml") lnt, formatter = get_linter_and_formatter(c, silent=non_human_output) verbose = c.get("verbose") recurse = c.get("recurse") formatter.dispatch_config(lnt) # Set up logging. 
set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output) # TODO: do this better nv = 0 if profiler: # Set up the profiler if required try: import cProfile except ImportError: click.echo("The cProfiler is not available on your platform.") sys.exit(1) pr = cProfile.Profile() pr.enable() try: # handle stdin if specified via lone '-' if "-" == path: # put the parser result in a list to iterate later result = [ lnt.parse_string( sys.stdin.read(), "stdin", recurse=recurse, config=lnt.config ), ] else: # A single path must be specified for this command result = lnt.parse_path(path, recurse=recurse) # iterative print for human readout if format == "human": timing = TimingSummary() for parsed_string in result: timing.add(parsed_string.time_dict) if parsed_string.tree: click.echo(parsed_string.tree.stringify(code_only=code_only)) else: # TODO: Make this prettier click.echo("...Failed to Parse...") nv += len(parsed_string.violations) if parsed_string.violations: click.echo("==== parsing violations ====") for v in parsed_string.violations: click.echo(format_violation(v)) if ( parsed_string.violations and parsed_string.config.get("dialect") == "ansi" ): click.echo(format_dialect_warning()) if verbose >= 2: click.echo("==== timings ====") click.echo(cli_table(parsed_string.time_dict.items())) if verbose >= 2 or bench: click.echo("==== overall timings ====") timing_summary = timing.summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo(cli_table(timing_summary[step].items())) else: # collect result and print as single payload # will need to zip in the file paths filepaths = ["stdin"] if "-" == path else lnt.paths_from_path(path) result = [ dict( filepath=filepath, segments=parsed.as_record(code_only=code_only, show_raw=True) if parsed else None, ) for filepath, (parsed, _, _, _, _) in zip(filepaths, result) ] if format == "yaml": # For yaml dumping always dump double quoted strings if they contain tabs or newlines. yaml.add_representer(str, quoted_presenter) click.echo(yaml.dump(result)) elif format == "json": click.echo(json.dumps(result)) except IOError: click.echo( colorize( "The path {0!r} could not be accessed. Check it exists.".format(path), "red", ) ) sys.exit(1) if profiler: pr.disable() profiler_buffer = StringIO() ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats("cumulative") ps.print_stats() click.echo("==== profiler stats ====") # Only print the first 50 lines of it click.echo("\n".join(profiler_buffer.getvalue().split("\n")[:50])) if nv > 0 and not nofail: sys.exit(66) else: sys.exit(0) # This "__main__" handler allows invoking SQLFluff using "python -m", which # simplifies the use of cProfile, e.g.: # python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql if __name__ == "__main__": cli.main(sys.argv[1:])
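A short sketch of calling `set_logging_level` directly, assuming the module is importable as `sqlfluff.cli.commands` (the path implied by the `__main__` comment in the file above). It mirrors how the `lint`, `fix` and `parse` commands wire together verbosity, an optional focus logger, and stderr output:

```python
import logging

from sqlfluff.cli.commands import set_logging_level

# Limit handling to the parser logger only, writing to stderr as the
# lint/parse commands do for json/yaml output. Verbosity 4 maps the
# sqlfluff logger to DEBUG and the (noisier) parser logger to INFO.
set_logging_level(verbosity=4, logger="parser", stderr_output=True)

logging.getLogger("sqlfluff.parser").info("emitted at verbosity 4")
logging.getLogger("sqlfluff.parser").debug("suppressed: parser stays at INFO")
```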
__init__
Active Directory :param pulumi.Input[str] active_directory_id: Id of the Active Directory :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain :param pulumi.Input[str] domain: Name of the Active Directory domain :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes :param pulumi.Input[str] status: Status of the Active Directory :param pulumi.Input[str] username: Username of Active Directory domain administrator
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = [ 'ActiveDirectoryArgs', 'ExportPolicyRuleArgs', 'VolumePropertiesExportPolicyArgs', ] @pulumi.input_type class ActiveDirectoryArgs: # MASKED: __init__ function (lines 20-55) @property @pulumi.getter(name="activeDirectoryId") def active_directory_id(self) -> Optional[pulumi.Input[str]]: """ Id of the Active Directory """ return pulumi.get(self, "active_directory_id") @active_directory_id.setter def active_directory_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "active_directory_id", value) @property @pulumi.getter def dns(self) -> Optional[pulumi.Input[str]]: """ Comma separated list of DNS server IP addresses for the Active Directory domain """ return pulumi.get(self, "dns") @dns.setter def dns(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dns", value) @property @pulumi.getter def domain(self) -> Optional[pulumi.Input[str]]: """ Name of the Active Directory domain """ return pulumi.get(self, "domain") @domain.setter def domain(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "domain", value) @property @pulumi.getter(name="organizationalUnit") def organizational_unit(self) -> Optional[pulumi.Input[str]]: """ The Organizational Unit (OU) within the Windows Active Directory """ return pulumi.get(self, "organizational_unit") @organizational_unit.setter def organizational_unit(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "organizational_unit", value) @property @pulumi.getter def password(self) -> Optional[pulumi.Input[str]]: """ Plain text password of Active Directory domain administrator """ return pulumi.get(self, "password") @password.setter def password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "password", value) @property @pulumi.getter(name="smbServerName") def smb_server_name(self) -> Optional[pulumi.Input[str]]: """ NetBIOS name of the SMB server. 
This name will be registered as a computer account in the AD and used to mount volumes """ return pulumi.get(self, "smb_server_name") @smb_server_name.setter def smb_server_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "smb_server_name", value) @property @pulumi.getter def status(self) -> Optional[pulumi.Input[str]]: """ Status of the Active Directory """ return pulumi.get(self, "status") @status.setter def status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "status", value) @property @pulumi.getter def username(self) -> Optional[pulumi.Input[str]]: """ Username of Active Directory domain administrator """ return pulumi.get(self, "username") @username.setter def username(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "username", value) @pulumi.input_type class ExportPolicyRuleArgs: def __init__(__self__, *, allowed_clients: Optional[pulumi.Input[str]] = None, cifs: Optional[pulumi.Input[bool]] = None, nfsv3: Optional[pulumi.Input[bool]] = None, nfsv4: Optional[pulumi.Input[bool]] = None, rule_index: Optional[pulumi.Input[int]] = None, unix_read_only: Optional[pulumi.Input[bool]] = None, unix_read_write: Optional[pulumi.Input[bool]] = None): """ Volume Export Policy Rule :param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names :param pulumi.Input[bool] cifs: Allows CIFS protocol :param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol :param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later :param pulumi.Input[int] rule_index: Order index :param pulumi.Input[bool] unix_read_only: Read only access :param pulumi.Input[bool] unix_read_write: Read and write access """ if allowed_clients is not None: pulumi.set(__self__, "allowed_clients", allowed_clients) if cifs is not None: pulumi.set(__self__, "cifs", cifs) if nfsv3 is not None: pulumi.set(__self__, "nfsv3", nfsv3) if nfsv4 is not None: pulumi.set(__self__, "nfsv4", nfsv4) if rule_index is not None: pulumi.set(__self__, "rule_index", rule_index) if unix_read_only is not None: pulumi.set(__self__, "unix_read_only", unix_read_only) if unix_read_write is not None: pulumi.set(__self__, "unix_read_write", unix_read_write) @property @pulumi.getter(name="allowedClients") def allowed_clients(self) -> Optional[pulumi.Input[str]]: """ Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names """ return pulumi.get(self, "allowed_clients") @allowed_clients.setter def allowed_clients(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "allowed_clients", value) @property @pulumi.getter def cifs(self) -> Optional[pulumi.Input[bool]]: """ Allows CIFS protocol """ return pulumi.get(self, "cifs") @cifs.setter def cifs(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "cifs", value) @property @pulumi.getter def nfsv3(self) -> Optional[pulumi.Input[bool]]: """ Allows NFSv3 protocol """ return pulumi.get(self, "nfsv3") @nfsv3.setter def nfsv3(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv3", value) @property @pulumi.getter def nfsv4(self) -> Optional[pulumi.Input[bool]]: """ Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later """ return pulumi.get(self, "nfsv4") @nfsv4.setter def nfsv4(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv4", value) @property @pulumi.getter(name="ruleIndex") def rule_index(self) -> 
Optional[pulumi.Input[int]]: """ Order index """ return pulumi.get(self, "rule_index") @rule_index.setter def rule_index(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rule_index", value) @property @pulumi.getter(name="unixReadOnly") def unix_read_only(self) -> Optional[pulumi.Input[bool]]: """ Read only access """ return pulumi.get(self, "unix_read_only") @unix_read_only.setter def unix_read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_only", value) @property @pulumi.getter(name="unixReadWrite") def unix_read_write(self) -> Optional[pulumi.Input[bool]]: """ Read and write access """ return pulumi.get(self, "unix_read_write") @unix_read_write.setter def unix_read_write(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_write", value) @pulumi.input_type class VolumePropertiesExportPolicyArgs: def __init__(__self__, *, rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None): """ Set of export policy rules :param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule """ if rules is not None: pulumi.set(__self__, "rules", rules) @property @pulumi.getter def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]: """ Export policy rule """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]): pulumi.set(self, "rules", value)
    def __init__(__self__, *,
                 active_directory_id: Optional[pulumi.Input[str]] = None,
                 dns: Optional[pulumi.Input[str]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 organizational_unit: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 smb_server_name: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        Active Directory
        :param pulumi.Input[str] active_directory_id: Id of the Active Directory
        :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain
        :param pulumi.Input[str] domain: Name of the Active Directory domain
        :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory
        :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator
        :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
        :param pulumi.Input[str] status: Status of the Active Directory
        :param pulumi.Input[str] username: Username of Active Directory domain administrator
        """
        if active_directory_id is not None:
            pulumi.set(__self__, "active_directory_id", active_directory_id)
        if dns is not None:
            pulumi.set(__self__, "dns", dns)
        if domain is not None:
            pulumi.set(__self__, "domain", domain)
        if organizational_unit is not None:
            pulumi.set(__self__, "organizational_unit", organizational_unit)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if smb_server_name is not None:
            pulumi.set(__self__, "smb_server_name", smb_server_name)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if username is not None:
            pulumi.set(__self__, "username", username)
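The implementation above follows the generated-SDK idiom where every field is optional and only non-None arguments are stored. A minimal standalone sketch of that idiom (no Pulumi engine required; the class and field names here are illustrative, not part of the generated module):

```python
# Standalone sketch of the conditional-set idiom used by the generated
# __init__: unset fields are simply never stored, rather than stored as None.
class OptionalArgsSketch:
    def __init__(self, *, domain=None, dns=None, username=None):
        for name, value in (
                ('domain', domain), ('dns', dns), ('username', username)):
            if value is not None:
                setattr(self, name, value)  # stands in for pulumi.set

args = OptionalArgsSketch(domain='contoso.com', dns='10.0.0.4,10.0.0.5')
print(vars(args))  # {'domain': 'contoso.com', 'dns': '10.0.0.4,10.0.0.5'}
```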
20
55
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = [ 'ActiveDirectoryArgs', 'ExportPolicyRuleArgs', 'VolumePropertiesExportPolicyArgs', ] @pulumi.input_type class ActiveDirectoryArgs: def __init__(__self__, *, active_directory_id: Optional[pulumi.Input[str]] = None, dns: Optional[pulumi.Input[str]] = None, domain: Optional[pulumi.Input[str]] = None, organizational_unit: Optional[pulumi.Input[str]] = None, password: Optional[pulumi.Input[str]] = None, smb_server_name: Optional[pulumi.Input[str]] = None, status: Optional[pulumi.Input[str]] = None, username: Optional[pulumi.Input[str]] = None): """ Active Directory :param pulumi.Input[str] active_directory_id: Id of the Active Directory :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain :param pulumi.Input[str] domain: Name of the Active Directory domain :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes :param pulumi.Input[str] status: Status of the Active Directory :param pulumi.Input[str] username: Username of Active Directory domain administrator """ if active_directory_id is not None: pulumi.set(__self__, "active_directory_id", active_directory_id) if dns is not None: pulumi.set(__self__, "dns", dns) if domain is not None: pulumi.set(__self__, "domain", domain) if organizational_unit is not None: pulumi.set(__self__, "organizational_unit", organizational_unit) if password is not None: pulumi.set(__self__, "password", password) if smb_server_name is not None: pulumi.set(__self__, "smb_server_name", smb_server_name) if status is not None: pulumi.set(__self__, "status", status) if username is not None: pulumi.set(__self__, "username", username) @property @pulumi.getter(name="activeDirectoryId") def active_directory_id(self) -> Optional[pulumi.Input[str]]: """ Id of the Active Directory """ return pulumi.get(self, "active_directory_id") @active_directory_id.setter def active_directory_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "active_directory_id", value) @property @pulumi.getter def dns(self) -> Optional[pulumi.Input[str]]: """ Comma separated list of DNS server IP addresses for the Active Directory domain """ return pulumi.get(self, "dns") @dns.setter def dns(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dns", value) @property @pulumi.getter def domain(self) -> Optional[pulumi.Input[str]]: """ Name of the Active Directory domain """ return pulumi.get(self, "domain") @domain.setter def domain(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "domain", value) @property @pulumi.getter(name="organizationalUnit") def organizational_unit(self) -> Optional[pulumi.Input[str]]: """ The Organizational Unit (OU) within the Windows Active Directory """ return pulumi.get(self, "organizational_unit") @organizational_unit.setter def organizational_unit(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "organizational_unit", value) 
@property @pulumi.getter def password(self) -> Optional[pulumi.Input[str]]: """ Plain text password of Active Directory domain administrator """ return pulumi.get(self, "password") @password.setter def password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "password", value) @property @pulumi.getter(name="smbServerName") def smb_server_name(self) -> Optional[pulumi.Input[str]]: """ NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes """ return pulumi.get(self, "smb_server_name") @smb_server_name.setter def smb_server_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "smb_server_name", value) @property @pulumi.getter def status(self) -> Optional[pulumi.Input[str]]: """ Status of the Active Directory """ return pulumi.get(self, "status") @status.setter def status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "status", value) @property @pulumi.getter def username(self) -> Optional[pulumi.Input[str]]: """ Username of Active Directory domain administrator """ return pulumi.get(self, "username") @username.setter def username(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "username", value) @pulumi.input_type class ExportPolicyRuleArgs: def __init__(__self__, *, allowed_clients: Optional[pulumi.Input[str]] = None, cifs: Optional[pulumi.Input[bool]] = None, nfsv3: Optional[pulumi.Input[bool]] = None, nfsv4: Optional[pulumi.Input[bool]] = None, rule_index: Optional[pulumi.Input[int]] = None, unix_read_only: Optional[pulumi.Input[bool]] = None, unix_read_write: Optional[pulumi.Input[bool]] = None): """ Volume Export Policy Rule :param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names :param pulumi.Input[bool] cifs: Allows CIFS protocol :param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol :param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later :param pulumi.Input[int] rule_index: Order index :param pulumi.Input[bool] unix_read_only: Read only access :param pulumi.Input[bool] unix_read_write: Read and write access """ if allowed_clients is not None: pulumi.set(__self__, "allowed_clients", allowed_clients) if cifs is not None: pulumi.set(__self__, "cifs", cifs) if nfsv3 is not None: pulumi.set(__self__, "nfsv3", nfsv3) if nfsv4 is not None: pulumi.set(__self__, "nfsv4", nfsv4) if rule_index is not None: pulumi.set(__self__, "rule_index", rule_index) if unix_read_only is not None: pulumi.set(__self__, "unix_read_only", unix_read_only) if unix_read_write is not None: pulumi.set(__self__, "unix_read_write", unix_read_write) @property @pulumi.getter(name="allowedClients") def allowed_clients(self) -> Optional[pulumi.Input[str]]: """ Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names """ return pulumi.get(self, "allowed_clients") @allowed_clients.setter def allowed_clients(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "allowed_clients", value) @property @pulumi.getter def cifs(self) -> Optional[pulumi.Input[bool]]: """ Allows CIFS protocol """ return pulumi.get(self, "cifs") @cifs.setter def cifs(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "cifs", value) @property @pulumi.getter def nfsv3(self) -> Optional[pulumi.Input[bool]]: """ Allows NFSv3 protocol """ return pulumi.get(self, "nfsv3") @nfsv3.setter def nfsv3(self, value: 
Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv3", value) @property @pulumi.getter def nfsv4(self) -> Optional[pulumi.Input[bool]]: """ Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later """ return pulumi.get(self, "nfsv4") @nfsv4.setter def nfsv4(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv4", value) @property @pulumi.getter(name="ruleIndex") def rule_index(self) -> Optional[pulumi.Input[int]]: """ Order index """ return pulumi.get(self, "rule_index") @rule_index.setter def rule_index(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rule_index", value) @property @pulumi.getter(name="unixReadOnly") def unix_read_only(self) -> Optional[pulumi.Input[bool]]: """ Read only access """ return pulumi.get(self, "unix_read_only") @unix_read_only.setter def unix_read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_only", value) @property @pulumi.getter(name="unixReadWrite") def unix_read_write(self) -> Optional[pulumi.Input[bool]]: """ Read and write access """ return pulumi.get(self, "unix_read_write") @unix_read_write.setter def unix_read_write(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_write", value) @pulumi.input_type class VolumePropertiesExportPolicyArgs: def __init__(__self__, *, rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None): """ Set of export policy rules :param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule """ if rules is not None: pulumi.set(__self__, "rules", rules) @property @pulumi.getter def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]: """ Export policy rule """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]): pulumi.set(self, "rules", value)
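Each field in the file above pairs a `@property` getter with a setter that delegates to `pulumi.get`/`pulumi.set`; the `name=` argument on `@pulumi.getter` records the provider's camelCase wire name. A plain-Python analogue of that delegation (the shared store and wire-name mapping here are illustrative assumptions, not Pulumi internals):

```python
# Plain-Python analogue of the generated property pattern: getters and
# setters route through a shared store keyed by the camelCase wire name,
# mirroring what pulumi.get/pulumi.set do for input types.
class RuleSketch:
    def __init__(self):
        self._values = {}

    @property
    def rule_index(self):
        return self._values.get('ruleIndex')  # camelCase wire name

    @rule_index.setter
    def rule_index(self, value):
        self._values['ruleIndex'] = value

rule = RuleSketch()
rule.rule_index = 1
print(rule._values)  # {'ruleIndex': 1}
```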
__init__
Volume Export Policy Rule
:param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
:param pulumi.Input[bool] cifs: Allows CIFS protocol
:param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol
:param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
:param pulumi.Input[int] rule_index: Order index
:param pulumi.Input[bool] unix_read_only: Read only access
:param pulumi.Input[bool] unix_read_write: Read and write access
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = [ 'ActiveDirectoryArgs', 'ExportPolicyRuleArgs', 'VolumePropertiesExportPolicyArgs', ] @pulumi.input_type class ActiveDirectoryArgs: def __init__(__self__, *, active_directory_id: Optional[pulumi.Input[str]] = None, dns: Optional[pulumi.Input[str]] = None, domain: Optional[pulumi.Input[str]] = None, organizational_unit: Optional[pulumi.Input[str]] = None, password: Optional[pulumi.Input[str]] = None, smb_server_name: Optional[pulumi.Input[str]] = None, status: Optional[pulumi.Input[str]] = None, username: Optional[pulumi.Input[str]] = None): """ Active Directory :param pulumi.Input[str] active_directory_id: Id of the Active Directory :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain :param pulumi.Input[str] domain: Name of the Active Directory domain :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes :param pulumi.Input[str] status: Status of the Active Directory :param pulumi.Input[str] username: Username of Active Directory domain administrator """ if active_directory_id is not None: pulumi.set(__self__, "active_directory_id", active_directory_id) if dns is not None: pulumi.set(__self__, "dns", dns) if domain is not None: pulumi.set(__self__, "domain", domain) if organizational_unit is not None: pulumi.set(__self__, "organizational_unit", organizational_unit) if password is not None: pulumi.set(__self__, "password", password) if smb_server_name is not None: pulumi.set(__self__, "smb_server_name", smb_server_name) if status is not None: pulumi.set(__self__, "status", status) if username is not None: pulumi.set(__self__, "username", username) @property @pulumi.getter(name="activeDirectoryId") def active_directory_id(self) -> Optional[pulumi.Input[str]]: """ Id of the Active Directory """ return pulumi.get(self, "active_directory_id") @active_directory_id.setter def active_directory_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "active_directory_id", value) @property @pulumi.getter def dns(self) -> Optional[pulumi.Input[str]]: """ Comma separated list of DNS server IP addresses for the Active Directory domain """ return pulumi.get(self, "dns") @dns.setter def dns(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dns", value) @property @pulumi.getter def domain(self) -> Optional[pulumi.Input[str]]: """ Name of the Active Directory domain """ return pulumi.get(self, "domain") @domain.setter def domain(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "domain", value) @property @pulumi.getter(name="organizationalUnit") def organizational_unit(self) -> Optional[pulumi.Input[str]]: """ The Organizational Unit (OU) within the Windows Active Directory """ return pulumi.get(self, "organizational_unit") @organizational_unit.setter def organizational_unit(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "organizational_unit", value) 
@property @pulumi.getter def password(self) -> Optional[pulumi.Input[str]]: """ Plain text password of Active Directory domain administrator """ return pulumi.get(self, "password") @password.setter def password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "password", value) @property @pulumi.getter(name="smbServerName") def smb_server_name(self) -> Optional[pulumi.Input[str]]: """ NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes """ return pulumi.get(self, "smb_server_name") @smb_server_name.setter def smb_server_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "smb_server_name", value) @property @pulumi.getter def status(self) -> Optional[pulumi.Input[str]]: """ Status of the Active Directory """ return pulumi.get(self, "status") @status.setter def status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "status", value) @property @pulumi.getter def username(self) -> Optional[pulumi.Input[str]]: """ Username of Active Directory domain administrator """ return pulumi.get(self, "username") @username.setter def username(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "username", value) @pulumi.input_type class ExportPolicyRuleArgs: # MASKED: __init__ function (lines 156-187) @property @pulumi.getter(name="allowedClients") def allowed_clients(self) -> Optional[pulumi.Input[str]]: """ Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names """ return pulumi.get(self, "allowed_clients") @allowed_clients.setter def allowed_clients(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "allowed_clients", value) @property @pulumi.getter def cifs(self) -> Optional[pulumi.Input[bool]]: """ Allows CIFS protocol """ return pulumi.get(self, "cifs") @cifs.setter def cifs(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "cifs", value) @property @pulumi.getter def nfsv3(self) -> Optional[pulumi.Input[bool]]: """ Allows NFSv3 protocol """ return pulumi.get(self, "nfsv3") @nfsv3.setter def nfsv3(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv3", value) @property @pulumi.getter def nfsv4(self) -> Optional[pulumi.Input[bool]]: """ Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later """ return pulumi.get(self, "nfsv4") @nfsv4.setter def nfsv4(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv4", value) @property @pulumi.getter(name="ruleIndex") def rule_index(self) -> Optional[pulumi.Input[int]]: """ Order index """ return pulumi.get(self, "rule_index") @rule_index.setter def rule_index(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rule_index", value) @property @pulumi.getter(name="unixReadOnly") def unix_read_only(self) -> Optional[pulumi.Input[bool]]: """ Read only access """ return pulumi.get(self, "unix_read_only") @unix_read_only.setter def unix_read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_only", value) @property @pulumi.getter(name="unixReadWrite") def unix_read_write(self) -> Optional[pulumi.Input[bool]]: """ Read and write access """ return pulumi.get(self, "unix_read_write") @unix_read_write.setter def unix_read_write(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_write", value) @pulumi.input_type class VolumePropertiesExportPolicyArgs: def __init__(__self__, *, rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None): """ Set of export 
policy rules :param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule """ if rules is not None: pulumi.set(__self__, "rules", rules) @property @pulumi.getter def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]: """ Export policy rule """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]): pulumi.set(self, "rules", value)
    def __init__(__self__, *,
                 allowed_clients: Optional[pulumi.Input[str]] = None,
                 cifs: Optional[pulumi.Input[bool]] = None,
                 nfsv3: Optional[pulumi.Input[bool]] = None,
                 nfsv4: Optional[pulumi.Input[bool]] = None,
                 rule_index: Optional[pulumi.Input[int]] = None,
                 unix_read_only: Optional[pulumi.Input[bool]] = None,
                 unix_read_write: Optional[pulumi.Input[bool]] = None):
        """
        Volume Export Policy Rule
        :param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
        :param pulumi.Input[bool] cifs: Allows CIFS protocol
        :param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol
        :param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
        :param pulumi.Input[int] rule_index: Order index
        :param pulumi.Input[bool] unix_read_only: Read only access
        :param pulumi.Input[bool] unix_read_write: Read and write access
        """
        if allowed_clients is not None:
            pulumi.set(__self__, "allowed_clients", allowed_clients)
        if cifs is not None:
            pulumi.set(__self__, "cifs", cifs)
        if nfsv3 is not None:
            pulumi.set(__self__, "nfsv3", nfsv3)
        if nfsv4 is not None:
            pulumi.set(__self__, "nfsv4", nfsv4)
        if rule_index is not None:
            pulumi.set(__self__, "rule_index", rule_index)
        if unix_read_only is not None:
            pulumi.set(__self__, "unix_read_only", unix_read_only)
        if unix_read_write is not None:
            pulumi.set(__self__, "unix_read_write", unix_read_write)
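A hypothetical usage sketch for this constructor. The import path is an assumption (the generated file sits under a provider package such as `pulumi_azure_native`; adjust to wherever this module actually lives), and the values are illustrative:

```python
# Assumed import path; the generated module's real location may differ.
from pulumi_azure_native import netapp

rule = netapp.ExportPolicyRuleArgs(
    rule_index=1,                    # rules are evaluated by order index
    allowed_clients='10.0.0.0/24',   # comma-separated CIDRs/hosts
    nfsv3=True,
    cifs=False,
    unix_read_write=True,
)
policy = netapp.VolumePropertiesExportPolicyArgs(rules=[rule])
```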
156
187
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = [ 'ActiveDirectoryArgs', 'ExportPolicyRuleArgs', 'VolumePropertiesExportPolicyArgs', ] @pulumi.input_type class ActiveDirectoryArgs: def __init__(__self__, *, active_directory_id: Optional[pulumi.Input[str]] = None, dns: Optional[pulumi.Input[str]] = None, domain: Optional[pulumi.Input[str]] = None, organizational_unit: Optional[pulumi.Input[str]] = None, password: Optional[pulumi.Input[str]] = None, smb_server_name: Optional[pulumi.Input[str]] = None, status: Optional[pulumi.Input[str]] = None, username: Optional[pulumi.Input[str]] = None): """ Active Directory :param pulumi.Input[str] active_directory_id: Id of the Active Directory :param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain :param pulumi.Input[str] domain: Name of the Active Directory domain :param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory :param pulumi.Input[str] password: Plain text password of Active Directory domain administrator :param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes :param pulumi.Input[str] status: Status of the Active Directory :param pulumi.Input[str] username: Username of Active Directory domain administrator """ if active_directory_id is not None: pulumi.set(__self__, "active_directory_id", active_directory_id) if dns is not None: pulumi.set(__self__, "dns", dns) if domain is not None: pulumi.set(__self__, "domain", domain) if organizational_unit is not None: pulumi.set(__self__, "organizational_unit", organizational_unit) if password is not None: pulumi.set(__self__, "password", password) if smb_server_name is not None: pulumi.set(__self__, "smb_server_name", smb_server_name) if status is not None: pulumi.set(__self__, "status", status) if username is not None: pulumi.set(__self__, "username", username) @property @pulumi.getter(name="activeDirectoryId") def active_directory_id(self) -> Optional[pulumi.Input[str]]: """ Id of the Active Directory """ return pulumi.get(self, "active_directory_id") @active_directory_id.setter def active_directory_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "active_directory_id", value) @property @pulumi.getter def dns(self) -> Optional[pulumi.Input[str]]: """ Comma separated list of DNS server IP addresses for the Active Directory domain """ return pulumi.get(self, "dns") @dns.setter def dns(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "dns", value) @property @pulumi.getter def domain(self) -> Optional[pulumi.Input[str]]: """ Name of the Active Directory domain """ return pulumi.get(self, "domain") @domain.setter def domain(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "domain", value) @property @pulumi.getter(name="organizationalUnit") def organizational_unit(self) -> Optional[pulumi.Input[str]]: """ The Organizational Unit (OU) within the Windows Active Directory """ return pulumi.get(self, "organizational_unit") @organizational_unit.setter def organizational_unit(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "organizational_unit", value) 
@property @pulumi.getter def password(self) -> Optional[pulumi.Input[str]]: """ Plain text password of Active Directory domain administrator """ return pulumi.get(self, "password") @password.setter def password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "password", value) @property @pulumi.getter(name="smbServerName") def smb_server_name(self) -> Optional[pulumi.Input[str]]: """ NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes """ return pulumi.get(self, "smb_server_name") @smb_server_name.setter def smb_server_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "smb_server_name", value) @property @pulumi.getter def status(self) -> Optional[pulumi.Input[str]]: """ Status of the Active Directory """ return pulumi.get(self, "status") @status.setter def status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "status", value) @property @pulumi.getter def username(self) -> Optional[pulumi.Input[str]]: """ Username of Active Directory domain administrator """ return pulumi.get(self, "username") @username.setter def username(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "username", value) @pulumi.input_type class ExportPolicyRuleArgs: def __init__(__self__, *, allowed_clients: Optional[pulumi.Input[str]] = None, cifs: Optional[pulumi.Input[bool]] = None, nfsv3: Optional[pulumi.Input[bool]] = None, nfsv4: Optional[pulumi.Input[bool]] = None, rule_index: Optional[pulumi.Input[int]] = None, unix_read_only: Optional[pulumi.Input[bool]] = None, unix_read_write: Optional[pulumi.Input[bool]] = None): """ Volume Export Policy Rule :param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names :param pulumi.Input[bool] cifs: Allows CIFS protocol :param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol :param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later :param pulumi.Input[int] rule_index: Order index :param pulumi.Input[bool] unix_read_only: Read only access :param pulumi.Input[bool] unix_read_write: Read and write access """ if allowed_clients is not None: pulumi.set(__self__, "allowed_clients", allowed_clients) if cifs is not None: pulumi.set(__self__, "cifs", cifs) if nfsv3 is not None: pulumi.set(__self__, "nfsv3", nfsv3) if nfsv4 is not None: pulumi.set(__self__, "nfsv4", nfsv4) if rule_index is not None: pulumi.set(__self__, "rule_index", rule_index) if unix_read_only is not None: pulumi.set(__self__, "unix_read_only", unix_read_only) if unix_read_write is not None: pulumi.set(__self__, "unix_read_write", unix_read_write) @property @pulumi.getter(name="allowedClients") def allowed_clients(self) -> Optional[pulumi.Input[str]]: """ Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names """ return pulumi.get(self, "allowed_clients") @allowed_clients.setter def allowed_clients(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "allowed_clients", value) @property @pulumi.getter def cifs(self) -> Optional[pulumi.Input[bool]]: """ Allows CIFS protocol """ return pulumi.get(self, "cifs") @cifs.setter def cifs(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "cifs", value) @property @pulumi.getter def nfsv3(self) -> Optional[pulumi.Input[bool]]: """ Allows NFSv3 protocol """ return pulumi.get(self, "nfsv3") @nfsv3.setter def nfsv3(self, value: 
Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv3", value) @property @pulumi.getter def nfsv4(self) -> Optional[pulumi.Input[bool]]: """ Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later """ return pulumi.get(self, "nfsv4") @nfsv4.setter def nfsv4(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "nfsv4", value) @property @pulumi.getter(name="ruleIndex") def rule_index(self) -> Optional[pulumi.Input[int]]: """ Order index """ return pulumi.get(self, "rule_index") @rule_index.setter def rule_index(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rule_index", value) @property @pulumi.getter(name="unixReadOnly") def unix_read_only(self) -> Optional[pulumi.Input[bool]]: """ Read only access """ return pulumi.get(self, "unix_read_only") @unix_read_only.setter def unix_read_only(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_only", value) @property @pulumi.getter(name="unixReadWrite") def unix_read_write(self) -> Optional[pulumi.Input[bool]]: """ Read and write access """ return pulumi.get(self, "unix_read_write") @unix_read_write.setter def unix_read_write(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "unix_read_write", value) @pulumi.input_type class VolumePropertiesExportPolicyArgs: def __init__(__self__, *, rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None): """ Set of export policy rules :param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule """ if rules is not None: pulumi.set(__self__, "rules", rules) @property @pulumi.getter def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]: """ Export policy rule """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]): pulumi.set(self, "rules", value)
__init__
Constructs a SubtopicPageContents domain object.

Args:
    subtitled_html: SubtitledHtml. The html data being displayed on the page.
    recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages.
    written_translations: WrittenTranslations. The text translations of the subtopic page content.
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" # MASKED: __init__ function (lines 77-92) def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. """ self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. 
A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def convert_html_fields_in_subtopic_page_contents( cls, subtopic_page_contents_dict, conversion_fn): """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. Args: subtopic_page_contents_dict: dict. The dict representation of subtopic page contents. conversion_fn: function. The conversion function to be applied on the subtopic_page_contents_dict. Returns: dict. 
The converted subtopic_page_contents_dict. """ subtopic_page_contents_dict['written_translations'] = ( state_domain.WrittenTranslations. convert_html_in_written_translations( subtopic_page_contents_dict['written_translations'], conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. """ versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. 
""" self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError. One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any( self.language_code == lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
    def __init__(
            self, subtitled_html, recorded_voiceovers, written_translations):
        """Constructs a SubtopicPageContents domain object.

        Args:
            subtitled_html: SubtitledHtml. The html data being displayed on
                the page.
            recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers
                for the subtopic page content and their translations in
                different languages.
            written_translations: WrittenTranslations. The text translations
                of the subtopic page content.
        """
        self.subtitled_html = subtitled_html
        self.recorded_voiceovers = recorded_voiceovers
        self.written_translations = written_translations
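For reference, `create_default_subtopic_page_contents` wires one shared content id through all three constituents. A standalone sketch of the resulting dict shape (the `'content'` id is an assumption standing in for `feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID`, and the empty-html default mirrors `create_default_subtitled_html`):

```python
# Sketch of the dict shape produced by the default-contents factory; the
# content id value is assumed, the real one comes from feconf.
DEFAULT_CONTENT_ID = 'content'

default_page_contents = {
    'subtitled_html': {'content_id': DEFAULT_CONTENT_ID, 'html': ''},
    'recorded_voiceovers': {
        'voiceovers_mapping': {DEFAULT_CONTENT_ID: {}}},
    'written_translations': {
        'translations_mapping': {DEFAULT_CONTENT_ID: {}}},
}
print(default_page_contents['subtitled_html']['content_id'])  # content
```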
77
92
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. 
""" self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. 
A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def convert_html_fields_in_subtopic_page_contents( cls, subtopic_page_contents_dict, conversion_fn): """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. Args: subtopic_page_contents_dict: dict. The dict representation of subtopic page contents. conversion_fn: function. The conversion function to be applied on the subtopic_page_contents_dict. Returns: dict. The converted subtopic_page_contents_dict. """ subtopic_page_contents_dict['written_translations'] = ( state_domain.WrittenTranslations. convert_html_in_written_translations( subtopic_page_contents_dict['written_translations'], conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. 
""" versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. """ self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError. One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any( self.language_code == lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
__init__
Constructs a SubtopicPage domain object.

Args:
    subtopic_page_id: str. The unique ID of the subtopic page.
    topic_id: str. The ID of the topic that this subtopic is a part
        of.
    page_contents: SubtopicPageContents. The html and audio
        translations to be surfaced to the learner.
    page_contents_schema_version: int. The schema version for the
        page contents object.
    language_code: str. The ISO 639-1 code for the language this
        subtopic page is written in.
    version: int. The current version of the subtopic page.
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. 
""" self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" # MASKED: __init__ function (lines 156-177) def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def convert_html_fields_in_subtopic_page_contents( cls, subtopic_page_contents_dict, conversion_fn): """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. Args: subtopic_page_contents_dict: dict. The dict representation of subtopic page contents. conversion_fn: function. The conversion function to be applied on the subtopic_page_contents_dict. Returns: dict. The converted subtopic_page_contents_dict. 
""" subtopic_page_contents_dict['written_translations'] = ( state_domain.WrittenTranslations. convert_html_in_written_translations( subtopic_page_contents_dict['written_translations'], conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. """ versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. 
""" self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError. One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any( self.language_code == lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
    def __init__(
            self, subtopic_page_id, topic_id, page_contents,
            page_contents_schema_version, language_code, version):
        """Constructs a SubtopicPage domain object.

        Args:
            subtopic_page_id: str. The unique ID of the subtopic page.
            topic_id: str. The ID of the topic that this subtopic is a part
                of.
            page_contents: SubtopicPageContents. The html and audio
                translations to be surfaced to the learner.
            page_contents_schema_version: int. The schema version for the
                page contents object.
            language_code: str. The ISO 639-1 code for the language this
                subtopic page is written in.
            version: int. The current version of the subtopic page.
        """
        self.id = subtopic_page_id
        self.topic_id = topic_id
        self.page_contents = page_contents
        self.page_contents_schema_version = page_contents_schema_version
        self.language_code = language_code
        self.version = version
156
177
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. 
""" self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. 
A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def convert_html_fields_in_subtopic_page_contents( cls, subtopic_page_contents_dict, conversion_fn): """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. Args: subtopic_page_contents_dict: dict. The dict representation of subtopic page contents. conversion_fn: function. The conversion function to be applied on the subtopic_page_contents_dict. Returns: dict. The converted subtopic_page_contents_dict. """ subtopic_page_contents_dict['written_translations'] = ( state_domain.WrittenTranslations. convert_html_in_written_translations( subtopic_page_contents_dict['written_translations'], conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. 
""" versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. """ self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError. One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any( self.language_code == lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
convert_html_fields_in_subtopic_page_contents
Applies a conversion function on all the html strings in subtopic
page contents to migrate them to a desired state.

Args:
    subtopic_page_contents_dict: dict. The dict representation of
        subtopic page contents.
    conversion_fn: function. The conversion function to be applied on
        the subtopic_page_contents_dict.

Returns:
    dict. The converted subtopic_page_contents_dict.
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. 
""" self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. 
A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) # MASKED: convert_html_fields_in_subtopic_page_contents function (lines 227-250) @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. """ versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. 
""" self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError. One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any( self.language_code == lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
    @classmethod
    def convert_html_fields_in_subtopic_page_contents(
            cls, subtopic_page_contents_dict, conversion_fn):
        """Applies a conversion function on all the html strings in subtopic
        page contents to migrate them to a desired state.

        Args:
            subtopic_page_contents_dict: dict. The dict representation of
                subtopic page contents.
            conversion_fn: function. The conversion function to be applied on
                the subtopic_page_contents_dict.

        Returns:
            dict. The converted subtopic_page_contents_dict.
        """
        subtopic_page_contents_dict['written_translations'] = (
            state_domain.WrittenTranslations.
            convert_html_in_written_translations(
                subtopic_page_contents_dict['written_translations'],
                conversion_fn))
        subtopic_page_contents_dict['subtitled_html']['html'] = (
            conversion_fn(
                subtopic_page_contents_dict['subtitled_html']['html']))
        return subtopic_page_contents_dict
227
250
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. 
""" self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. 
A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def convert_html_fields_in_subtopic_page_contents( cls, subtopic_page_contents_dict, conversion_fn): """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. Args: subtopic_page_contents_dict: dict. The dict representation of subtopic page contents. conversion_fn: function. The conversion function to be applied on the subtopic_page_contents_dict. Returns: dict. The converted subtopic_page_contents_dict. """ subtopic_page_contents_dict['written_translations'] = ( state_domain.WrittenTranslations. convert_html_in_written_translations( subtopic_page_contents_dict['written_translations'], conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. 
""" versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. """ self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError. One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any( self.language_code == lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
validate
Validates various properties of the SubtopicPage object.

Raises:
    ValidationError. One or more attributes of the subtopic page are
        invalid.
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. 
""" self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. 
A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def convert_html_fields_in_subtopic_page_contents( cls, subtopic_page_contents_dict, conversion_fn): """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. Args: subtopic_page_contents_dict: dict. The dict representation of subtopic page contents. conversion_fn: function. The conversion function to be applied on the subtopic_page_contents_dict. Returns: dict. The converted subtopic_page_contents_dict. """ subtopic_page_contents_dict['written_translations'] = ( state_domain.WrittenTranslations. convert_html_in_written_translations( subtopic_page_contents_dict['written_translations'], conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. 
""" versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. """ self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) # MASKED: validate function (lines 363-403)
def validate(self):
    """Validates various properties of the SubtopicPage object.

    Raises:
        ValidationError. One or more attributes of the subtopic page are
            invalid.
    """
    if not isinstance(self.topic_id, python_utils.BASESTRING):
        raise utils.ValidationError(
            'Expected topic_id to be a string, received %s' % self.topic_id)
    if not isinstance(self.version, int):
        raise utils.ValidationError(
            'Expected version number to be an int, received %s' %
            self.version)
    self.page_contents.validate()
    if not isinstance(self.page_contents_schema_version, int):
        raise utils.ValidationError(
            'Expected page contents schema version to be an integer, '
            'received %s' % self.page_contents_schema_version)
    if (
            self.page_contents_schema_version !=
            feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
        raise utils.ValidationError(
            'Expected page contents schema version to be %s, received %s'
            % (
                feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION,
                self.page_contents_schema_version)
        )
    if not isinstance(self.language_code, python_utils.BASESTRING):
        raise utils.ValidationError(
            'Expected language code to be a string, received %s' %
            self.language_code)
    if not any(
            self.language_code == lc['code']
            for lc in constants.SUPPORTED_CONTENT_LANGUAGES):
        raise utils.ValidationError(
            'Invalid language code: %s' % self.language_code)
363
403
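The validate() implementation above (occupying lines 363-403 of the source file) checks each field in declaration order and raises on the first violation. A minimal sketch of exercising it, assuming an Oppia development checkout where these modules import (the variable values are illustrative only):

```python
from core import utils
from core.domain import subtopic_page_domain

# The defaults satisfy every check: topic_id is a str, version is 0,
# the page contents schema version matches feconf, and the language
# code is the supported default.
page = subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
    1, 'topic_id')
page.validate()  # no exception

# Any field that breaks its contract raises utils.ValidationError.
page.language_code = 'invalid_code'
try:
    page.validate()
except utils.ValidationError as e:
    print(e)  # Invalid language code: invalid_code
```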
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from __future__ import absolute_import from __future__ import unicode_literals from core import feconf from core import python_utils from core import utils from core.constants import constants from core.domain import change_domain from core.domain import html_validation_service from core.domain import state_domain from core.platform import models (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(python_utils.OBJECT): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. 
""" self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. """ content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: dict. A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ page_contents = state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']) page_contents.validate() return cls( page_contents, state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(python_utils.OBJECT): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: dict. A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. 
A subtopic object with given id, topic_id and default page contents field. """ subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def convert_html_fields_in_subtopic_page_contents( cls, subtopic_page_contents_dict, conversion_fn): """Applies a conversion function on all the html strings in subtopic page contents to migrate them to a desired state. Args: subtopic_page_contents_dict: dict. The dict representation of subtopic page contents. conversion_fn: function. The conversion function to be applied on the subtopic_page_contents_dict. Returns: dict. The converted subtopic_page_contents_dict. """ subtopic_page_contents_dict['written_translations'] = ( state_domain.WrittenTranslations. convert_html_in_written_translations( subtopic_page_contents_dict['written_translations'], conversion_fn)) subtopic_page_contents_dict['subtitled_html']['html'] = ( conversion_fn( subtopic_page_contents_dict['subtitled_html']['html'])) return subtopic_page_contents_dict @classmethod def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict): """Converts v1 SubtopicPage Contents schema to the v2 schema. v2 schema introduces the new schema for Math components. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.add_math_content_to_math_rte_components) @classmethod def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict): """Converts v2 SubtopicPage Contents schema to the v3 schema. v3 schema deprecates oppia-noninteractive-svgdiagram tag and converts existing occurences of it to oppia-noninteractive-image tag. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.convert_svg_diagram_tags_to_image_tags) @classmethod def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict): """Converts v3 SubtopicPage Contents schema to the v4 schema. v4 schema fixes HTML encoding issues. Args: page_contents_dict: dict. A dict used to initialize a SubtopicPage domain object. Returns: dict. The converted page_contents_dict. """ return cls.convert_html_fields_in_subtopic_page_contents( page_contents_dict, html_validation_service.fix_incorrectly_encoded_chars) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_page_contents dict from current_version to current_version + 1. Note that the versioned_page_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. 
""" versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. """ self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError. One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any( self.language_code == lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES ): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
checkFormatVisitor
Run checkFormat in parallel for the given files.

Args:
    arg: a tuple (pool, result_list, owned_directories, error_messages)
        pool and result_list are for starting tasks asynchronously.
        owned_directories tracks directories listed in the CODEOWNERS file.
        error_messages is a list of string format errors.
    dir_name: the parent directory of the given files.
    names: a list of file names.
#!/usr/bin/env python3 import argparse import common import functools import multiprocessing import os import os.path import pathlib import re import subprocess import stat import sys import traceback import shutil import paths EXCLUDED_PREFIXES = ("./generated/", "./thirdparty/", "./build", "./.git/", "./bazel-", "./.cache", "./source/extensions/extensions_build_config.bzl", "./bazel/toolchains/configs/", "./tools/testdata/check_format/", "./tools/pyformat/") SUFFIXES = ("BUILD", "WORKSPACE", ".bzl", ".cc", ".h", ".java", ".m", ".md", ".mm", ".proto", ".rst") DOCS_SUFFIX = (".md", ".rst") PROTO_SUFFIX = (".proto") # Files in these paths can make reference to protobuf stuff directly GOOGLE_PROTOBUF_WHITELIST = ("ci/prebuilt", "source/common/protobuf", "api/test") REPOSITORIES_BZL = "bazel/repositories.bzl" # Files matching these exact names can reference real-world time. These include the class # definitions for real-world time, the construction of them in main(), and perf annotation. # For now it includes the validation server but that really should be injected too. REAL_TIME_WHITELIST = ("./source/common/common/utility.h", "./source/extensions/filters/http/common/aws/utility.cc", "./source/common/event/real_time_system.cc", "./source/common/event/real_time_system.h", "./source/exe/main_common.cc", "./source/exe/main_common.h", "./source/server/config_validation/server.cc", "./source/common/common/perf_annotation.h", "./test/test_common/simulated_time_system.cc", "./test/test_common/simulated_time_system.h", "./test/test_common/test_time.cc", "./test/test_common/test_time.h", "./test/test_common/utility.cc", "./test/test_common/utility.h", "./test/integration/integration.h") # Files in these paths can use MessageLite::SerializeAsString SERIALIZE_AS_STRING_WHITELIST = ( "./source/common/config/version_converter.cc", "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc", "./test/common/protobuf/utility_test.cc", "./test/common/grpc/codec_test.cc", "./test/common/grpc/codec_fuzz_test.cc", ) # Files in these paths can use Protobuf::util::JsonStringToMessage JSON_STRING_TO_MESSAGE_WHITELIST = ("./source/common/protobuf/utility.cc") # Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing # ones were grandfathered as part of PR #8484 for backwards compatibility. 
HISTOGRAM_WITH_SI_SUFFIX_WHITELIST = ("downstream_cx_length_ms", "downstream_cx_length_ms", "initialization_time_ms", "loop_duration_us", "poll_delay_us", "request_time_ms", "upstream_cx_connect_ms", "upstream_cx_length_ms") # Files in these paths can use std::regex STD_REGEX_WHITELIST = ("./source/common/common/utility.cc", "./source/common/common/regex.h", "./source/common/common/regex.cc", "./source/common/stats/tag_extractor_impl.h", "./source/common/stats/tag_extractor_impl.cc", "./source/common/access_log/access_log_formatter.cc", "./source/extensions/filters/http/squash/squash_filter.h", "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/http/admin.h", "./source/server/http/admin.cc", "./tools/clang_tools/api_booster/main.cc", "./tools/clang_tools/api_booster/proto_cxx_utils.cc") # Only one C++ file should instantiate grpc_init GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc") CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-9") BUILDIFIER_PATH = paths.getBuildifier() BUILDOZER_PATH = paths.getBuildozer() ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "envoy_build_fixer.py") HEADER_ORDER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "header_order.py") SUBDIR_SET = set(common.includeDirOrder()) INCLUDE_ANGLE = "#include <" INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE) PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE) X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*') # yapf: disable PROTOBUF_TYPE_ERRORS = { # Well-known types should be referenced from the ProtobufWkt namespace. "Protobuf::Any": "ProtobufWkt::Any", "Protobuf::Empty": "ProtobufWkt::Empty", "Protobuf::ListValue": "ProtobufWkt::ListValue", "Protobuf::NULL_VALUE": "ProtobufWkt::NULL_VALUE", "Protobuf::StringValue": "ProtobufWkt::StringValue", "Protobuf::Struct": "ProtobufWkt::Struct", "Protobuf::Value": "ProtobufWkt::Value", # Other common mis-namespacing of protobuf types. 
"ProtobufWkt::Map": "Protobuf::Map", "ProtobufWkt::MapPair": "Protobuf::MapPair", "ProtobufUtil::MessageDifferencer": "Protobuf::util::MessageDifferencer" } LIBCXX_REPLACEMENTS = { "absl::make_unique<": "std::make_unique<", } UNOWNED_EXTENSIONS = { "extensions/filters/http/ratelimit", "extensions/filters/http/buffer", "extensions/filters/http/rbac", "extensions/filters/http/ip_tagging", "extensions/filters/http/tap", "extensions/filters/http/health_check", "extensions/filters/http/cors", "extensions/filters/http/ext_authz", "extensions/filters/http/dynamo", "extensions/filters/http/lua", "extensions/filters/http/common", "extensions/filters/common", "extensions/filters/common/ratelimit", "extensions/filters/common/rbac", "extensions/filters/common/lua", "extensions/filters/listener/original_dst", "extensions/filters/listener/proxy_protocol", "extensions/stat_sinks/statsd", "extensions/stat_sinks/common", "extensions/stat_sinks/common/statsd", "extensions/health_checkers/redis", "extensions/access_loggers/grpc", "extensions/access_loggers/file", "extensions/common/tap", "extensions/transport_sockets/raw_buffer", "extensions/transport_sockets/tap", "extensions/tracers/zipkin", "extensions/tracers/dynamic_ot", "extensions/tracers/opencensus", "extensions/tracers/lightstep", "extensions/tracers/common", "extensions/tracers/common/ot", "extensions/retry/host/previous_hosts", "extensions/filters/network/ratelimit", "extensions/filters/network/client_ssl_auth", "extensions/filters/network/rbac", "extensions/filters/network/tcp_proxy", "extensions/filters/network/echo", "extensions/filters/network/ext_authz", "extensions/filters/network/redis_proxy", "extensions/filters/network/kafka", "extensions/filters/network/kafka/protocol", "extensions/filters/network/kafka/serialization", "extensions/filters/network/mongo_proxy", "extensions/filters/network/common", "extensions/filters/network/common/redis", } # yapf: enable # Map a line transformation function across each line of a file. # .bak temporaries. def replaceLines(path, line_xform): # We used to use fileinput in the older Python 2.7 script, but this doesn't do # inplace mode and UTF-8 in Python 3, so doing it the manual way. output_lines = [line_xform(line) for line in readLines(path)] pathlib.Path(path).write_text('\n'.join(output_lines), encoding='utf-8') # Obtain all the lines in a given file. def readLines(path): return readFile(path).split('\n') # Read a UTF-8 encoded file as a str. def readFile(path): return pathlib.Path(path).read_text(encoding='utf-8') # lookPath searches for the given executable in all directories in PATH # environment variable. If it cannot be found, empty string is returned. def lookPath(executable): for path_dir in os.environ["PATH"].split(os.pathsep): executable_path = os.path.join(path_dir, executable) if os.path.exists(executable_path): return executable_path return "" # pathExists checks whether the given path exists. This function assumes that # the path is absolute and evaluates environment variables. def pathExists(executable): return os.path.exists(os.path.expandvars(executable)) # executableByOthers checks whether the given path has execute permission for # others. def executableByOthers(executable): st = os.stat(os.path.expandvars(executable)) return bool(st.st_mode & stat.S_IXOTH) # Check whether all needed external tools (clang-format, buildifier, buildozer) are # available. 
def checkTools(): error_messages = [] clang_format_abs_path = lookPath(CLANG_FORMAT_PATH) if clang_format_abs_path: if not executableByOthers(clang_format_abs_path): error_messages.append("command {} exists, but cannot be executed by other " "users".format(CLANG_FORMAT_PATH)) else: error_messages.append( "Command {} not found. If you have clang-format in version 8.x.x " "installed, but the binary name is different or it's not available in " "PATH, please use CLANG_FORMAT environment variable to specify the path. " "Examples:\n" " export CLANG_FORMAT=clang-format-9.0.0\n" " export CLANG_FORMAT=/opt/bin/clang-format-9\n" " export CLANG_FORMAT=/usr/local/opt/llvm@9/bin/clang-format".format(CLANG_FORMAT_PATH)) def checkBazelTool(name, path, var): bazel_tool_abs_path = lookPath(path) if bazel_tool_abs_path: if not executableByOthers(bazel_tool_abs_path): error_messages.append("command {} exists, but cannot be executed by other " "users".format(path)) elif pathExists(path): if not executableByOthers(path): error_messages.append("command {} exists, but cannot be executed by other " "users".format(path)) else: error_messages.append( "Command {} not found. If you have buildifier installed, but the binary " "name is different or it's not available in $GOPATH/bin, please use " "{} environment variable to specify the path. Example:\n" " export {}=/opt/bin/buildifier\n" "If you don't have buildifier installed, you can install it by:\n" " go get -u github.com/bazelbuild/buildtools/{}".format(path, var, var, name)) checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN') checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN') return error_messages def checkNamespace(file_path): for excluded_path in namespace_check_excluded_paths: if file_path.startswith(excluded_path): return [] nolint = "NOLINT(namespace-%s)" % namespace_check.lower() text = readFile(file_path) if not re.search("^\s*namespace\s+%s\s*{" % namespace_check, text, re.MULTILINE) and \ not nolint in text: return ["Unable to find %s namespace or %s for file: %s" % (namespace_check, nolint, file_path)] return [] def packageNameForProto(file_path): package_name = None error_message = [] result = PROTO_PACKAGE_REGEX.search(readFile(file_path)) if result is not None and len(result.groups()) == 1: package_name = result.group(1) if package_name is None: error_message = ["Unable to find package name for proto file: %s" % file_path] return [package_name, error_message] # To avoid breaking the Lyft import, we just check for path inclusion here. def whitelistedForProtobufDeps(file_path): return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \ any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_WHITELIST)) # Real-world time sources should not be instantiated in the source, except for a few # specific cases. They should be passed down from where they are instantied to where # they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager. 
def whitelistedForRealTime(file_path): if file_path.endswith(".md"): return True return file_path in REAL_TIME_WHITELIST def whitelistedForSerializeAsString(file_path): return file_path in SERIALIZE_AS_STRING_WHITELIST def whitelistedForJsonStringToMessage(file_path): return file_path in JSON_STRING_TO_MESSAGE_WHITELIST def whitelistedForHistogramSiSuffix(name): return name in HISTOGRAM_WITH_SI_SUFFIX_WHITELIST def whitelistedForStdRegex(file_path): return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST or file_path.endswith( DOCS_SUFFIX) def whitelistedForGrpcInit(file_path): return file_path in GRPC_INIT_WHITELIST def whitelistedForUnpackTo(file_path): return file_path.startswith("./test") or file_path in [ "./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h" ] def findSubstringAndReturnError(pattern, file_path, error_message): text = readFile(file_path) if pattern in text: error_messages = [file_path + ": " + error_message] for i, line in enumerate(text.splitlines()): if pattern in line: error_messages.append(" %s:%s" % (file_path, i + 1)) return error_messages return [] def errorIfNoSubstringFound(pattern, file_path, error_message): return [] if pattern in readFile(file_path) else [file_path + ": " + error_message] def isApiFile(file_path): return file_path.startswith(args.api_prefix) or file_path.startswith(args.api_shadow_prefix) def isBuildFile(file_path): basename = os.path.basename(file_path) if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"): return True return False def isExternalBuildFile(file_path): return isBuildFile(file_path) and (file_path.startswith("./bazel/external/") or file_path.startswith("./tools/clang_tools")) def isSkylarkFile(file_path): return file_path.endswith(".bzl") def isWorkspaceFile(file_path): return os.path.basename(file_path) == "WORKSPACE" def isBuildFixerExcludedFile(file_path): for excluded_path in build_fixer_check_excluded_paths: if file_path.startswith(excluded_path): return True return False def hasInvalidAngleBracketDirectory(line): if not line.startswith(INCLUDE_ANGLE): return False path = line[INCLUDE_ANGLE_LEN:] slash = path.find("/") if slash == -1: return False subdir = path[0:slash] return subdir in SUBDIR_SET VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* [a-z \-_]*: [a-z:`]") VERSION_HISTORY_NEW_RELEASE_REGEX = re.compile("^====[=]+$") def checkCurrentReleaseNotes(file_path, error_messages): in_current_release = False for line_number, line in enumerate(readLines(file_path)): def reportError(message): error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) if VERSION_HISTORY_NEW_RELEASE_REGEX.match(line): # If we were in the section for the current release this means we have passed it. if in_current_release: break # If we see a version marker we are now in the section for the current release. in_current_release = True if line.startswith("*") and not VERSION_HISTORY_NEW_LINE_REGEX.match(line): reportError("Version history line malformed. " "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % line) def checkFileContents(file_path, checker): error_messages = [] if file_path.endswith("version_history.rst"): # Version file checking has enough special cased logic to merit its own checks. # This only validates entries for the current release as very old release # notes have a different format. 
checkCurrentReleaseNotes(file_path, error_messages) for line_number, line in enumerate(readLines(file_path)): def reportError(message): error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) checker(line, file_path, reportError) return error_messages DOT_MULTI_SPACE_REGEX = re.compile("\\. +") def fixSourceLine(line): # Strip double space after '.' This may prove overenthusiastic and need to # be restricted to comments and metadata files but works for now. line = re.sub(DOT_MULTI_SPACE_REGEX, ". ", line) if hasInvalidAngleBracketDirectory(line): line = line.replace("<", '"').replace(">", '"') # Fix incorrect protobuf namespace references. for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items(): line = line.replace(invalid_construct, valid_construct) # Use recommended cpp stdlib for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items(): line = line.replace(invalid_construct, valid_construct) return line # We want to look for a call to condvar.waitFor, but there's no strong pattern # to the variable name of the condvar. If we just look for ".waitFor" we'll also # pick up time_system_.waitFor(...), and we don't want to return true for that # pattern. But in that case there is a strong pattern of using time_system in # various spellings as the variable name. def hasCondVarWaitFor(line): wait_for = line.find(".waitFor(") if wait_for == -1: return False preceding = line[0:wait_for] if preceding.endswith("time_system") or preceding.endswith("timeSystem()") or \ preceding.endswith("time_system_"): return False return True # Determines whether the filename is either in the specified subdirectory, or # at the top level. We consider files in the top level for the benefit of # the check_format testcases in tools/testdata/check_format. def isInSubdir(filename, *subdirs): # Skip this check for check_format's unit-tests. if filename.count("/") <= 1: return True for subdir in subdirs: if filename.startswith('./' + subdir + '/'): return True return False def checkSourceLine(line, file_path, reportError): # Check fixable errors. These may have been fixed already. if line.find(". ") != -1: reportError("over-enthusiastic spaces") if isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line): reportError( "Please do not use the raw literal x-envoy in source code. See Envoy::Http::PrefixValue.") if hasInvalidAngleBracketDirectory(line): reportError("envoy includes should not have angle brackets") for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items(): if invalid_construct in line: reportError("incorrect protobuf type reference %s; " "should be %s" % (invalid_construct, valid_construct)) for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items(): if invalid_construct in line: reportError("term %s should be replaced with standard library term %s" % (invalid_construct, valid_construct)) # Do not include the virtual_includes headers. if re.search("#include.*/_virtual_includes/", line): reportError("Don't include the virtual includes headers.") # Some errors cannot be fixed automatically, and actionable, consistent, # navigable messages should be emitted to make it easy to find and fix # the errors by hand. 
if not whitelistedForProtobufDeps(file_path): if '"google/protobuf' in line or "google::protobuf" in line: reportError("unexpected direct dependency on google.protobuf, use " "the definitions in common/protobuf/protobuf.h instead.") if line.startswith("#include <mutex>") or line.startswith("#include <condition_variable"): # We don't check here for std::mutex because that may legitimately show up in # comments, for example this one. reportError("Don't use <mutex> or <condition_variable*>, switch to " "Thread::MutexBasicLockable in source/common/common/thread.h") if line.startswith("#include <shared_mutex>"): # We don't check here for std::shared_timed_mutex because that may # legitimately show up in comments, for example this one. reportError("Don't use <shared_mutex>, use absl::Mutex for reader/writer locks.") if not whitelistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line: if "RealTimeSource" in line or \ ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \ "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \ "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line): reportError("Don't reference real-world time sources from production code; use injection") if not whitelistedForUnpackTo(file_path): if "UnpackTo" in line: reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead") # Check that we use the absl::Time library if "std::get_time" in line: if "test/" in file_path: reportError("Don't use std::get_time; use TestUtility::parseTime in tests") else: reportError("Don't use std::get_time; use the injectable time system") if "std::put_time" in line: reportError("Don't use std::put_time; use absl::Time equivalent instead") if "gmtime" in line: reportError("Don't use gmtime; use absl::Time equivalent instead") if "mktime" in line: reportError("Don't use mktime; use absl::Time equivalent instead") if "localtime" in line: reportError("Don't use localtime; use absl::Time equivalent instead") if "strftime" in line: reportError("Don't use strftime; use absl::FormatTime instead") if "strptime" in line: reportError("Don't use strptime; use absl::FormatTime instead") if "std::atomic_" in line: # The std::atomic_* free functions are functionally equivalent to calling # operations on std::atomic<T> objects, so prefer to use that instead. 
reportError("Don't use free std::atomic_* functions, use std::atomic<T> members instead.") if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h": # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that # can be used instead reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined " "in include/envoy/common/platform.h instead") if re.search("\{\s*\.\w+\s*\=", line): # Designated initializers are not part of the C++14 standard and are not supported # by MSVC reportError("Don't use designated initializers in struct initialization, " "they are not part of C++14") if " ?: " in line: # The ?: operator is non-standard, it is a GCC extension reportError("Don't use the '?:' operator, it is a non-standard GCC extension") if line.startswith("using testing::Test;"): reportError("Don't use 'using testing::Test;, elaborate the type instead") if line.startswith("using testing::TestWithParams;"): reportError("Don't use 'using testing::Test;, elaborate the type instead") if not whitelistedForSerializeAsString(file_path) and "SerializeAsString" in line: # The MessageLite::SerializeAsString doesn't generate deterministic serialization, # use MessageUtil::hash instead. reportError( "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead." ) if not whitelistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing # behavior. reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") if isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \ ('.counter(' in line or '.gauge(' in line or '.histogram(' in line): reportError("Don't lookup stats by name at runtime; use StatName saved during construction") if re.search("envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}", line): reportError("Don't use mangled Protobuf names for enum constants") hist_m = re.search("(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)", line) if hist_m and not whitelistedForHistogramSiSuffix(hist_m.group(0)): reportError( "Don't suffix histogram names with the unit symbol, " "it's already part of the histogram object and unit-supporting sinks can use this information natively, " "other sinks can add the suffix automatically on flush should they prefer to do so.") if not whitelistedForStdRegex(file_path) and "std::regex" in line: reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher") if not whitelistedForGrpcInit(file_path): grpc_init_or_shutdown = line.find("grpc_init()") grpc_shutdown = line.find("grpc_shutdown()") if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and grpc_shutdown < grpc_init_or_shutdown): grpc_init_or_shutdown = grpc_shutdown if grpc_init_or_shutdown != -1: comment = line.find("// ") if comment == -1 or comment > grpc_init_or_shutdown: reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " + "Grpc::GoogleGrpcContext. 
See #8282") def checkBuildLine(line, file_path, reportError): if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/")): reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel") if not whitelistedForProtobufDeps(file_path) and '"protobuf"' in line: reportError("unexpected direct external dependency on protobuf, use " "//source/common/protobuf instead.") if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and not isExternalBuildFile(file_path) and "@envoy//" in line): reportError("Superfluous '@envoy//' prefix") def fixBuildLine(file_path, line): if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and not isExternalBuildFile(file_path)): line = line.replace("@envoy//", "//") return line def fixBuildPath(file_path): replaceLines(file_path, functools.partial(fixBuildLine, file_path)) error_messages = [] # TODO(htuch): Add API specific BUILD fixer script. if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isSkylarkFile( file_path) and not isWorkspaceFile(file_path): if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0: error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path] if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: error_messages += ["buildifier rewrite failed for file: %s" % file_path] return error_messages def checkBuildPath(file_path): error_messages = [] if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isSkylarkFile( file_path) and not isWorkspaceFile(file_path): command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path) error_messages += executeCommand(command, "envoy_build_fixer check failed", file_path) if isBuildFile(file_path) and (file_path.startswith(args.api_prefix + "envoy") or file_path.startswith(args.api_shadow_prefix + "envoy")): found = False for line in readLines(file_path): if "api_proto_package(" in line: found = True break if not found: error_messages += ["API build file does not provide api_proto_package()"] command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path) error_messages += executeCommand(command, "buildifier check failed", file_path) error_messages += checkFileContents(file_path, checkBuildLine) return error_messages def fixSourcePath(file_path): replaceLines(file_path, fixSourceLine) error_messages = [] if not file_path.endswith(DOCS_SUFFIX): if not file_path.endswith(PROTO_SUFFIX): error_messages += fixHeaderOrder(file_path) error_messages += clangFormat(file_path) if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path): package_name, error_message = packageNameForProto(file_path) if package_name is None: error_messages += error_message return error_messages def checkSourcePath(file_path): error_messages = checkFileContents(file_path, checkSourceLine) if not file_path.endswith(DOCS_SUFFIX): if not file_path.endswith(PROTO_SUFFIX): error_messages += checkNamespace(file_path) command = ("%s --include_dir_order %s --path %s | diff %s -" % (HEADER_ORDER_PATH, include_dir_order, file_path, file_path)) error_messages += executeCommand(command, "header_order.py check failed", file_path) command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path)) error_messages += executeCommand(command, "clang-format check failed", file_path) if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path): package_name, error_message = 
packageNameForProto(file_path) if package_name is None: error_messages += error_message return error_messages # Example target outputs are: # - "26,27c26" # - "12,13d13" # - "7a8,9" def executeCommand(command, error_message, file_path, regex=re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")): try: output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip() if output: return output.decode('utf-8').split("\n") return [] except subprocess.CalledProcessError as e: if (e.returncode != 0 and e.returncode != 1): return ["ERROR: something went wrong while executing: %s" % e.cmd] # In case we can't find any line numbers, record an error message first. error_messages = ["%s for file: %s" % (error_message, file_path)] for line in e.output.decode('utf-8').splitlines(): for num in regex.findall(line): error_messages.append(" %s:%s" % (file_path, num)) return error_messages def fixHeaderOrder(file_path): command = "%s --rewrite --include_dir_order %s --path %s" % (HEADER_ORDER_PATH, include_dir_order, file_path) if os.system(command) != 0: return ["header_order.py rewrite error: %s" % (file_path)] return [] def clangFormat(file_path): command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path) if os.system(command) != 0: return ["clang-format rewrite error: %s" % (file_path)] return [] def checkFormat(file_path): if file_path.startswith(EXCLUDED_PREFIXES): return [] if not file_path.endswith(SUFFIXES): return [] error_messages = [] # Apply fixes first, if asked, and then run checks. If we wind up attempting to fix # an issue, but there's still an error, that's a problem. try_to_fix = operation_type == "fix" if isBuildFile(file_path) or isSkylarkFile(file_path) or isWorkspaceFile(file_path): if try_to_fix: error_messages += fixBuildPath(file_path) error_messages += checkBuildPath(file_path) else: if try_to_fix: error_messages += fixSourcePath(file_path) error_messages += checkSourcePath(file_path) if error_messages: return ["From %s" % file_path] + error_messages return error_messages def checkFormatReturnTraceOnError(file_path): """Run checkFormat and return the traceback of any exception.""" try: return checkFormat(file_path) except: return traceback.format_exc().split("\n") def checkOwners(dir_name, owned_directories, error_messages): """Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS Args: dir_name: the directory being checked. owned_directories: directories currently listed in CODEOWNERS. error_messages: where to put an error message for new unowned directories. """ found = False for owned in owned_directories: if owned.startswith(dir_name) or dir_name.startswith(owned): found = True if not found and dir_name not in UNOWNED_EXTENSIONS: error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name) # MASKED: checkFormatVisitor function (lines 775-805) # checkErrorMessages iterates over the list with error messages and prints # errors and returns a bool based on whether there were any errors. def checkErrorMessages(error_messages): if error_messages: for e in error_messages: print("ERROR: %s" % e) return True return False if __name__ == "__main__": parser = argparse.ArgumentParser(description="Check or fix file format.") parser.add_argument("operation_type", type=str, choices=["check", "fix"], help="specify if the run should 'check' or 'fix' format.") parser.add_argument( "target_path", type=str, nargs="?", default=".", help="specify the root directory for the script to recurse over. 
Default '.'.") parser.add_argument("--add-excluded-prefixes", type=str, nargs="+", help="exclude additional prefixes.") parser.add_argument("-j", "--num-workers", type=int, default=multiprocessing.cpu_count(), help="number of worker processes to use; defaults to one per core.") parser.add_argument("--api-prefix", type=str, default="./api/", help="path of the API tree.") parser.add_argument("--api-shadow-prefix", type=str, default="./generated_api_shadow/", help="path of the shadow API tree.") parser.add_argument("--skip_envoy_build_rule_check", action="store_true", help="skip checking for '@envoy//' prefix in build rules.") parser.add_argument("--namespace_check", type=str, nargs="?", default="Envoy", help="specify namespace check string. Default 'Envoy'.") parser.add_argument("--namespace_check_excluded_paths", type=str, nargs="+", default=[], help="exclude paths from the namespace_check.") parser.add_argument("--build_fixer_check_excluded_paths", type=str, nargs="+", default=[], help="exclude paths from envoy_build_fixer check.") parser.add_argument("--include_dir_order", type=str, default=",".join(common.includeDirOrder()), help="specify the header block include directory order.") args = parser.parse_args() operation_type = args.operation_type target_path = args.target_path envoy_build_rule_check = not args.skip_envoy_build_rule_check namespace_check = args.namespace_check namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ "./tools/api_boost/testdata/", "./tools/clang_tools/", ] build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [ "./bazel/external/", "./bazel/toolchains/", "./bazel/BUILD", "./tools/clang_tools", ] include_dir_order = args.include_dir_order if args.add_excluded_prefixes: EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes) # Check whether all needed external tools are available. ct_error_messages = checkTools() if checkErrorMessages(ct_error_messages): sys.exit(1) # Returns the list of directories with owners listed in CODEOWNERS. May append errors to # error_messages. def ownedDirectories(error_messages): owned = [] maintainers = [ '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@junr03', '@dnoe', '@dio', '@jmarantz' ] try: with open('./CODEOWNERS') as f: for line in f: # If this line is of the form "extensions/... @owner1 @owner2" capture the directory # name and store it in the list of directories with documented owners. m = re.search(r'.*(extensions[^@]*\s+)(@.*)', line) if m is not None and not line.startswith('#'): owned.append(m.group(1).strip()) owners = re.findall('@\S+', m.group(2).strip()) if len(owners) < 2: error_messages.append("Extensions require at least 2 owners in CODEOWNERS:\n" " {}".format(line)) maintainer = len(set(owners).intersection(set(maintainers))) > 0 if not maintainer: error_messages.append("Extensions require at least one maintainer OWNER:\n" " {}".format(line)) return owned except IOError: return [] # for the check format tests. # Calculate the list of owned directories once per run. error_messages = [] owned_directories = ownedDirectories(error_messages) if os.path.isfile(target_path): error_messages += checkFormat("./" + target_path) else: pool = multiprocessing.Pool(processes=args.num_workers) results = [] # For each file in target_path, start a new task in the pool and collect the # results (results is passed by reference, and is used as an output). 
for root, _, files in os.walk(target_path): checkFormatVisitor((pool, results, owned_directories, error_messages), root, files) # Close the pool to new tasks, wait for all of the running tasks to finish, # then collect the error messages. pool.close() pool.join() error_messages += sum((r.get() for r in results), []) if checkErrorMessages(error_messages): print("ERROR: check format failed. run 'tools/check_format.py fix'") sys.exit(1) if operation_type == "check": print("PASS")
def checkFormatVisitor(arg, dir_name, names):
  """Run checkFormat in parallel for the given files.

  Args:
    arg: a tuple (pool, result_list, owned_directories, error_messages)
      pool and result_list are for starting tasks asynchronously.
      owned_directories tracks directories listed in the CODEOWNERS file.
      error_messages is a list of string format errors.
    dir_name: the parent directory of the given files.
    names: a list of file names.
  """

  # Unpack the multiprocessing.Pool process pool and list of results. Since
  # python lists are passed as references, this is used to collect the list of
  # async results (futures) from running checkFormat and passing them back to
  # the caller.
  pool, result_list, owned_directories, error_messages = arg

  # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded
  # manner as it is a small and limited list.
  source_prefix = './source/'
  full_prefix = './source/extensions/'
  # Check to see if this directory is a subdir under /source/extensions.
  # Also ignore top level directories under /source/extensions since we don't
  # need owners for source/extensions/access_loggers etc, just the subdirectories.
  if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]:
    checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages)

  for file_name in names:
    result = pool.apply_async(checkFormatReturnTraceOnError, args=(dir_name + "/" + file_name,))
    result_list.append(result)
775
805
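checkFormatVisitor above is driven by the os.walk loop near the bottom of the script: one call per directory, with a shared multiprocessing.Pool fanning the per-file checkFormat work out. A condensed sketch of that driver, assuming checkFormatVisitor and its helpers are in scope and an Envoy checkout is the working directory (the worker count and the empty owned_directories list are placeholders; the real script parses CODEOWNERS first):

```python
import multiprocessing
import os

pool = multiprocessing.Pool(processes=4)
results = []          # async results appended by checkFormatVisitor
error_messages = []   # CODEOWNERS errors appended synchronously

for root, _, files in os.walk('.'):
    checkFormatVisitor((pool, results, [], error_messages), root, files)

pool.close()
pool.join()
# Each async result resolves to the per-file error list.
error_messages += sum((r.get() for r in results), [])
```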
#!/usr/bin/env python3 import argparse import common import functools import multiprocessing import os import os.path import pathlib import re import subprocess import stat import sys import traceback import shutil import paths EXCLUDED_PREFIXES = ("./generated/", "./thirdparty/", "./build", "./.git/", "./bazel-", "./.cache", "./source/extensions/extensions_build_config.bzl", "./bazel/toolchains/configs/", "./tools/testdata/check_format/", "./tools/pyformat/") SUFFIXES = ("BUILD", "WORKSPACE", ".bzl", ".cc", ".h", ".java", ".m", ".md", ".mm", ".proto", ".rst") DOCS_SUFFIX = (".md", ".rst") PROTO_SUFFIX = (".proto") # Files in these paths can make reference to protobuf stuff directly GOOGLE_PROTOBUF_WHITELIST = ("ci/prebuilt", "source/common/protobuf", "api/test") REPOSITORIES_BZL = "bazel/repositories.bzl" # Files matching these exact names can reference real-world time. These include the class # definitions for real-world time, the construction of them in main(), and perf annotation. # For now it includes the validation server but that really should be injected too. REAL_TIME_WHITELIST = ("./source/common/common/utility.h", "./source/extensions/filters/http/common/aws/utility.cc", "./source/common/event/real_time_system.cc", "./source/common/event/real_time_system.h", "./source/exe/main_common.cc", "./source/exe/main_common.h", "./source/server/config_validation/server.cc", "./source/common/common/perf_annotation.h", "./test/test_common/simulated_time_system.cc", "./test/test_common/simulated_time_system.h", "./test/test_common/test_time.cc", "./test/test_common/test_time.h", "./test/test_common/utility.cc", "./test/test_common/utility.h", "./test/integration/integration.h") # Files in these paths can use MessageLite::SerializeAsString SERIALIZE_AS_STRING_WHITELIST = ( "./source/common/config/version_converter.cc", "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc", "./test/common/protobuf/utility_test.cc", "./test/common/grpc/codec_test.cc", "./test/common/grpc/codec_fuzz_test.cc", ) # Files in these paths can use Protobuf::util::JsonStringToMessage JSON_STRING_TO_MESSAGE_WHITELIST = ("./source/common/protobuf/utility.cc") # Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing # ones were grandfathered as part of PR #8484 for backwards compatibility. 
HISTOGRAM_WITH_SI_SUFFIX_WHITELIST = ("downstream_cx_length_ms", "downstream_cx_length_ms", "initialization_time_ms", "loop_duration_us", "poll_delay_us", "request_time_ms", "upstream_cx_connect_ms", "upstream_cx_length_ms") # Files in these paths can use std::regex STD_REGEX_WHITELIST = ("./source/common/common/utility.cc", "./source/common/common/regex.h", "./source/common/common/regex.cc", "./source/common/stats/tag_extractor_impl.h", "./source/common/stats/tag_extractor_impl.cc", "./source/common/access_log/access_log_formatter.cc", "./source/extensions/filters/http/squash/squash_filter.h", "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/http/admin.h", "./source/server/http/admin.cc", "./tools/clang_tools/api_booster/main.cc", "./tools/clang_tools/api_booster/proto_cxx_utils.cc") # Only one C++ file should instantiate grpc_init GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc") CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-9") BUILDIFIER_PATH = paths.getBuildifier() BUILDOZER_PATH = paths.getBuildozer() ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "envoy_build_fixer.py") HEADER_ORDER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "header_order.py") SUBDIR_SET = set(common.includeDirOrder()) INCLUDE_ANGLE = "#include <" INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE) PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE) X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*') # yapf: disable PROTOBUF_TYPE_ERRORS = { # Well-known types should be referenced from the ProtobufWkt namespace. "Protobuf::Any": "ProtobufWkt::Any", "Protobuf::Empty": "ProtobufWkt::Empty", "Protobuf::ListValue": "ProtobufWkt::ListValue", "Protobuf::NULL_VALUE": "ProtobufWkt::NULL_VALUE", "Protobuf::StringValue": "ProtobufWkt::StringValue", "Protobuf::Struct": "ProtobufWkt::Struct", "Protobuf::Value": "ProtobufWkt::Value", # Other common mis-namespacing of protobuf types. 
"ProtobufWkt::Map": "Protobuf::Map", "ProtobufWkt::MapPair": "Protobuf::MapPair", "ProtobufUtil::MessageDifferencer": "Protobuf::util::MessageDifferencer" } LIBCXX_REPLACEMENTS = { "absl::make_unique<": "std::make_unique<", } UNOWNED_EXTENSIONS = { "extensions/filters/http/ratelimit", "extensions/filters/http/buffer", "extensions/filters/http/rbac", "extensions/filters/http/ip_tagging", "extensions/filters/http/tap", "extensions/filters/http/health_check", "extensions/filters/http/cors", "extensions/filters/http/ext_authz", "extensions/filters/http/dynamo", "extensions/filters/http/lua", "extensions/filters/http/common", "extensions/filters/common", "extensions/filters/common/ratelimit", "extensions/filters/common/rbac", "extensions/filters/common/lua", "extensions/filters/listener/original_dst", "extensions/filters/listener/proxy_protocol", "extensions/stat_sinks/statsd", "extensions/stat_sinks/common", "extensions/stat_sinks/common/statsd", "extensions/health_checkers/redis", "extensions/access_loggers/grpc", "extensions/access_loggers/file", "extensions/common/tap", "extensions/transport_sockets/raw_buffer", "extensions/transport_sockets/tap", "extensions/tracers/zipkin", "extensions/tracers/dynamic_ot", "extensions/tracers/opencensus", "extensions/tracers/lightstep", "extensions/tracers/common", "extensions/tracers/common/ot", "extensions/retry/host/previous_hosts", "extensions/filters/network/ratelimit", "extensions/filters/network/client_ssl_auth", "extensions/filters/network/rbac", "extensions/filters/network/tcp_proxy", "extensions/filters/network/echo", "extensions/filters/network/ext_authz", "extensions/filters/network/redis_proxy", "extensions/filters/network/kafka", "extensions/filters/network/kafka/protocol", "extensions/filters/network/kafka/serialization", "extensions/filters/network/mongo_proxy", "extensions/filters/network/common", "extensions/filters/network/common/redis", } # yapf: enable # Map a line transformation function across each line of a file. # .bak temporaries. def replaceLines(path, line_xform): # We used to use fileinput in the older Python 2.7 script, but this doesn't do # inplace mode and UTF-8 in Python 3, so doing it the manual way. output_lines = [line_xform(line) for line in readLines(path)] pathlib.Path(path).write_text('\n'.join(output_lines), encoding='utf-8') # Obtain all the lines in a given file. def readLines(path): return readFile(path).split('\n') # Read a UTF-8 encoded file as a str. def readFile(path): return pathlib.Path(path).read_text(encoding='utf-8') # lookPath searches for the given executable in all directories in PATH # environment variable. If it cannot be found, empty string is returned. def lookPath(executable): for path_dir in os.environ["PATH"].split(os.pathsep): executable_path = os.path.join(path_dir, executable) if os.path.exists(executable_path): return executable_path return "" # pathExists checks whether the given path exists. This function assumes that # the path is absolute and evaluates environment variables. def pathExists(executable): return os.path.exists(os.path.expandvars(executable)) # executableByOthers checks whether the given path has execute permission for # others. def executableByOthers(executable): st = os.stat(os.path.expandvars(executable)) return bool(st.st_mode & stat.S_IXOTH) # Check whether all needed external tools (clang-format, buildifier, buildozer) are # available. 
def checkTools(): error_messages = [] clang_format_abs_path = lookPath(CLANG_FORMAT_PATH) if clang_format_abs_path: if not executableByOthers(clang_format_abs_path): error_messages.append("command {} exists, but cannot be executed by other " "users".format(CLANG_FORMAT_PATH)) else: error_messages.append( "Command {} not found. If you have clang-format in version 8.x.x " "installed, but the binary name is different or it's not available in " "PATH, please use CLANG_FORMAT environment variable to specify the path. " "Examples:\n" " export CLANG_FORMAT=clang-format-9.0.0\n" " export CLANG_FORMAT=/opt/bin/clang-format-9\n" " export CLANG_FORMAT=/usr/local/opt/llvm@9/bin/clang-format".format(CLANG_FORMAT_PATH)) def checkBazelTool(name, path, var): bazel_tool_abs_path = lookPath(path) if bazel_tool_abs_path: if not executableByOthers(bazel_tool_abs_path): error_messages.append("command {} exists, but cannot be executed by other " "users".format(path)) elif pathExists(path): if not executableByOthers(path): error_messages.append("command {} exists, but cannot be executed by other " "users".format(path)) else: error_messages.append( "Command {} not found. If you have buildifier installed, but the binary " "name is different or it's not available in $GOPATH/bin, please use " "{} environment variable to specify the path. Example:\n" " export {}=/opt/bin/buildifier\n" "If you don't have buildifier installed, you can install it by:\n" " go get -u github.com/bazelbuild/buildtools/{}".format(path, var, var, name)) checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN') checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN') return error_messages def checkNamespace(file_path): for excluded_path in namespace_check_excluded_paths: if file_path.startswith(excluded_path): return [] nolint = "NOLINT(namespace-%s)" % namespace_check.lower() text = readFile(file_path) if not re.search("^\s*namespace\s+%s\s*{" % namespace_check, text, re.MULTILINE) and \ not nolint in text: return ["Unable to find %s namespace or %s for file: %s" % (namespace_check, nolint, file_path)] return [] def packageNameForProto(file_path): package_name = None error_message = [] result = PROTO_PACKAGE_REGEX.search(readFile(file_path)) if result is not None and len(result.groups()) == 1: package_name = result.group(1) if package_name is None: error_message = ["Unable to find package name for proto file: %s" % file_path] return [package_name, error_message] # To avoid breaking the Lyft import, we just check for path inclusion here. def whitelistedForProtobufDeps(file_path): return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \ any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_WHITELIST)) # Real-world time sources should not be instantiated in the source, except for a few # specific cases. They should be passed down from where they are instantied to where # they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager. 
def whitelistedForRealTime(file_path): if file_path.endswith(".md"): return True return file_path in REAL_TIME_WHITELIST def whitelistedForSerializeAsString(file_path): return file_path in SERIALIZE_AS_STRING_WHITELIST def whitelistedForJsonStringToMessage(file_path): return file_path in JSON_STRING_TO_MESSAGE_WHITELIST def whitelistedForHistogramSiSuffix(name): return name in HISTOGRAM_WITH_SI_SUFFIX_WHITELIST def whitelistedForStdRegex(file_path): return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST or file_path.endswith( DOCS_SUFFIX) def whitelistedForGrpcInit(file_path): return file_path in GRPC_INIT_WHITELIST def whitelistedForUnpackTo(file_path): return file_path.startswith("./test") or file_path in [ "./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h" ] def findSubstringAndReturnError(pattern, file_path, error_message): text = readFile(file_path) if pattern in text: error_messages = [file_path + ": " + error_message] for i, line in enumerate(text.splitlines()): if pattern in line: error_messages.append(" %s:%s" % (file_path, i + 1)) return error_messages return [] def errorIfNoSubstringFound(pattern, file_path, error_message): return [] if pattern in readFile(file_path) else [file_path + ": " + error_message] def isApiFile(file_path): return file_path.startswith(args.api_prefix) or file_path.startswith(args.api_shadow_prefix) def isBuildFile(file_path): basename = os.path.basename(file_path) if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"): return True return False def isExternalBuildFile(file_path): return isBuildFile(file_path) and (file_path.startswith("./bazel/external/") or file_path.startswith("./tools/clang_tools")) def isSkylarkFile(file_path): return file_path.endswith(".bzl") def isWorkspaceFile(file_path): return os.path.basename(file_path) == "WORKSPACE" def isBuildFixerExcludedFile(file_path): for excluded_path in build_fixer_check_excluded_paths: if file_path.startswith(excluded_path): return True return False def hasInvalidAngleBracketDirectory(line): if not line.startswith(INCLUDE_ANGLE): return False path = line[INCLUDE_ANGLE_LEN:] slash = path.find("/") if slash == -1: return False subdir = path[0:slash] return subdir in SUBDIR_SET VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* [a-z \-_]*: [a-z:`]") VERSION_HISTORY_NEW_RELEASE_REGEX = re.compile("^====[=]+$") def checkCurrentReleaseNotes(file_path, error_messages): in_current_release = False for line_number, line in enumerate(readLines(file_path)): def reportError(message): error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) if VERSION_HISTORY_NEW_RELEASE_REGEX.match(line): # If we were in the section for the current release this means we have passed it. if in_current_release: break # If we see a version marker we are now in the section for the current release. in_current_release = True if line.startswith("*") and not VERSION_HISTORY_NEW_LINE_REGEX.match(line): reportError("Version history line malformed. " "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % line) def checkFileContents(file_path, checker): error_messages = [] if file_path.endswith("version_history.rst"): # Version file checking has enough special cased logic to merit its own checks. # This only validates entries for the current release as very old release # notes have a different format. 
checkCurrentReleaseNotes(file_path, error_messages) for line_number, line in enumerate(readLines(file_path)): def reportError(message): error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message)) checker(line, file_path, reportError) return error_messages DOT_MULTI_SPACE_REGEX = re.compile("\\. +") def fixSourceLine(line): # Strip double space after '.' This may prove overenthusiastic and need to # be restricted to comments and metadata files but works for now. line = re.sub(DOT_MULTI_SPACE_REGEX, ". ", line) if hasInvalidAngleBracketDirectory(line): line = line.replace("<", '"').replace(">", '"') # Fix incorrect protobuf namespace references. for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items(): line = line.replace(invalid_construct, valid_construct) # Use recommended cpp stdlib for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items(): line = line.replace(invalid_construct, valid_construct) return line # We want to look for a call to condvar.waitFor, but there's no strong pattern # to the variable name of the condvar. If we just look for ".waitFor" we'll also # pick up time_system_.waitFor(...), and we don't want to return true for that # pattern. But in that case there is a strong pattern of using time_system in # various spellings as the variable name. def hasCondVarWaitFor(line): wait_for = line.find(".waitFor(") if wait_for == -1: return False preceding = line[0:wait_for] if preceding.endswith("time_system") or preceding.endswith("timeSystem()") or \ preceding.endswith("time_system_"): return False return True # Determines whether the filename is either in the specified subdirectory, or # at the top level. We consider files in the top level for the benefit of # the check_format testcases in tools/testdata/check_format. def isInSubdir(filename, *subdirs): # Skip this check for check_format's unit-tests. if filename.count("/") <= 1: return True for subdir in subdirs: if filename.startswith('./' + subdir + '/'): return True return False def checkSourceLine(line, file_path, reportError): # Check fixable errors. These may have been fixed already. if line.find(". ") != -1: reportError("over-enthusiastic spaces") if isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line): reportError( "Please do not use the raw literal x-envoy in source code. See Envoy::Http::PrefixValue.") if hasInvalidAngleBracketDirectory(line): reportError("envoy includes should not have angle brackets") for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items(): if invalid_construct in line: reportError("incorrect protobuf type reference %s; " "should be %s" % (invalid_construct, valid_construct)) for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items(): if invalid_construct in line: reportError("term %s should be replaced with standard library term %s" % (invalid_construct, valid_construct)) # Do not include the virtual_includes headers. if re.search("#include.*/_virtual_includes/", line): reportError("Don't include the virtual includes headers.") # Some errors cannot be fixed automatically, and actionable, consistent, # navigable messages should be emitted to make it easy to find and fix # the errors by hand. 
if not whitelistedForProtobufDeps(file_path): if '"google/protobuf' in line or "google::protobuf" in line: reportError("unexpected direct dependency on google.protobuf, use " "the definitions in common/protobuf/protobuf.h instead.") if line.startswith("#include <mutex>") or line.startswith("#include <condition_variable"): # We don't check here for std::mutex because that may legitimately show up in # comments, for example this one. reportError("Don't use <mutex> or <condition_variable*>, switch to " "Thread::MutexBasicLockable in source/common/common/thread.h") if line.startswith("#include <shared_mutex>"): # We don't check here for std::shared_timed_mutex because that may # legitimately show up in comments, for example this one. reportError("Don't use <shared_mutex>, use absl::Mutex for reader/writer locks.") if not whitelistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line: if "RealTimeSource" in line or \ ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \ "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \ "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line): reportError("Don't reference real-world time sources from production code; use injection") if not whitelistedForUnpackTo(file_path): if "UnpackTo" in line: reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead") # Check that we use the absl::Time library if "std::get_time" in line: if "test/" in file_path: reportError("Don't use std::get_time; use TestUtility::parseTime in tests") else: reportError("Don't use std::get_time; use the injectable time system") if "std::put_time" in line: reportError("Don't use std::put_time; use absl::Time equivalent instead") if "gmtime" in line: reportError("Don't use gmtime; use absl::Time equivalent instead") if "mktime" in line: reportError("Don't use mktime; use absl::Time equivalent instead") if "localtime" in line: reportError("Don't use localtime; use absl::Time equivalent instead") if "strftime" in line: reportError("Don't use strftime; use absl::FormatTime instead") if "strptime" in line: reportError("Don't use strptime; use absl::FormatTime instead") if "std::atomic_" in line: # The std::atomic_* free functions are functionally equivalent to calling # operations on std::atomic<T> objects, so prefer to use that instead. 
reportError("Don't use free std::atomic_* functions, use std::atomic<T> members instead.") if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h": # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that # can be used instead reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined " "in include/envoy/common/platform.h instead") if re.search("\{\s*\.\w+\s*\=", line): # Designated initializers are not part of the C++14 standard and are not supported # by MSVC reportError("Don't use designated initializers in struct initialization, " "they are not part of C++14") if " ?: " in line: # The ?: operator is non-standard, it is a GCC extension reportError("Don't use the '?:' operator, it is a non-standard GCC extension") if line.startswith("using testing::Test;"): reportError("Don't use 'using testing::Test;, elaborate the type instead") if line.startswith("using testing::TestWithParams;"): reportError("Don't use 'using testing::Test;, elaborate the type instead") if not whitelistedForSerializeAsString(file_path) and "SerializeAsString" in line: # The MessageLite::SerializeAsString doesn't generate deterministic serialization, # use MessageUtil::hash instead. reportError( "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead." ) if not whitelistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line: # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing # behavior. reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.") if isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \ ('.counter(' in line or '.gauge(' in line or '.histogram(' in line): reportError("Don't lookup stats by name at runtime; use StatName saved during construction") if re.search("envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}", line): reportError("Don't use mangled Protobuf names for enum constants") hist_m = re.search("(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)", line) if hist_m and not whitelistedForHistogramSiSuffix(hist_m.group(0)): reportError( "Don't suffix histogram names with the unit symbol, " "it's already part of the histogram object and unit-supporting sinks can use this information natively, " "other sinks can add the suffix automatically on flush should they prefer to do so.") if not whitelistedForStdRegex(file_path) and "std::regex" in line: reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher") if not whitelistedForGrpcInit(file_path): grpc_init_or_shutdown = line.find("grpc_init()") grpc_shutdown = line.find("grpc_shutdown()") if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and grpc_shutdown < grpc_init_or_shutdown): grpc_init_or_shutdown = grpc_shutdown if grpc_init_or_shutdown != -1: comment = line.find("// ") if comment == -1 or comment > grpc_init_or_shutdown: reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " + "Grpc::GoogleGrpcContext. 
See #8282") def checkBuildLine(line, file_path, reportError): if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/")): reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel") if not whitelistedForProtobufDeps(file_path) and '"protobuf"' in line: reportError("unexpected direct external dependency on protobuf, use " "//source/common/protobuf instead.") if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and not isExternalBuildFile(file_path) and "@envoy//" in line): reportError("Superfluous '@envoy//' prefix") def fixBuildLine(file_path, line): if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and not isExternalBuildFile(file_path)): line = line.replace("@envoy//", "//") return line def fixBuildPath(file_path): replaceLines(file_path, functools.partial(fixBuildLine, file_path)) error_messages = [] # TODO(htuch): Add API specific BUILD fixer script. if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isSkylarkFile( file_path) and not isWorkspaceFile(file_path): if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0: error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path] if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: error_messages += ["buildifier rewrite failed for file: %s" % file_path] return error_messages def checkBuildPath(file_path): error_messages = [] if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isSkylarkFile( file_path) and not isWorkspaceFile(file_path): command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path) error_messages += executeCommand(command, "envoy_build_fixer check failed", file_path) if isBuildFile(file_path) and (file_path.startswith(args.api_prefix + "envoy") or file_path.startswith(args.api_shadow_prefix + "envoy")): found = False for line in readLines(file_path): if "api_proto_package(" in line: found = True break if not found: error_messages += ["API build file does not provide api_proto_package()"] command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path) error_messages += executeCommand(command, "buildifier check failed", file_path) error_messages += checkFileContents(file_path, checkBuildLine) return error_messages def fixSourcePath(file_path): replaceLines(file_path, fixSourceLine) error_messages = [] if not file_path.endswith(DOCS_SUFFIX): if not file_path.endswith(PROTO_SUFFIX): error_messages += fixHeaderOrder(file_path) error_messages += clangFormat(file_path) if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path): package_name, error_message = packageNameForProto(file_path) if package_name is None: error_messages += error_message return error_messages def checkSourcePath(file_path): error_messages = checkFileContents(file_path, checkSourceLine) if not file_path.endswith(DOCS_SUFFIX): if not file_path.endswith(PROTO_SUFFIX): error_messages += checkNamespace(file_path) command = ("%s --include_dir_order %s --path %s | diff %s -" % (HEADER_ORDER_PATH, include_dir_order, file_path, file_path)) error_messages += executeCommand(command, "header_order.py check failed", file_path) command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path)) error_messages += executeCommand(command, "clang-format check failed", file_path) if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path): package_name, error_message = 
packageNameForProto(file_path) if package_name is None: error_messages += error_message return error_messages # Example target outputs are: # - "26,27c26" # - "12,13d13" # - "7a8,9" def executeCommand(command, error_message, file_path, regex=re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")): try: output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip() if output: return output.decode('utf-8').split("\n") return [] except subprocess.CalledProcessError as e: if (e.returncode != 0 and e.returncode != 1): return ["ERROR: something went wrong while executing: %s" % e.cmd] # In case we can't find any line numbers, record an error message first. error_messages = ["%s for file: %s" % (error_message, file_path)] for line in e.output.decode('utf-8').splitlines(): for num in regex.findall(line): error_messages.append(" %s:%s" % (file_path, num)) return error_messages def fixHeaderOrder(file_path): command = "%s --rewrite --include_dir_order %s --path %s" % (HEADER_ORDER_PATH, include_dir_order, file_path) if os.system(command) != 0: return ["header_order.py rewrite error: %s" % (file_path)] return [] def clangFormat(file_path): command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path) if os.system(command) != 0: return ["clang-format rewrite error: %s" % (file_path)] return [] def checkFormat(file_path): if file_path.startswith(EXCLUDED_PREFIXES): return [] if not file_path.endswith(SUFFIXES): return [] error_messages = [] # Apply fixes first, if asked, and then run checks. If we wind up attempting to fix # an issue, but there's still an error, that's a problem. try_to_fix = operation_type == "fix" if isBuildFile(file_path) or isSkylarkFile(file_path) or isWorkspaceFile(file_path): if try_to_fix: error_messages += fixBuildPath(file_path) error_messages += checkBuildPath(file_path) else: if try_to_fix: error_messages += fixSourcePath(file_path) error_messages += checkSourcePath(file_path) if error_messages: return ["From %s" % file_path] + error_messages return error_messages def checkFormatReturnTraceOnError(file_path): """Run checkFormat and return the traceback of any exception.""" try: return checkFormat(file_path) except: return traceback.format_exc().split("\n") def checkOwners(dir_name, owned_directories, error_messages): """Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS Args: dir_name: the directory being checked. owned_directories: directories currently listed in CODEOWNERS. error_messages: where to put an error message for new unowned directories. """ found = False for owned in owned_directories: if owned.startswith(dir_name) or dir_name.startswith(owned): found = True if not found and dir_name not in UNOWNED_EXTENSIONS: error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name) def checkFormatVisitor(arg, dir_name, names): """Run checkFormat in parallel for the given files. Args: arg: a tuple (pool, result_list, owned_directories, error_messages) pool and result_list are for starting tasks asynchronously. owned_directories tracks directories listed in the CODEOWNERS file. error_messages is a list of string format errors. dir_name: the parent directory of the given files. names: a list of file names. """ # Unpack the multiprocessing.Pool process pool and list of results. Since # python lists are passed as references, this is used to collect the list of # async results (futures) from running checkFormat and passing them back to # the caller. 
pool, result_list, owned_directories, error_messags = arg # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded # manner as it is a small and limited list. source_prefix = './source/' full_prefix = './source/extensions/' # Check to see if this directory is a subdir under /source/extensions # Also ignore top level directories under /source/extensions since we don't # need owners for source/extensions/access_loggers etc, just the subdirectories. if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]: checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages) for file_name in names: result = pool.apply_async(checkFormatReturnTraceOnError, args=(dir_name + "/" + file_name,)) result_list.append(result) # checkErrorMessages iterates over the list with error messages and prints # errors and returns a bool based on whether there were any errors. def checkErrorMessages(error_messages): if error_messages: for e in error_messages: print("ERROR: %s" % e) return True return False if __name__ == "__main__": parser = argparse.ArgumentParser(description="Check or fix file format.") parser.add_argument("operation_type", type=str, choices=["check", "fix"], help="specify if the run should 'check' or 'fix' format.") parser.add_argument( "target_path", type=str, nargs="?", default=".", help="specify the root directory for the script to recurse over. Default '.'.") parser.add_argument("--add-excluded-prefixes", type=str, nargs="+", help="exclude additional prefixes.") parser.add_argument("-j", "--num-workers", type=int, default=multiprocessing.cpu_count(), help="number of worker processes to use; defaults to one per core.") parser.add_argument("--api-prefix", type=str, default="./api/", help="path of the API tree.") parser.add_argument("--api-shadow-prefix", type=str, default="./generated_api_shadow/", help="path of the shadow API tree.") parser.add_argument("--skip_envoy_build_rule_check", action="store_true", help="skip checking for '@envoy//' prefix in build rules.") parser.add_argument("--namespace_check", type=str, nargs="?", default="Envoy", help="specify namespace check string. Default 'Envoy'.") parser.add_argument("--namespace_check_excluded_paths", type=str, nargs="+", default=[], help="exclude paths from the namespace_check.") parser.add_argument("--build_fixer_check_excluded_paths", type=str, nargs="+", default=[], help="exclude paths from envoy_build_fixer check.") parser.add_argument("--include_dir_order", type=str, default=",".join(common.includeDirOrder()), help="specify the header block include directory order.") args = parser.parse_args() operation_type = args.operation_type target_path = args.target_path envoy_build_rule_check = not args.skip_envoy_build_rule_check namespace_check = args.namespace_check namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ "./tools/api_boost/testdata/", "./tools/clang_tools/", ] build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [ "./bazel/external/", "./bazel/toolchains/", "./bazel/BUILD", "./tools/clang_tools", ] include_dir_order = args.include_dir_order if args.add_excluded_prefixes: EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes) # Check whether all needed external tools are available. ct_error_messages = checkTools() if checkErrorMessages(ct_error_messages): sys.exit(1) # Returns the list of directories with owners listed in CODEOWNERS. May append errors to # error_messages. 
def ownedDirectories(error_messages): owned = [] maintainers = [ '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@junr03', '@dnoe', '@dio', '@jmarantz' ] try: with open('./CODEOWNERS') as f: for line in f: # If this line is of the form "extensions/... @owner1 @owner2" capture the directory # name and store it in the list of directories with documented owners. m = re.search(r'.*(extensions[^@]*\s+)(@.*)', line) if m is not None and not line.startswith('#'): owned.append(m.group(1).strip()) owners = re.findall('@\S+', m.group(2).strip()) if len(owners) < 2: error_messages.append("Extensions require at least 2 owners in CODEOWNERS:\n" " {}".format(line)) maintainer = len(set(owners).intersection(set(maintainers))) > 0 if not maintainer: error_messages.append("Extensions require at least one maintainer OWNER:\n" " {}".format(line)) return owned except IOError: return [] # for the check format tests. # Calculate the list of owned directories once per run. error_messages = [] owned_directories = ownedDirectories(error_messages) if os.path.isfile(target_path): error_messages += checkFormat("./" + target_path) else: pool = multiprocessing.Pool(processes=args.num_workers) results = [] # For each file in target_path, start a new task in the pool and collect the # results (results is passed by reference, and is used as an output). for root, _, files in os.walk(target_path): checkFormatVisitor((pool, results, owned_directories, error_messages), root, files) # Close the pool to new tasks, wait for all of the running tasks to finish, # then collect the error messages. pool.close() pool.join() error_messages += sum((r.get() for r in results), []) if checkErrorMessages(error_messages): print("ERROR: check format failed. run 'tools/check_format.py fix'") sys.exit(1) if operation_type == "check": print("PASS")
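`ownedDirectories`, defined at the end of the file above, extracts owned paths and owner handles from CODEOWNERS with a single regex. A sketch of that parse on a made-up sample line:

```python
# Sketch of the ownedDirectories CODEOWNERS parse on a made-up sample line:
# group(1) captures the extensions/... path, group(2) the @owner handles.
import re

line = "/source/extensions/filters/http/foo @alice @bob\n"
m = re.search(r'.*(extensions[^@]*\s+)(@.*)', line)
if m is not None and not line.startswith('#'):
    print(m.group(1).strip())               # -> extensions/filters/http/foo
    print(re.findall(r'@\S+', m.group(2)))  # -> ['@alice', '@bob']
```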
__init__
Create a new gate.

Args:
    name: The Qobj name of the gate.
    num_qubits: The number of qubits the gate acts on.
    params: A list of parameters.
    label: An optional label for the gate.
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" # MASKED: __init__ function (lines 28-40) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 def to_matrix(self) -> np.ndarray: """Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called. """ if hasattr(self, '__array__'): # pylint: disable=no-member return self.__array__(dtype=complex) raise CircuitError("to_matrix not defined for this {}".format(type(self))) def power(self, exponent: float): """Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary """ from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import # Should be diagonalized because it's a unitary. decomposition, unitary = schur(Operator(self).data, output='complex') # Raise the diagonal entries to the specified power decomposition_power = list() decomposition_diagonal = decomposition.diagonal() # assert off-diagonal are 0 if not np.allclose(np.diag(decomposition_diagonal), decomposition): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) # Then reconstruct the resulting gate. unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent)) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. """ if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. 
'111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]: """Validation and handling of the arguments and its relationship. For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example:: in: [[q[0],q[1]], q[2]],[] outs: [q[0], q[2]], [] [q[1], q[2]], [] The general broadcasting rules are: * If len(qargs) == 1:: [q[0], q[1]] -> [q[0]],[q[1]] * If len(qargs) == 2:: [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]] * If len(qargs) >= 3:: [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...] Args: qargs: List of quantum bit arguments. cargs: List of classical bit arguments. Returns: A tuple with single arguments. Raises: CircuitError: If the input is not valid. For example, the number of arguments does not match the gate expectation. 
""" if len(qargs) != self.num_qubits or cargs: raise CircuitError( f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does' f' not match the gate expectation ({self.num_qubits}).') if any(not qarg for qarg in qargs): raise CircuitError('One or more of the arguments are empty') if len(qargs) == 1: return Gate._broadcast_single_argument(qargs[0]) elif len(qargs) == 2: return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif len(qargs) >= 3: return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError('This gate cannot handle %i arguments' % len(qargs)) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." % type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
def __init__(self, name: str, num_qubits: int, params: List,
             label: Optional[str] = None) -> None:
    """Create a new gate.

    Args:
        name: The Qobj name of the gate.
        num_qubits: The number of qubits the gate acts on.
        params: A list of parameters.
        label: An optional label for the gate.
    """
    self._label = label
    self.definition = None
    super().__init__(name, num_qubits, 0, params)
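As a usage sketch of the constructor above (assuming qiskit is installed): a bare `Gate` can be instantiated and appended to a circuit, since a definition or matrix is only needed later, e.g. at transpile or simulation time:

```python
# Usage sketch for Gate.__init__; assumes qiskit is installed. The gate has
# no definition or matrix yet, which only matters when it must be lowered.
from qiskit import QuantumCircuit
from qiskit.circuit import Gate

g = Gate(name="my_gate", num_qubits=2, params=[0.5], label="mine")
qc = QuantumCircuit(2)
qc.append(g, [0, 1])
print(qc.data)  # -> one my_gate entry acting on qubits 0 and 1
```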
28
40
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str] = None) -> None: """Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate. """ self._label = label self.definition = None super().__init__(name, num_qubits, 0, params) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 def to_matrix(self) -> np.ndarray: """Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called. """ if hasattr(self, '__array__'): # pylint: disable=no-member return self.__array__(dtype=complex) raise CircuitError("to_matrix not defined for this {}".format(type(self))) def power(self, exponent: float): """Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary """ from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import # Should be diagonalized because it's a unitary. decomposition, unitary = schur(Operator(self).data, output='complex') # Raise the diagonal entries to the specified power decomposition_power = list() decomposition_diagonal = decomposition.diagonal() # assert off-diagonal are 0 if not np.allclose(np.diag(decomposition_diagonal), decomposition): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) # Then reconstruct the resulting gate. unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent)) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. 
""" if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]: """Validation and handling of the arguments and its relationship. For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example:: in: [[q[0],q[1]], q[2]],[] outs: [q[0], q[2]], [] [q[1], q[2]], [] The general broadcasting rules are: * If len(qargs) == 1:: [q[0], q[1]] -> [q[0]],[q[1]] * If len(qargs) == 2:: [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]] * If len(qargs) >= 3:: [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...] Args: qargs: List of quantum bit arguments. cargs: List of classical bit arguments. Returns: A tuple with single arguments. Raises: CircuitError: If the input is not valid. For example, the number of arguments does not match the gate expectation. 
""" if len(qargs) != self.num_qubits or cargs: raise CircuitError( f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does' f' not match the gate expectation ({self.num_qubits}).') if any(not qarg for qarg in qargs): raise CircuitError('One or more of the arguments are empty') if len(qargs) == 1: return Gate._broadcast_single_argument(qargs[0]) elif len(qargs) == 2: return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif len(qargs) >= 3: return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError('This gate cannot handle %i arguments' % len(qargs)) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." % type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
to_matrix
Return a Numpy.array for the gate unitary matrix.

Returns:
    np.ndarray: if the Gate subclass has a matrix definition.

Raises:
    CircuitError: If a Gate subclass does not implement this method an
        exception will be raised when this base class method is called.
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str] = None) -> None: """Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate. """ self._label = label self.definition = None super().__init__(name, num_qubits, 0, params) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 # MASKED: to_matrix function (lines 45-58) def power(self, exponent: float): """Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary """ from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import # Should be diagonalized because it's a unitary. decomposition, unitary = schur(Operator(self).data, output='complex') # Raise the diagonal entries to the specified power decomposition_power = list() decomposition_diagonal = decomposition.diagonal() # assert off-diagonal are 0 if not np.allclose(np.diag(decomposition_diagonal), decomposition): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) # Then reconstruct the resulting gate. unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent)) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. """ if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. 
Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]: """Validation and handling of the arguments and its relationship. For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example:: in: [[q[0],q[1]], q[2]],[] outs: [q[0], q[2]], [] [q[1], q[2]], [] The general broadcasting rules are: * If len(qargs) == 1:: [q[0], q[1]] -> [q[0]],[q[1]] * If len(qargs) == 2:: [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]] * If len(qargs) >= 3:: [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...] Args: qargs: List of quantum bit arguments. cargs: List of classical bit arguments. Returns: A tuple with single arguments. Raises: CircuitError: If the input is not valid. For example, the number of arguments does not match the gate expectation. 
""" if len(qargs) != self.num_qubits or cargs: raise CircuitError( f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does' f' not match the gate expectation ({self.num_qubits}).') if any(not qarg for qarg in qargs): raise CircuitError('One or more of the arguments are empty') if len(qargs) == 1: return Gate._broadcast_single_argument(qargs[0]) elif len(qargs) == 2: return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif len(qargs) >= 3: return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError('This gate cannot handle %i arguments' % len(qargs)) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." % type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
def to_matrix(self) -> np.ndarray:
    """Return a Numpy.array for the gate unitary matrix.

    Returns:
        np.ndarray: if the Gate subclass has a matrix definition.

    Raises:
        CircuitError: If a Gate subclass does not implement this method an
            exception will be raised when this base class method is called.
    """
    if hasattr(self, '__array__'):
        # pylint: disable=no-member
        return self.__array__(dtype=complex)
    raise CircuitError("to_matrix not defined for this {}".format(type(self)))
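A note on the hook above: `to_matrix` succeeds for any subclass that defines `__array__`. A minimal sketch of such a subclass (the `SDemoGate` name and its matrix are hypothetical, chosen only for illustration):

```python
import numpy as np
from qiskit.circuit import Gate

class SDemoGate(Gate):
    """Hypothetical single-qubit phase gate, defined only for this sketch."""

    def __init__(self):
        super().__init__('s_demo', 1, [])  # name, num_qubits, params

    def __array__(self, dtype=complex):
        # Gate.to_matrix() delegates here because __array__ exists.
        return np.array([[1, 0], [0, 1j]], dtype=dtype)

print(SDemoGate().to_matrix())  # 2x2 complex ndarray
```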
45
58
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str] = None) -> None: """Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate. """ self._label = label self.definition = None super().__init__(name, num_qubits, 0, params) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 def to_matrix(self) -> np.ndarray: """Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called. """ if hasattr(self, '__array__'): # pylint: disable=no-member return self.__array__(dtype=complex) raise CircuitError("to_matrix not defined for this {}".format(type(self))) def power(self, exponent: float): """Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary """ from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import # Should be diagonalized because it's a unitary. decomposition, unitary = schur(Operator(self).data, output='complex') # Raise the diagonal entries to the specified power decomposition_power = list() decomposition_diagonal = decomposition.diagonal() # assert off-diagonal are 0 if not np.allclose(np.diag(decomposition_diagonal), decomposition): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) # Then reconstruct the resulting gate. unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent)) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. 
""" if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]: """Validation and handling of the arguments and its relationship. For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example:: in: [[q[0],q[1]], q[2]],[] outs: [q[0], q[2]], [] [q[1], q[2]], [] The general broadcasting rules are: * If len(qargs) == 1:: [q[0], q[1]] -> [q[0]],[q[1]] * If len(qargs) == 2:: [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]] * If len(qargs) >= 3:: [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...] Args: qargs: List of quantum bit arguments. cargs: List of classical bit arguments. Returns: A tuple with single arguments. Raises: CircuitError: If the input is not valid. For example, the number of arguments does not match the gate expectation. 
""" if len(qargs) != self.num_qubits or cargs: raise CircuitError( f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does' f' not match the gate expectation ({self.num_qubits}).') if any(not qarg for qarg in qargs): raise CircuitError('One or more of the arguments are empty') if len(qargs) == 1: return Gate._broadcast_single_argument(qargs[0]) elif len(qargs) == 2: return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif len(qargs) >= 3: return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError('This gate cannot handle %i arguments' % len(qargs)) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." % type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
power
Creates a unitary gate as `gate^exponent`.

Args:
    exponent (float): the power to which the gate is raised.

Returns:
    qiskit.extensions.UnitaryGate: a gate whose `to_matrix` is
        `self.to_matrix` raised to the power `exponent`.

Raises:
    CircuitError: If Gate is not unitary
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str] = None) -> None: """Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate. """ self._label = label self.definition = None super().__init__(name, num_qubits, 0, params) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 def to_matrix(self) -> np.ndarray: """Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called. """ if hasattr(self, '__array__'): # pylint: disable=no-member return self.__array__(dtype=complex) raise CircuitError("to_matrix not defined for this {}".format(type(self))) # MASKED: power function (lines 60-88) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. """ if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. 
For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]: """Validation and handling of the arguments and its relationship. For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example:: in: [[q[0],q[1]], q[2]],[] outs: [q[0], q[2]], [] [q[1], q[2]], [] The general broadcasting rules are: * If len(qargs) == 1:: [q[0], q[1]] -> [q[0]],[q[1]] * If len(qargs) == 2:: [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]] * If len(qargs) >= 3:: [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...] Args: qargs: List of quantum bit arguments. cargs: List of classical bit arguments. Returns: A tuple with single arguments. Raises: CircuitError: If the input is not valid. For example, the number of arguments does not match the gate expectation. """ if len(qargs) != self.num_qubits or cargs: raise CircuitError( f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does' f' not match the gate expectation ({self.num_qubits}).') if any(not qarg for qarg in qargs): raise CircuitError('One or more of the arguments are empty') if len(qargs) == 1: return Gate._broadcast_single_argument(qargs[0]) elif len(qargs) == 2: return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif len(qargs) >= 3: return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError('This gate cannot handle %i arguments' % len(qargs)) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." 
% type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
def power(self, exponent: float):
    """Creates a unitary gate as `gate^exponent`.

    Args:
        exponent (float): the power to which the gate is raised.

    Returns:
        qiskit.extensions.UnitaryGate: a gate whose `to_matrix` is
            `self.to_matrix` raised to the power `exponent`.

    Raises:
        CircuitError: If Gate is not unitary
    """
    from qiskit.quantum_info.operators import Operator  # pylint: disable=cyclic-import
    from qiskit.extensions.unitary import UnitaryGate  # pylint: disable=cyclic-import
    # Should be diagonalized because it's a unitary.
    decomposition, unitary = schur(Operator(self).data, output='complex')
    # Raise the diagonal entries to the specified power
    decomposition_power = list()
    decomposition_diagonal = decomposition.diagonal()
    # assert off-diagonal entries are 0
    if not np.allclose(np.diag(decomposition_diagonal), decomposition):
        raise CircuitError('The matrix is not diagonal')
    for element in decomposition_diagonal:
        decomposition_power.append(pow(element, exponent))
    # Then reconstruct the resulting gate.
    unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T
    return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent))
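A quick sanity check of the Schur-based fractional power: taking a square root and squaring it should recover the original unitary. A small sketch, assuming the standard-library `XGate` is importable:

```python
import numpy as np
from qiskit.circuit.library import XGate

sqrt_x = XGate().power(0.5)                       # returns a UnitaryGate
squared = sqrt_x.to_matrix() @ sqrt_x.to_matrix()
print(np.allclose(squared, XGate().to_matrix()))  # expected: True
```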
60
88
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str] = None) -> None: """Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate. """ self._label = label self.definition = None super().__init__(name, num_qubits, 0, params) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 def to_matrix(self) -> np.ndarray: """Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called. """ if hasattr(self, '__array__'): # pylint: disable=no-member return self.__array__(dtype=complex) raise CircuitError("to_matrix not defined for this {}".format(type(self))) def power(self, exponent: float): """Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary """ from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import # Should be diagonalized because it's a unitary. decomposition, unitary = schur(Operator(self).data, output='complex') # Raise the diagonal entries to the specified power decomposition_power = list() decomposition_diagonal = decomposition.diagonal() # assert off-diagonal are 0 if not np.allclose(np.diag(decomposition_diagonal), decomposition): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) # Then reconstruct the resulting gate. unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent)) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. 
""" if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]: """Validation and handling of the arguments and its relationship. For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example:: in: [[q[0],q[1]], q[2]],[] outs: [q[0], q[2]], [] [q[1], q[2]], [] The general broadcasting rules are: * If len(qargs) == 1:: [q[0], q[1]] -> [q[0]],[q[1]] * If len(qargs) == 2:: [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]] * If len(qargs) >= 3:: [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...] Args: qargs: List of quantum bit arguments. cargs: List of classical bit arguments. Returns: A tuple with single arguments. Raises: CircuitError: If the input is not valid. For example, the number of arguments does not match the gate expectation. 
""" if len(qargs) != self.num_qubits or cargs: raise CircuitError( f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does' f' not match the gate expectation ({self.num_qubits}).') if any(not qarg for qarg in qargs): raise CircuitError('One or more of the arguments are empty') if len(qargs) == 1: return Gate._broadcast_single_argument(qargs[0]) elif len(qargs) == 2: return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif len(qargs) >= 3: return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError('This gate cannot handle %i arguments' % len(qargs)) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." % type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
broadcast_arguments
Validation and handling of the arguments and their relationship.

For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example::

    in: [[q[0],q[1]], q[2]],[]
    outs: [q[0], q[2]], []
          [q[1], q[2]], []

The general broadcasting rules are:

* If len(qargs) == 1::

    [q[0], q[1]] -> [q[0]],[q[1]]

* If len(qargs) == 2::

    [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
    [[q[0]], [r[0], r[1]]]       -> [q[0], r[0]], [q[0], r[1]]
    [[q[0], q[1]], [r[0]]]       -> [q[0], r[0]], [q[1], r[0]]

* If len(qargs) >= 3::

    [[q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]

Args:
    qargs: List of quantum bit arguments.
    cargs: List of classical bit arguments.

Returns:
    Tuples of (qargs, cargs), one per broadcast instance.

Raises:
    CircuitError: If the input is not valid. For example, the number of
        arguments does not match the gate expectation.
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str] = None) -> None: """Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate. """ self._label = label self.definition = None super().__init__(name, num_qubits, 0, params) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 def to_matrix(self) -> np.ndarray: """Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called. """ if hasattr(self, '__array__'): # pylint: disable=no-member return self.__array__(dtype=complex) raise CircuitError("to_matrix not defined for this {}".format(type(self))) def power(self, exponent: float): """Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary """ from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import # Should be diagonalized because it's a unitary. decomposition, unitary = schur(Operator(self).data, output='complex') # Raise the diagonal entries to the specified power decomposition_power = list() decomposition_diagonal = decomposition.diagonal() # assert off-diagonal are 0 if not np.allclose(np.diag(decomposition_diagonal), decomposition): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) # Then reconstruct the resulting gate. unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent)) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. 
""" if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) # MASKED: broadcast_arguments function (lines 184-236) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." % type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]:
    """Validation and handling of the arguments and their relationship.

    For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``.
    This method yields the arguments in the right grouping. In the given example::

        in: [[q[0],q[1]], q[2]],[]
        outs: [q[0], q[2]], []
              [q[1], q[2]], []

    The general broadcasting rules are:

    * If len(qargs) == 1::

        [q[0], q[1]] -> [q[0]],[q[1]]

    * If len(qargs) == 2::

        [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
        [[q[0]], [r[0], r[1]]]       -> [q[0], r[0]], [q[0], r[1]]
        [[q[0], q[1]], [r[0]]]       -> [q[0], r[0]], [q[1], r[0]]

    * If len(qargs) >= 3::

        [[q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]

    Args:
        qargs: List of quantum bit arguments.
        cargs: List of classical bit arguments.

    Returns:
        Tuples of (qargs, cargs), one per broadcast instance.

    Raises:
        CircuitError: If the input is not valid. For example, the number of
            arguments does not match the gate expectation.
    """
    if len(qargs) != self.num_qubits or cargs:
        raise CircuitError(
            f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does'
            f' not match the gate expectation ({self.num_qubits}).')

    if any(not qarg for qarg in qargs):
        raise CircuitError('One or more of the arguments are empty')

    if len(qargs) == 1:
        return Gate._broadcast_single_argument(qargs[0])
    elif len(qargs) == 2:
        return Gate._broadcast_2_arguments(qargs[0], qargs[1])
    elif len(qargs) >= 3:
        return Gate._broadcast_3_or_more_args(qargs)
    else:
        raise CircuitError('This gate cannot handle %i arguments' % len(qargs))
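Because the broadcast helpers only inspect list lengths and zip the lists, the expansion can be demonstrated with plain placeholder strings standing in for qubits. A minimal sketch:

```python
from qiskit.circuit import Gate

two_qubit = Gate('demo2q', 2, [])  # hypothetical two-qubit gate, no params

# Two "controls" broadcast against a single "target".
for qargs, cargs in two_qubit.broadcast_arguments([['q0', 'q1'], ['r0']], []):
    print(qargs, cargs)
# ['q0', 'r0'] []
# ['q1', 'r0'] []
```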
184
236
# This code is part of Qiskit. # # (C) Copyright IBM 2017. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unitary gate.""" from warnings import warn from typing import List, Optional, Union, Tuple import numpy as np from scipy.linalg import schur from qiskit.circuit.parameter import ParameterExpression from qiskit.circuit.exceptions import CircuitError from .instruction import Instruction class Gate(Instruction): """Unitary gate.""" def __init__(self, name: str, num_qubits: int, params: List, label: Optional[str] = None) -> None: """Create a new gate. Args: name: The Qobj name of the gate. num_qubits: The number of qubits the gate acts on. params: A list of parameters. label: An optional label for the gate. """ self._label = label self.definition = None super().__init__(name, num_qubits, 0, params) # Set higher priority than Numpy array and matrix classes __array_priority__ = 20 def to_matrix(self) -> np.ndarray: """Return a Numpy.array for the gate unitary matrix. Returns: np.ndarray: if the Gate subclass has a matrix definition. Raises: CircuitError: If a Gate subclass does not implement this method an exception will be raised when this base class method is called. """ if hasattr(self, '__array__'): # pylint: disable=no-member return self.__array__(dtype=complex) raise CircuitError("to_matrix not defined for this {}".format(type(self))) def power(self, exponent: float): """Creates a unitary gate as `gate^exponent`. Args: exponent (float): Gate^exponent Returns: qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent. Raises: CircuitError: If Gate is not unitary """ from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import # Should be diagonalized because it's a unitary. decomposition, unitary = schur(Operator(self).data, output='complex') # Raise the diagonal entries to the specified power decomposition_power = list() decomposition_diagonal = decomposition.diagonal() # assert off-diagonal are 0 if not np.allclose(np.diag(decomposition_diagonal), decomposition): raise CircuitError('The matrix is not diagonal') for element in decomposition_diagonal: decomposition_power.append(pow(element, exponent)) # Then reconstruct the resulting gate. unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent)) def _return_repeat(self, exponent: float) -> 'Gate': return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits, params=self.params) def assemble(self) -> 'Instruction': """Assemble a QasmQobjInstruction""" instruction = super().assemble() if self.label: instruction.label = self.label return instruction @property def label(self) -> str: """Return gate label""" return self._label @label.setter def label(self, name: str): """Set gate label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None. 
""" if isinstance(name, (str, type(None))): self._label = name else: raise TypeError('label expects a string or None') def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None, ctrl_state: Optional[Union[int, str]] = None): """Return controlled version of gate. See :class:`.ControlledGate` for usage. Args: num_ctrl_qubits: number of controls to add to gate (default=1) label: optional gate label ctrl_state: The control state in decimal or as a bitstring (e.g. '111'). If None, use 2**num_ctrl_qubits-1. Returns: qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size num_qubits + 2*num_ctrl_qubits - 1. Raises: QiskitError: unrecognized mode or invalid ctrl_state """ # pylint: disable=cyclic-import from .add_control import add_control return add_control(self, num_ctrl_qubits, label, ctrl_state) @staticmethod def _broadcast_single_argument(qarg: List) -> List: """Expands a single argument. For example: [q[0], q[1]] -> [q[0]], [q[1]] """ # [q[0], q[1]] -> [q[0]] # -> [q[1]] for arg0 in qarg: yield [arg0], [] @staticmethod def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List: if len(qarg0) == len(qarg1): # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[1], r[1]] for arg0, arg1 in zip(qarg0, qarg1): yield [arg0, arg1], [] elif len(qarg0) == 1: # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] # -> [q[0], r[1]] for arg1 in qarg1: yield [qarg0[0], arg1], [] elif len(qarg1) == 1: # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] # -> [q[1], r[0]] for arg0 in qarg0: yield [arg0, qarg1[0]], [] else: raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' % (qarg0, qarg1)) @staticmethod def _broadcast_3_or_more_args(qargs: List) -> List: if all(len(qarg) == len(qargs[0]) for qarg in qargs): for arg in zip(*qargs): yield list(arg), [] else: raise CircuitError( 'Not sure how to combine these qubit arguments:\n %s\n' % qargs) def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]: """Validation and handling of the arguments and its relationship. For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This method yields the arguments in the right grouping. In the given example:: in: [[q[0],q[1]], q[2]],[] outs: [q[0], q[2]], [] [q[1], q[2]], [] The general broadcasting rules are: * If len(qargs) == 1:: [q[0], q[1]] -> [q[0]],[q[1]] * If len(qargs) == 2:: [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]] * If len(qargs) >= 3:: [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...] Args: qargs: List of quantum bit arguments. cargs: List of classical bit arguments. Returns: A tuple with single arguments. Raises: CircuitError: If the input is not valid. For example, the number of arguments does not match the gate expectation. 
""" if len(qargs) != self.num_qubits or cargs: raise CircuitError( f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does' f' not match the gate expectation ({self.num_qubits}).') if any(not qarg for qarg in qargs): raise CircuitError('One or more of the arguments are empty') if len(qargs) == 1: return Gate._broadcast_single_argument(qargs[0]) elif len(qargs) == 2: return Gate._broadcast_2_arguments(qargs[0], qargs[1]) elif len(qargs) >= 3: return Gate._broadcast_3_or_more_args(qargs) else: raise CircuitError('This gate cannot handle %i arguments' % len(qargs)) def validate_parameter(self, parameter): """Gate parameters should be int, float, or ParameterExpression""" if isinstance(parameter, ParameterExpression): if len(parameter.parameters) > 0: return parameter # expression has free parameters, we cannot validate it if not parameter._symbol_expr.is_real: raise CircuitError("Bound parameter expression is complex in gate {}".format( self.name)) return parameter # per default assume parameters must be real when bound if isinstance(parameter, (int, float)): return parameter elif isinstance(parameter, (np.integer, np.floating)): return parameter.item() elif isinstance(parameter, np.ndarray): warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed " "no earlier than 3 months after that release date. " "Considering creating your own Gate subclass with the method validate_parameter " " to allow this param type." % type(parameter), DeprecationWarning, 3) return parameter else: raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter), self.name))
find_in_path
Search user's PATH for a given executable.

Returns:
    Full path to executable.
# Copyright 2014 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import fcntl import hashlib import os import shutil import subprocess import sys # Allow use of this module even if termcolor is missing. There are many # standalone python scripts in build_tools that can be run directly without # PYTHONPATH set (i.e. not via build/python_wrapper that adds this path. # TODO(sbc): we should probably just assume that all the module dependencies # are present. try: import termcolor except ImportError: termcolor = None from webports import error, paths GS_URL = 'http://storage.googleapis.com/' GS_BUCKET = 'webports' GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET) # Require the latest version of the NaCl SDK. webports is built # and tested against the pepper_canary release. To build aginst older # versions of the SDK use the one of the pepper_XX branches (or use # --skip-sdk-version-check). MIN_SDK_VERSION = 49 arch_to_pkgarch = { 'x86_64': 'x86-64', 'i686': 'i686', 'arm': 'arm', 'pnacl': 'pnacl', 'emscripten': 'emscripten', 'le32': 'le32' } # Inverse of arch_to_pkgarch pkgarch_to_arch = {v: k for k, v in arch_to_pkgarch.items()} LOG_ERROR = 0 LOG_WARN = 1 LOG_INFO = 2 LOG_VERBOSE = 3 LOG_TRACE = 4 ELF_MAGIC = '\x7fELF' PEXE_MAGIC = 'PEXE' log_level = LOG_INFO color_mode = 'auto' def colorize(message, color): if termcolor and colorize.enabled: return termcolor.colored(message, color) else: return message def check_stdout_for_color_support(): if color_mode == 'auto': colorize.enabled = sys.stdout.isatty() def is_elf_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == ELF_MAGIC def is_pexe_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == PEXE_MAGIC def memoize(f): """Memoization decorator for functions taking one or more arguments.""" class Memo(dict): def __init__(self, f): super(Memo, self).__init__() self.f = f def __call__(self, *args): return self[args] def __missing__(self, key): ret = self[key] = self.f(*key) return ret return Memo(f) def set_verbose(enabled): if enabled: set_log_level(LOG_VERBOSE) else: set_log_level(LOG_INFO) def set_log_level(verbosity): global log_level log_level = verbosity def log(message, verbosity=LOG_INFO): """Log a message to the console (stdout).""" if log_level < verbosity: return sys.stdout.write(str(message) + '\n') sys.stdout.flush() def log_heading(message, suffix=''): """Log a colored/highlighted message with optional suffix.""" if colorize.enabled: log(colorize(message, 'green') + suffix) else: if log_level > LOG_WARN: # When running in verbose mode make sure heading standout log('###################################################################') log(message + suffix) log('###################################################################') else: log(message + suffix) def warn(message): log('warning: ' + message, LOG_WARN) def trace(message): log(message, LOG_TRACE) def log_verbose(message): log(message, LOG_VERBOSE) # MASKED: find_in_path function (lines 152-168) def download_file(filename, url): """Download a file from a given URL. Args: filename: the name of the file to download the URL to. url: then URL to fetch. 
""" temp_filename = filename + '.partial' # Ensure curl is in user's PATH find_in_path('curl') curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o', temp_filename] if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): # Add --progress-bar but only if stdout is a TTY device. curl_cmd.append('--progress-bar') else: # otherwise suppress status output, since curl always assumes its # talking to a TTY and writes \r and \b characters. But add # --show-error so that when curl fails it at least prints something. curl_cmd += ['--silent', '--show-error'] curl_cmd.append(url) if log_level > LOG_WARN: log('Downloading: %s [%s]' % (url, filename)) else: log('Downloading: %s' % url.replace(GS_URL, '')) try: subprocess.check_call(curl_cmd) except subprocess.CalledProcessError as e: raise error.Error('Error downloading file: %s' % str(e)) os.rename(temp_filename, filename) def check_stamp(filename, contents=None): """Check that a given stamp file is up-to-date. Returns: False is the file does not exists or is older that that given comparison file, or does not contain the given contents. True otherwise. """ if not os.path.exists(filename): return False if contents is not None: with open(filename) as f: if not f.read().startswith(contents): return False return True @memoize def get_sdk_root(): """Returns the root of the currently configured Native Client SDK.""" root = os.environ.get('NACL_SDK_ROOT') if root is None: local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk') if os.path.exists(local_sdk_root): root = local_sdk_root else: raise error.Error('$NACL_SDK_ROOT not set') if sys.platform == "cygwin": root = root.replace('\\', '/') return root @memoize def get_emscripten_root(): emscripten = os.environ.get('EMSCRIPTEN') if emscripten is None: local_root = os.path.join(paths.OUT_DIR, 'emsdk', 'emscripten') if os.path.exists(local_root): emscripten = local_root else: raise error.Error('$EMSCRIPTEN not set and %s does not exist.' 
% local_root) if not os.path.isdir(emscripten): raise error.Error('$EMSCRIPTEN environment variable does not point' ' to a directory: %s' % emscripten) return emscripten def setup_emscripten(): if 'EMSCRIPTEN' in os.environ: return local_root = get_emscripten_root() os.environ['EMSCRIPTEN'] = local_root os.environ['EM_CONFIG'] = os.path.join( os.path.dirname(local_root), '.emscripten') try: find_in_path('node') except error.Error: node_bin = os.path.join(paths.OUT_DIR, 'node', 'bin') if not os.path.isdir(node_bin): raise error.Error( 'node not found in path and default path not found: %s' % node_bin) os.environ['PATH'] += ':' + node_bin find_in_path('node') @memoize def get_sdk_version(): """Returns the version (as a string) of the current SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-version']).strip() return version def check_sdk_version(version): """Returns True if the currently configured SDK is 'version' or above.""" return int(get_sdk_version()) >= int(version) @memoize def get_sdk_revision(): """Returns the revision of the currently configured Native Client SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-revision']).strip() return int(version) @memoize def get_platform(): """Returns the current platform name according getos.py.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') platform = subprocess.check_output([getos]).strip() return platform @memoize def get_toolchain_root(config): """Returns the toolchain folder for a given NaCl toolchain.""" if config.toolchain == 'emscripten': return get_emscripten_root() platform = get_platform() if config.toolchain in ('pnacl', 'clang-newlib'): tc_dir = os.path.join('%s_pnacl' % platform) else: tc_arch = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch] tc_dir = '%s_%s_%s' % (platform, tc_arch, config.libc) return os.path.join(get_sdk_root(), 'toolchain', tc_dir) @memoize def get_install_root(config): """Returns the install location given a build configuration.""" tc_dir = get_toolchain_root(config) if config.toolchain == 'emscripten': return os.path.join(tc_dir, 'system', 'local') if config.toolchain == 'pnacl': tc_dir = os.path.join(tc_dir, 'le32-nacl') else: tc_dir = os.path.join(tc_dir, '%s-nacl' % config.arch) return os.path.join(tc_dir, 'usr') @memoize def get_install_stamp_root(config): """Returns the installation metadata folder for the give configuration.""" tc_root = get_install_root(config) return os.path.join(tc_root, 'var', 'lib', 'npkg') @memoize def get_strip(config): tc_dir = get_toolchain_root(config) if config.toolchain == 'pnacl': strip = os.path.join(tc_dir, 'bin', 'pnacl-strip') else: strip = os.path.join(tc_dir, 'bin', '%s-nacl-strip' % config.arch) assert os.path.exists(strip), 'strip executable not found: %s' % strip return strip def get_install_stamp(package_name, config): """Returns the filename of the install stamp for for a given package. This file is written at install time and contains metadata about the installed package. """ root = get_install_stamp_root(config) return os.path.join(root, package_name + '.info') def get_list_file(package_name, config): """Returns the filename of the list of installed files for a given package. This file is written at install time. 
""" root = get_install_stamp_root(config) return os.path.join(root, package_name + '.list') def is_installed(package_name, config, stamp_content=None): """Returns True if the given package is installed.""" stamp = get_install_stamp(package_name, config) result = check_stamp(stamp, stamp_content) return result def check_sdk_root(): """Check validity of NACL_SDK_ROOT.""" root = get_sdk_root() if not os.path.isdir(root): raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root) landmark = os.path.join(root, 'tools', 'getos.py') if not os.path.exists(landmark): raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. " "Couldn't find landmark file (%s)" % (root, landmark)) if not check_sdk_version(MIN_SDK_VERSION): raise error.Error( 'This version of webports requires at least version %s of\n' 'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n' 'to use webports with an older version of the SDK please checkout\n' 'one of the pepper_XX branches (or run with\n' '--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version())) def hash_file(filename): """Return the SHA1 (in hex format) of the contents of the given file.""" block_size = 100 * 1024 sha1 = hashlib.sha1() with open(filename) as f: while True: data = f.read(block_size) if not data: break sha1.update(data) return sha1.hexdigest() class HashVerificationError(error.Error): pass def verify_hash(filename, sha1): """Return True if the sha1 of the given file match the sha1 passed in.""" file_sha1 = hash_file(filename) if sha1 != file_sha1: raise HashVerificationError( 'verification failed: %s\nExpected: %s\nActual: %s' % (filename, sha1, file_sha1)) def remove_tree(directory): """Recursively remove a directory and its contents.""" if not os.path.exists(directory): return if not os.path.isdir(directory): raise error.Error('RemoveTree: not a directory: %s', directory) shutil.rmtree(directory) def rel_path(filename): """Return a pathname relative to the root the webports src tree. This is used mostly to make output more readable when printing filenames.""" return os.path.relpath(filename, paths.NACLPORTS_ROOT) def makedirs(directory): if os.path.isdir(directory): return if os.path.exists(directory): raise error.Error('mkdir: File exists and is not a directory: %s' % directory) trace("mkdir: %s" % directory) os.makedirs(directory) class DirLock(object): """Per-directory flock()-based context manager This class will raise an exception if another process already holds the lock for the given directory. """ def __init__(self, lock_dir): if not os.path.exists(lock_dir): makedirs(lock_dir) self.file_name = os.path.join(lock_dir, 'webports.lock') self.fd = open(self.file_name, 'w') def __enter__(self): try: fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except Exception: raise error.Error("Unable to acquire lock (%s): Is webports already " "running?" % self.file_name) def __exit__(self, exc_type, exc_val, exc_tb): os.remove(self.file_name) self.fd.close() class BuildLock(DirLock): """Lock used when building a package (essentially a lock on OUT_DIR)""" def __init__(self): super(BuildLock, self).__init__(paths.OUT_DIR) class InstallLock(DirLock): """Lock used when installing/uninstalling package""" def __init__(self, config): root = get_install_root(config) super(InstallLock, self).__init__(root) check_stdout_for_color_support()
def find_in_path(command_name):
  """Search user's PATH for a given executable.

  Returns:
    Full path to executable.
  """
  extensions = ('',)
  if not os.path.splitext(command_name)[1] and os.name == 'nt':
    extensions = ('.bat', '.com', '.exe')

  for path in os.environ.get('PATH', '').split(os.pathsep):
    for ext in extensions:
      full_name = os.path.join(path, command_name + ext)
      if os.path.exists(full_name) and os.path.isfile(full_name):
        return full_name

  raise error.Error('command not found: %s' % command_name)
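In use, the helper either returns an absolute path or raises `error.Error`. A short sketch, assuming the file is importable as `webports.util` (that module path is a guess):

```python
from webports import error
from webports.util import find_in_path  # assumed module path

print(find_in_path('curl'))             # e.g. /usr/bin/curl

try:
    find_in_path('no-such-tool-xyz')    # hypothetical missing command
except error.Error as exc:
    print(exc)                          # command not found: no-such-tool-xyz
```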
152
168
# Copyright 2014 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import fcntl import hashlib import os import shutil import subprocess import sys # Allow use of this module even if termcolor is missing. There are many # standalone python scripts in build_tools that can be run directly without # PYTHONPATH set (i.e. not via build/python_wrapper that adds this path. # TODO(sbc): we should probably just assume that all the module dependencies # are present. try: import termcolor except ImportError: termcolor = None from webports import error, paths GS_URL = 'http://storage.googleapis.com/' GS_BUCKET = 'webports' GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET) # Require the latest version of the NaCl SDK. webports is built # and tested against the pepper_canary release. To build aginst older # versions of the SDK use the one of the pepper_XX branches (or use # --skip-sdk-version-check). MIN_SDK_VERSION = 49 arch_to_pkgarch = { 'x86_64': 'x86-64', 'i686': 'i686', 'arm': 'arm', 'pnacl': 'pnacl', 'emscripten': 'emscripten', 'le32': 'le32' } # Inverse of arch_to_pkgarch pkgarch_to_arch = {v: k for k, v in arch_to_pkgarch.items()} LOG_ERROR = 0 LOG_WARN = 1 LOG_INFO = 2 LOG_VERBOSE = 3 LOG_TRACE = 4 ELF_MAGIC = '\x7fELF' PEXE_MAGIC = 'PEXE' log_level = LOG_INFO color_mode = 'auto' def colorize(message, color): if termcolor and colorize.enabled: return termcolor.colored(message, color) else: return message def check_stdout_for_color_support(): if color_mode == 'auto': colorize.enabled = sys.stdout.isatty() def is_elf_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == ELF_MAGIC def is_pexe_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == PEXE_MAGIC def memoize(f): """Memoization decorator for functions taking one or more arguments.""" class Memo(dict): def __init__(self, f): super(Memo, self).__init__() self.f = f def __call__(self, *args): return self[args] def __missing__(self, key): ret = self[key] = self.f(*key) return ret return Memo(f) def set_verbose(enabled): if enabled: set_log_level(LOG_VERBOSE) else: set_log_level(LOG_INFO) def set_log_level(verbosity): global log_level log_level = verbosity def log(message, verbosity=LOG_INFO): """Log a message to the console (stdout).""" if log_level < verbosity: return sys.stdout.write(str(message) + '\n') sys.stdout.flush() def log_heading(message, suffix=''): """Log a colored/highlighted message with optional suffix.""" if colorize.enabled: log(colorize(message, 'green') + suffix) else: if log_level > LOG_WARN: # When running in verbose mode make sure heading standout log('###################################################################') log(message + suffix) log('###################################################################') else: log(message + suffix) def warn(message): log('warning: ' + message, LOG_WARN) def trace(message): log(message, LOG_TRACE) def log_verbose(message): log(message, LOG_VERBOSE) def find_in_path(command_name): """Search user's PATH for a given executable. Returns: Full path to executable. 
""" extensions = ('',) if not os.path.splitext(command_name)[1] and os.name == 'nt': extensions = ('.bat', '.com', '.exe') for path in os.environ.get('PATH', '').split(os.pathsep): for ext in extensions: full_name = os.path.join(path, command_name + ext) if os.path.exists(full_name) and os.path.isfile(full_name): return full_name raise error.Error('command not found: %s' % command_name) def download_file(filename, url): """Download a file from a given URL. Args: filename: the name of the file to download the URL to. url: then URL to fetch. """ temp_filename = filename + '.partial' # Ensure curl is in user's PATH find_in_path('curl') curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o', temp_filename] if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): # Add --progress-bar but only if stdout is a TTY device. curl_cmd.append('--progress-bar') else: # otherwise suppress status output, since curl always assumes its # talking to a TTY and writes \r and \b characters. But add # --show-error so that when curl fails it at least prints something. curl_cmd += ['--silent', '--show-error'] curl_cmd.append(url) if log_level > LOG_WARN: log('Downloading: %s [%s]' % (url, filename)) else: log('Downloading: %s' % url.replace(GS_URL, '')) try: subprocess.check_call(curl_cmd) except subprocess.CalledProcessError as e: raise error.Error('Error downloading file: %s' % str(e)) os.rename(temp_filename, filename) def check_stamp(filename, contents=None): """Check that a given stamp file is up-to-date. Returns: False is the file does not exists or is older that that given comparison file, or does not contain the given contents. True otherwise. """ if not os.path.exists(filename): return False if contents is not None: with open(filename) as f: if not f.read().startswith(contents): return False return True @memoize def get_sdk_root(): """Returns the root of the currently configured Native Client SDK.""" root = os.environ.get('NACL_SDK_ROOT') if root is None: local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk') if os.path.exists(local_sdk_root): root = local_sdk_root else: raise error.Error('$NACL_SDK_ROOT not set') if sys.platform == "cygwin": root = root.replace('\\', '/') return root @memoize def get_emscripten_root(): emscripten = os.environ.get('EMSCRIPTEN') if emscripten is None: local_root = os.path.join(paths.OUT_DIR, 'emsdk', 'emscripten') if os.path.exists(local_root): emscripten = local_root else: raise error.Error('$EMSCRIPTEN not set and %s does not exist.' 
% local_root) if not os.path.isdir(emscripten): raise error.Error('$EMSCRIPTEN environment variable does not point' ' to a directory: %s' % emscripten) return emscripten def setup_emscripten(): if 'EMSCRIPTEN' in os.environ: return local_root = get_emscripten_root() os.environ['EMSCRIPTEN'] = local_root os.environ['EM_CONFIG'] = os.path.join( os.path.dirname(local_root), '.emscripten') try: find_in_path('node') except error.Error: node_bin = os.path.join(paths.OUT_DIR, 'node', 'bin') if not os.path.isdir(node_bin): raise error.Error( 'node not found in path and default path not found: %s' % node_bin) os.environ['PATH'] += ':' + node_bin find_in_path('node') @memoize def get_sdk_version(): """Returns the version (as a string) of the current SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-version']).strip() return version def check_sdk_version(version): """Returns True if the currently configured SDK is 'version' or above.""" return int(get_sdk_version()) >= int(version) @memoize def get_sdk_revision(): """Returns the revision of the currently configured Native Client SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-revision']).strip() return int(version) @memoize def get_platform(): """Returns the current platform name according getos.py.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') platform = subprocess.check_output([getos]).strip() return platform @memoize def get_toolchain_root(config): """Returns the toolchain folder for a given NaCl toolchain.""" if config.toolchain == 'emscripten': return get_emscripten_root() platform = get_platform() if config.toolchain in ('pnacl', 'clang-newlib'): tc_dir = os.path.join('%s_pnacl' % platform) else: tc_arch = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch] tc_dir = '%s_%s_%s' % (platform, tc_arch, config.libc) return os.path.join(get_sdk_root(), 'toolchain', tc_dir) @memoize def get_install_root(config): """Returns the install location given a build configuration.""" tc_dir = get_toolchain_root(config) if config.toolchain == 'emscripten': return os.path.join(tc_dir, 'system', 'local') if config.toolchain == 'pnacl': tc_dir = os.path.join(tc_dir, 'le32-nacl') else: tc_dir = os.path.join(tc_dir, '%s-nacl' % config.arch) return os.path.join(tc_dir, 'usr') @memoize def get_install_stamp_root(config): """Returns the installation metadata folder for the give configuration.""" tc_root = get_install_root(config) return os.path.join(tc_root, 'var', 'lib', 'npkg') @memoize def get_strip(config): tc_dir = get_toolchain_root(config) if config.toolchain == 'pnacl': strip = os.path.join(tc_dir, 'bin', 'pnacl-strip') else: strip = os.path.join(tc_dir, 'bin', '%s-nacl-strip' % config.arch) assert os.path.exists(strip), 'strip executable not found: %s' % strip return strip def get_install_stamp(package_name, config): """Returns the filename of the install stamp for for a given package. This file is written at install time and contains metadata about the installed package. """ root = get_install_stamp_root(config) return os.path.join(root, package_name + '.info') def get_list_file(package_name, config): """Returns the filename of the list of installed files for a given package. This file is written at install time. 
""" root = get_install_stamp_root(config) return os.path.join(root, package_name + '.list') def is_installed(package_name, config, stamp_content=None): """Returns True if the given package is installed.""" stamp = get_install_stamp(package_name, config) result = check_stamp(stamp, stamp_content) return result def check_sdk_root(): """Check validity of NACL_SDK_ROOT.""" root = get_sdk_root() if not os.path.isdir(root): raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root) landmark = os.path.join(root, 'tools', 'getos.py') if not os.path.exists(landmark): raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. " "Couldn't find landmark file (%s)" % (root, landmark)) if not check_sdk_version(MIN_SDK_VERSION): raise error.Error( 'This version of webports requires at least version %s of\n' 'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n' 'to use webports with an older version of the SDK please checkout\n' 'one of the pepper_XX branches (or run with\n' '--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version())) def hash_file(filename): """Return the SHA1 (in hex format) of the contents of the given file.""" block_size = 100 * 1024 sha1 = hashlib.sha1() with open(filename) as f: while True: data = f.read(block_size) if not data: break sha1.update(data) return sha1.hexdigest() class HashVerificationError(error.Error): pass def verify_hash(filename, sha1): """Return True if the sha1 of the given file match the sha1 passed in.""" file_sha1 = hash_file(filename) if sha1 != file_sha1: raise HashVerificationError( 'verification failed: %s\nExpected: %s\nActual: %s' % (filename, sha1, file_sha1)) def remove_tree(directory): """Recursively remove a directory and its contents.""" if not os.path.exists(directory): return if not os.path.isdir(directory): raise error.Error('RemoveTree: not a directory: %s', directory) shutil.rmtree(directory) def rel_path(filename): """Return a pathname relative to the root the webports src tree. This is used mostly to make output more readable when printing filenames.""" return os.path.relpath(filename, paths.NACLPORTS_ROOT) def makedirs(directory): if os.path.isdir(directory): return if os.path.exists(directory): raise error.Error('mkdir: File exists and is not a directory: %s' % directory) trace("mkdir: %s" % directory) os.makedirs(directory) class DirLock(object): """Per-directory flock()-based context manager This class will raise an exception if another process already holds the lock for the given directory. """ def __init__(self, lock_dir): if not os.path.exists(lock_dir): makedirs(lock_dir) self.file_name = os.path.join(lock_dir, 'webports.lock') self.fd = open(self.file_name, 'w') def __enter__(self): try: fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except Exception: raise error.Error("Unable to acquire lock (%s): Is webports already " "running?" % self.file_name) def __exit__(self, exc_type, exc_val, exc_tb): os.remove(self.file_name) self.fd.close() class BuildLock(DirLock): """Lock used when building a package (essentially a lock on OUT_DIR)""" def __init__(self): super(BuildLock, self).__init__(paths.OUT_DIR) class InstallLock(DirLock): """Lock used when installing/uninstalling package""" def __init__(self, config): root = get_install_root(config) super(InstallLock, self).__init__(root) check_stdout_for_color_support()
check_stamp
Check that a given stamp file is up-to-date. Returns: False if the file does not exist, is older than the given comparison file, or does not contain the given contents. True otherwise.
# Copyright 2014 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import fcntl import hashlib import os import shutil import subprocess import sys # Allow use of this module even if termcolor is missing. There are many # standalone python scripts in build_tools that can be run directly without # PYTHONPATH set (i.e. not via build/python_wrapper that adds this path. # TODO(sbc): we should probably just assume that all the module dependencies # are present. try: import termcolor except ImportError: termcolor = None from webports import error, paths GS_URL = 'http://storage.googleapis.com/' GS_BUCKET = 'webports' GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET) # Require the latest version of the NaCl SDK. webports is built # and tested against the pepper_canary release. To build aginst older # versions of the SDK use the one of the pepper_XX branches (or use # --skip-sdk-version-check). MIN_SDK_VERSION = 49 arch_to_pkgarch = { 'x86_64': 'x86-64', 'i686': 'i686', 'arm': 'arm', 'pnacl': 'pnacl', 'emscripten': 'emscripten', 'le32': 'le32' } # Inverse of arch_to_pkgarch pkgarch_to_arch = {v: k for k, v in arch_to_pkgarch.items()} LOG_ERROR = 0 LOG_WARN = 1 LOG_INFO = 2 LOG_VERBOSE = 3 LOG_TRACE = 4 ELF_MAGIC = '\x7fELF' PEXE_MAGIC = 'PEXE' log_level = LOG_INFO color_mode = 'auto' def colorize(message, color): if termcolor and colorize.enabled: return termcolor.colored(message, color) else: return message def check_stdout_for_color_support(): if color_mode == 'auto': colorize.enabled = sys.stdout.isatty() def is_elf_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == ELF_MAGIC def is_pexe_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == PEXE_MAGIC def memoize(f): """Memoization decorator for functions taking one or more arguments.""" class Memo(dict): def __init__(self, f): super(Memo, self).__init__() self.f = f def __call__(self, *args): return self[args] def __missing__(self, key): ret = self[key] = self.f(*key) return ret return Memo(f) def set_verbose(enabled): if enabled: set_log_level(LOG_VERBOSE) else: set_log_level(LOG_INFO) def set_log_level(verbosity): global log_level log_level = verbosity def log(message, verbosity=LOG_INFO): """Log a message to the console (stdout).""" if log_level < verbosity: return sys.stdout.write(str(message) + '\n') sys.stdout.flush() def log_heading(message, suffix=''): """Log a colored/highlighted message with optional suffix.""" if colorize.enabled: log(colorize(message, 'green') + suffix) else: if log_level > LOG_WARN: # When running in verbose mode make sure heading standout log('###################################################################') log(message + suffix) log('###################################################################') else: log(message + suffix) def warn(message): log('warning: ' + message, LOG_WARN) def trace(message): log(message, LOG_TRACE) def log_verbose(message): log(message, LOG_VERBOSE) def find_in_path(command_name): """Search user's PATH for a given executable. Returns: Full path to executable. 
""" extensions = ('',) if not os.path.splitext(command_name)[1] and os.name == 'nt': extensions = ('.bat', '.com', '.exe') for path in os.environ.get('PATH', '').split(os.pathsep): for ext in extensions: full_name = os.path.join(path, command_name + ext) if os.path.exists(full_name) and os.path.isfile(full_name): return full_name raise error.Error('command not found: %s' % command_name) def download_file(filename, url): """Download a file from a given URL. Args: filename: the name of the file to download the URL to. url: then URL to fetch. """ temp_filename = filename + '.partial' # Ensure curl is in user's PATH find_in_path('curl') curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o', temp_filename] if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): # Add --progress-bar but only if stdout is a TTY device. curl_cmd.append('--progress-bar') else: # otherwise suppress status output, since curl always assumes its # talking to a TTY and writes \r and \b characters. But add # --show-error so that when curl fails it at least prints something. curl_cmd += ['--silent', '--show-error'] curl_cmd.append(url) if log_level > LOG_WARN: log('Downloading: %s [%s]' % (url, filename)) else: log('Downloading: %s' % url.replace(GS_URL, '')) try: subprocess.check_call(curl_cmd) except subprocess.CalledProcessError as e: raise error.Error('Error downloading file: %s' % str(e)) os.rename(temp_filename, filename) # MASKED: check_stamp function (lines 205-219) @memoize def get_sdk_root(): """Returns the root of the currently configured Native Client SDK.""" root = os.environ.get('NACL_SDK_ROOT') if root is None: local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk') if os.path.exists(local_sdk_root): root = local_sdk_root else: raise error.Error('$NACL_SDK_ROOT not set') if sys.platform == "cygwin": root = root.replace('\\', '/') return root @memoize def get_emscripten_root(): emscripten = os.environ.get('EMSCRIPTEN') if emscripten is None: local_root = os.path.join(paths.OUT_DIR, 'emsdk', 'emscripten') if os.path.exists(local_root): emscripten = local_root else: raise error.Error('$EMSCRIPTEN not set and %s does not exist.' 
% local_root) if not os.path.isdir(emscripten): raise error.Error('$EMSCRIPTEN environment variable does not point' ' to a directory: %s' % emscripten) return emscripten def setup_emscripten(): if 'EMSCRIPTEN' in os.environ: return local_root = get_emscripten_root() os.environ['EMSCRIPTEN'] = local_root os.environ['EM_CONFIG'] = os.path.join( os.path.dirname(local_root), '.emscripten') try: find_in_path('node') except error.Error: node_bin = os.path.join(paths.OUT_DIR, 'node', 'bin') if not os.path.isdir(node_bin): raise error.Error( 'node not found in path and default path not found: %s' % node_bin) os.environ['PATH'] += ':' + node_bin find_in_path('node') @memoize def get_sdk_version(): """Returns the version (as a string) of the current SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-version']).strip() return version def check_sdk_version(version): """Returns True if the currently configured SDK is 'version' or above.""" return int(get_sdk_version()) >= int(version) @memoize def get_sdk_revision(): """Returns the revision of the currently configured Native Client SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-revision']).strip() return int(version) @memoize def get_platform(): """Returns the current platform name according getos.py.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') platform = subprocess.check_output([getos]).strip() return platform @memoize def get_toolchain_root(config): """Returns the toolchain folder for a given NaCl toolchain.""" if config.toolchain == 'emscripten': return get_emscripten_root() platform = get_platform() if config.toolchain in ('pnacl', 'clang-newlib'): tc_dir = os.path.join('%s_pnacl' % platform) else: tc_arch = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch] tc_dir = '%s_%s_%s' % (platform, tc_arch, config.libc) return os.path.join(get_sdk_root(), 'toolchain', tc_dir) @memoize def get_install_root(config): """Returns the install location given a build configuration.""" tc_dir = get_toolchain_root(config) if config.toolchain == 'emscripten': return os.path.join(tc_dir, 'system', 'local') if config.toolchain == 'pnacl': tc_dir = os.path.join(tc_dir, 'le32-nacl') else: tc_dir = os.path.join(tc_dir, '%s-nacl' % config.arch) return os.path.join(tc_dir, 'usr') @memoize def get_install_stamp_root(config): """Returns the installation metadata folder for the give configuration.""" tc_root = get_install_root(config) return os.path.join(tc_root, 'var', 'lib', 'npkg') @memoize def get_strip(config): tc_dir = get_toolchain_root(config) if config.toolchain == 'pnacl': strip = os.path.join(tc_dir, 'bin', 'pnacl-strip') else: strip = os.path.join(tc_dir, 'bin', '%s-nacl-strip' % config.arch) assert os.path.exists(strip), 'strip executable not found: %s' % strip return strip def get_install_stamp(package_name, config): """Returns the filename of the install stamp for for a given package. This file is written at install time and contains metadata about the installed package. """ root = get_install_stamp_root(config) return os.path.join(root, package_name + '.info') def get_list_file(package_name, config): """Returns the filename of the list of installed files for a given package. This file is written at install time. 
""" root = get_install_stamp_root(config) return os.path.join(root, package_name + '.list') def is_installed(package_name, config, stamp_content=None): """Returns True if the given package is installed.""" stamp = get_install_stamp(package_name, config) result = check_stamp(stamp, stamp_content) return result def check_sdk_root(): """Check validity of NACL_SDK_ROOT.""" root = get_sdk_root() if not os.path.isdir(root): raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root) landmark = os.path.join(root, 'tools', 'getos.py') if not os.path.exists(landmark): raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. " "Couldn't find landmark file (%s)" % (root, landmark)) if not check_sdk_version(MIN_SDK_VERSION): raise error.Error( 'This version of webports requires at least version %s of\n' 'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n' 'to use webports with an older version of the SDK please checkout\n' 'one of the pepper_XX branches (or run with\n' '--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version())) def hash_file(filename): """Return the SHA1 (in hex format) of the contents of the given file.""" block_size = 100 * 1024 sha1 = hashlib.sha1() with open(filename) as f: while True: data = f.read(block_size) if not data: break sha1.update(data) return sha1.hexdigest() class HashVerificationError(error.Error): pass def verify_hash(filename, sha1): """Return True if the sha1 of the given file match the sha1 passed in.""" file_sha1 = hash_file(filename) if sha1 != file_sha1: raise HashVerificationError( 'verification failed: %s\nExpected: %s\nActual: %s' % (filename, sha1, file_sha1)) def remove_tree(directory): """Recursively remove a directory and its contents.""" if not os.path.exists(directory): return if not os.path.isdir(directory): raise error.Error('RemoveTree: not a directory: %s', directory) shutil.rmtree(directory) def rel_path(filename): """Return a pathname relative to the root the webports src tree. This is used mostly to make output more readable when printing filenames.""" return os.path.relpath(filename, paths.NACLPORTS_ROOT) def makedirs(directory): if os.path.isdir(directory): return if os.path.exists(directory): raise error.Error('mkdir: File exists and is not a directory: %s' % directory) trace("mkdir: %s" % directory) os.makedirs(directory) class DirLock(object): """Per-directory flock()-based context manager This class will raise an exception if another process already holds the lock for the given directory. """ def __init__(self, lock_dir): if not os.path.exists(lock_dir): makedirs(lock_dir) self.file_name = os.path.join(lock_dir, 'webports.lock') self.fd = open(self.file_name, 'w') def __enter__(self): try: fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except Exception: raise error.Error("Unable to acquire lock (%s): Is webports already " "running?" % self.file_name) def __exit__(self, exc_type, exc_val, exc_tb): os.remove(self.file_name) self.fd.close() class BuildLock(DirLock): """Lock used when building a package (essentially a lock on OUT_DIR)""" def __init__(self): super(BuildLock, self).__init__(paths.OUT_DIR) class InstallLock(DirLock): """Lock used when installing/uninstalling package""" def __init__(self, config): root = get_install_root(config) super(InstallLock, self).__init__(root) check_stdout_for_color_support()
def check_stamp(filename, contents=None): """Check that a given stamp file is up-to-date. Returns: False if the file does not exist, is older than the given comparison file, or does not contain the given contents. True otherwise. """ if not os.path.exists(filename): return False if contents is not None: with open(filename) as f: if not f.read().startswith(contents): return False return True
205
219
# Copyright 2014 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import fcntl import hashlib import os import shutil import subprocess import sys # Allow use of this module even if termcolor is missing. There are many # standalone python scripts in build_tools that can be run directly without # PYTHONPATH set (i.e. not via build/python_wrapper that adds this path. # TODO(sbc): we should probably just assume that all the module dependencies # are present. try: import termcolor except ImportError: termcolor = None from webports import error, paths GS_URL = 'http://storage.googleapis.com/' GS_BUCKET = 'webports' GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET) # Require the latest version of the NaCl SDK. webports is built # and tested against the pepper_canary release. To build aginst older # versions of the SDK use the one of the pepper_XX branches (or use # --skip-sdk-version-check). MIN_SDK_VERSION = 49 arch_to_pkgarch = { 'x86_64': 'x86-64', 'i686': 'i686', 'arm': 'arm', 'pnacl': 'pnacl', 'emscripten': 'emscripten', 'le32': 'le32' } # Inverse of arch_to_pkgarch pkgarch_to_arch = {v: k for k, v in arch_to_pkgarch.items()} LOG_ERROR = 0 LOG_WARN = 1 LOG_INFO = 2 LOG_VERBOSE = 3 LOG_TRACE = 4 ELF_MAGIC = '\x7fELF' PEXE_MAGIC = 'PEXE' log_level = LOG_INFO color_mode = 'auto' def colorize(message, color): if termcolor and colorize.enabled: return termcolor.colored(message, color) else: return message def check_stdout_for_color_support(): if color_mode == 'auto': colorize.enabled = sys.stdout.isatty() def is_elf_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == ELF_MAGIC def is_pexe_file(filename): if os.path.islink(filename): return False with open(filename) as f: header = f.read(4) return header == PEXE_MAGIC def memoize(f): """Memoization decorator for functions taking one or more arguments.""" class Memo(dict): def __init__(self, f): super(Memo, self).__init__() self.f = f def __call__(self, *args): return self[args] def __missing__(self, key): ret = self[key] = self.f(*key) return ret return Memo(f) def set_verbose(enabled): if enabled: set_log_level(LOG_VERBOSE) else: set_log_level(LOG_INFO) def set_log_level(verbosity): global log_level log_level = verbosity def log(message, verbosity=LOG_INFO): """Log a message to the console (stdout).""" if log_level < verbosity: return sys.stdout.write(str(message) + '\n') sys.stdout.flush() def log_heading(message, suffix=''): """Log a colored/highlighted message with optional suffix.""" if colorize.enabled: log(colorize(message, 'green') + suffix) else: if log_level > LOG_WARN: # When running in verbose mode make sure heading standout log('###################################################################') log(message + suffix) log('###################################################################') else: log(message + suffix) def warn(message): log('warning: ' + message, LOG_WARN) def trace(message): log(message, LOG_TRACE) def log_verbose(message): log(message, LOG_VERBOSE) def find_in_path(command_name): """Search user's PATH for a given executable. Returns: Full path to executable. 
""" extensions = ('',) if not os.path.splitext(command_name)[1] and os.name == 'nt': extensions = ('.bat', '.com', '.exe') for path in os.environ.get('PATH', '').split(os.pathsep): for ext in extensions: full_name = os.path.join(path, command_name + ext) if os.path.exists(full_name) and os.path.isfile(full_name): return full_name raise error.Error('command not found: %s' % command_name) def download_file(filename, url): """Download a file from a given URL. Args: filename: the name of the file to download the URL to. url: then URL to fetch. """ temp_filename = filename + '.partial' # Ensure curl is in user's PATH find_in_path('curl') curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o', temp_filename] if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): # Add --progress-bar but only if stdout is a TTY device. curl_cmd.append('--progress-bar') else: # otherwise suppress status output, since curl always assumes its # talking to a TTY and writes \r and \b characters. But add # --show-error so that when curl fails it at least prints something. curl_cmd += ['--silent', '--show-error'] curl_cmd.append(url) if log_level > LOG_WARN: log('Downloading: %s [%s]' % (url, filename)) else: log('Downloading: %s' % url.replace(GS_URL, '')) try: subprocess.check_call(curl_cmd) except subprocess.CalledProcessError as e: raise error.Error('Error downloading file: %s' % str(e)) os.rename(temp_filename, filename) def check_stamp(filename, contents=None): """Check that a given stamp file is up-to-date. Returns: False is the file does not exists or is older that that given comparison file, or does not contain the given contents. True otherwise. """ if not os.path.exists(filename): return False if contents is not None: with open(filename) as f: if not f.read().startswith(contents): return False return True @memoize def get_sdk_root(): """Returns the root of the currently configured Native Client SDK.""" root = os.environ.get('NACL_SDK_ROOT') if root is None: local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk') if os.path.exists(local_sdk_root): root = local_sdk_root else: raise error.Error('$NACL_SDK_ROOT not set') if sys.platform == "cygwin": root = root.replace('\\', '/') return root @memoize def get_emscripten_root(): emscripten = os.environ.get('EMSCRIPTEN') if emscripten is None: local_root = os.path.join(paths.OUT_DIR, 'emsdk', 'emscripten') if os.path.exists(local_root): emscripten = local_root else: raise error.Error('$EMSCRIPTEN not set and %s does not exist.' 
% local_root) if not os.path.isdir(emscripten): raise error.Error('$EMSCRIPTEN environment variable does not point' ' to a directory: %s' % emscripten) return emscripten def setup_emscripten(): if 'EMSCRIPTEN' in os.environ: return local_root = get_emscripten_root() os.environ['EMSCRIPTEN'] = local_root os.environ['EM_CONFIG'] = os.path.join( os.path.dirname(local_root), '.emscripten') try: find_in_path('node') except error.Error: node_bin = os.path.join(paths.OUT_DIR, 'node', 'bin') if not os.path.isdir(node_bin): raise error.Error( 'node not found in path and default path not found: %s' % node_bin) os.environ['PATH'] += ':' + node_bin find_in_path('node') @memoize def get_sdk_version(): """Returns the version (as a string) of the current SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-version']).strip() return version def check_sdk_version(version): """Returns True if the currently configured SDK is 'version' or above.""" return int(get_sdk_version()) >= int(version) @memoize def get_sdk_revision(): """Returns the revision of the currently configured Native Client SDK.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') version = subprocess.check_output([getos, '--sdk-revision']).strip() return int(version) @memoize def get_platform(): """Returns the current platform name according getos.py.""" getos = os.path.join(get_sdk_root(), 'tools', 'getos.py') platform = subprocess.check_output([getos]).strip() return platform @memoize def get_toolchain_root(config): """Returns the toolchain folder for a given NaCl toolchain.""" if config.toolchain == 'emscripten': return get_emscripten_root() platform = get_platform() if config.toolchain in ('pnacl', 'clang-newlib'): tc_dir = os.path.join('%s_pnacl' % platform) else: tc_arch = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch] tc_dir = '%s_%s_%s' % (platform, tc_arch, config.libc) return os.path.join(get_sdk_root(), 'toolchain', tc_dir) @memoize def get_install_root(config): """Returns the install location given a build configuration.""" tc_dir = get_toolchain_root(config) if config.toolchain == 'emscripten': return os.path.join(tc_dir, 'system', 'local') if config.toolchain == 'pnacl': tc_dir = os.path.join(tc_dir, 'le32-nacl') else: tc_dir = os.path.join(tc_dir, '%s-nacl' % config.arch) return os.path.join(tc_dir, 'usr') @memoize def get_install_stamp_root(config): """Returns the installation metadata folder for the give configuration.""" tc_root = get_install_root(config) return os.path.join(tc_root, 'var', 'lib', 'npkg') @memoize def get_strip(config): tc_dir = get_toolchain_root(config) if config.toolchain == 'pnacl': strip = os.path.join(tc_dir, 'bin', 'pnacl-strip') else: strip = os.path.join(tc_dir, 'bin', '%s-nacl-strip' % config.arch) assert os.path.exists(strip), 'strip executable not found: %s' % strip return strip def get_install_stamp(package_name, config): """Returns the filename of the install stamp for for a given package. This file is written at install time and contains metadata about the installed package. """ root = get_install_stamp_root(config) return os.path.join(root, package_name + '.info') def get_list_file(package_name, config): """Returns the filename of the list of installed files for a given package. This file is written at install time. 
""" root = get_install_stamp_root(config) return os.path.join(root, package_name + '.list') def is_installed(package_name, config, stamp_content=None): """Returns True if the given package is installed.""" stamp = get_install_stamp(package_name, config) result = check_stamp(stamp, stamp_content) return result def check_sdk_root(): """Check validity of NACL_SDK_ROOT.""" root = get_sdk_root() if not os.path.isdir(root): raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root) landmark = os.path.join(root, 'tools', 'getos.py') if not os.path.exists(landmark): raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. " "Couldn't find landmark file (%s)" % (root, landmark)) if not check_sdk_version(MIN_SDK_VERSION): raise error.Error( 'This version of webports requires at least version %s of\n' 'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n' 'to use webports with an older version of the SDK please checkout\n' 'one of the pepper_XX branches (or run with\n' '--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version())) def hash_file(filename): """Return the SHA1 (in hex format) of the contents of the given file.""" block_size = 100 * 1024 sha1 = hashlib.sha1() with open(filename) as f: while True: data = f.read(block_size) if not data: break sha1.update(data) return sha1.hexdigest() class HashVerificationError(error.Error): pass def verify_hash(filename, sha1): """Return True if the sha1 of the given file match the sha1 passed in.""" file_sha1 = hash_file(filename) if sha1 != file_sha1: raise HashVerificationError( 'verification failed: %s\nExpected: %s\nActual: %s' % (filename, sha1, file_sha1)) def remove_tree(directory): """Recursively remove a directory and its contents.""" if not os.path.exists(directory): return if not os.path.isdir(directory): raise error.Error('RemoveTree: not a directory: %s', directory) shutil.rmtree(directory) def rel_path(filename): """Return a pathname relative to the root the webports src tree. This is used mostly to make output more readable when printing filenames.""" return os.path.relpath(filename, paths.NACLPORTS_ROOT) def makedirs(directory): if os.path.isdir(directory): return if os.path.exists(directory): raise error.Error('mkdir: File exists and is not a directory: %s' % directory) trace("mkdir: %s" % directory) os.makedirs(directory) class DirLock(object): """Per-directory flock()-based context manager This class will raise an exception if another process already holds the lock for the given directory. """ def __init__(self, lock_dir): if not os.path.exists(lock_dir): makedirs(lock_dir) self.file_name = os.path.join(lock_dir, 'webports.lock') self.fd = open(self.file_name, 'w') def __enter__(self): try: fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except Exception: raise error.Error("Unable to acquire lock (%s): Is webports already " "running?" % self.file_name) def __exit__(self, exc_type, exc_val, exc_tb): os.remove(self.file_name) self.fd.close() class BuildLock(DirLock): """Lock used when building a package (essentially a lock on OUT_DIR)""" def __init__(self): super(BuildLock, self).__init__(paths.OUT_DIR) class InstallLock(DirLock): """Lock used when installing/uninstalling package""" def __init__(self, config): root = get_install_root(config) super(InstallLock, self).__init__(root) check_stdout_for_color_support()
__init__
Response to get user settings API Version: 2018-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties. :param pulumi.Input[str] user_settings_name: The name of the user settings
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['UserSettings'] class UserSettings(pulumi.CustomResource): # MASKED: __init__ function (lines 18-62) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'UserSettings': """ Get an existing UserSettings resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["properties"] = None return UserSettings(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']: """ The cloud shell user settings properties. """ return pulumi.get(self, "properties") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]] = None, user_settings_name: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Response to get user settings API Version: 2018-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties. :param pulumi.Input[str] user_settings_name: The name of the user settings """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if properties is None and not opts.urn: raise TypeError("Missing required property 'properties'") __props__['properties'] = properties __props__['user_settings_name'] = user_settings_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:portal:UserSettings"), pulumi.Alias(type_="azure-native:portal/latest:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/latest:UserSettings"), pulumi.Alias(type_="azure-native:portal/v20181001:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/v20181001:UserSettings")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(UserSettings, __self__).__init__( 'azure-native:portal:UserSettings', resource_name, __props__, opts)
18
62
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['UserSettings'] class UserSettings(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]] = None, user_settings_name: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Response to get user settings API Version: 2018-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties. :param pulumi.Input[str] user_settings_name: The name of the user settings """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if properties is None and not opts.urn: raise TypeError("Missing required property 'properties'") __props__['properties'] = properties __props__['user_settings_name'] = user_settings_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:portal:UserSettings"), pulumi.Alias(type_="azure-native:portal/latest:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/latest:UserSettings"), pulumi.Alias(type_="azure-native:portal/v20181001:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/v20181001:UserSettings")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(UserSettings, __self__).__init__( 'azure-native:portal:UserSettings', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'UserSettings': """ Get an existing UserSettings resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["properties"] = None return UserSettings(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']: """ The cloud shell user settings properties. """ return pulumi.get(self, "properties") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
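A hedged sketch of declaring this resource inside a Pulumi program (it only runs within a Pulumi stack). The resource name and settings name are illustrative placeholders, and the contents of `properties` come from the `UserPropertiesArgs` schema in `._inputs`, which is not shown in this row, so its fields are elided.

```python
import pulumi
import pulumi_azure_native as azure_native

# 'properties' is required; Pulumi also accepts a plain dict in place of
# UserPropertiesArgs. Its fields are defined by the UserProperties schema
# and are deliberately elided here.
settings = azure_native.portal.UserSettings(
    'cloud-shell-settings',
    properties={
        # ... UserPropertiesArgs fields ...
    },
    user_settings_name='cloudconsole')

pulumi.export('settings', settings.properties)
```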
get
Get an existing UserSettings resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['UserSettings'] class UserSettings(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]] = None, user_settings_name: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Response to get user settings API Version: 2018-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties. :param pulumi.Input[str] user_settings_name: The name of the user settings """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if properties is None and not opts.urn: raise TypeError("Missing required property 'properties'") __props__['properties'] = properties __props__['user_settings_name'] = user_settings_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:portal:UserSettings"), pulumi.Alias(type_="azure-native:portal/latest:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/latest:UserSettings"), pulumi.Alias(type_="azure-native:portal/v20181001:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/v20181001:UserSettings")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(UserSettings, __self__).__init__( 'azure-native:portal:UserSettings', resource_name, __props__, opts) # MASKED: get function (lines 64-81) @property @pulumi.getter def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']: """ The cloud shell user settings properties. """ return pulumi.get(self, "properties") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'UserSettings': """ Get an existing UserSettings resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["properties"] = None return UserSettings(resource_name, opts=opts, __props__=__props__)
64
81
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs from ._enums import * from ._inputs import * __all__ = ['UserSettings'] class UserSettings(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]] = None, user_settings_name: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Response to get user settings API Version: 2018-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties. :param pulumi.Input[str] user_settings_name: The name of the user settings """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if properties is None and not opts.urn: raise TypeError("Missing required property 'properties'") __props__['properties'] = properties __props__['user_settings_name'] = user_settings_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:portal:UserSettings"), pulumi.Alias(type_="azure-native:portal/latest:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/latest:UserSettings"), pulumi.Alias(type_="azure-native:portal/v20181001:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/v20181001:UserSettings")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(UserSettings, __self__).__init__( 'azure-native:portal:UserSettings', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'UserSettings': """ Get an existing UserSettings resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["properties"] = None return UserSettings(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']: """ The cloud shell user settings properties. """ return pulumi.get(self, "properties") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
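A matching sketch of adopting an existing resource with the static `get()` shown above; the ARM resource ID format is an assumption used for illustration, and as the implementation notes, only `properties` is populated on the returned object.

```python
import pulumi
import pulumi_azure_native as azure_native

# Look up existing state by provider ID rather than creating a resource.
existing = azure_native.portal.UserSettings.get(
    'existing-user-settings',
    id='/providers/Microsoft.Portal/userSettings/cloudconsole')

pulumi.export('existing_properties', existing.properties)
```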
FormatExtensionCategory
Format extension category metadata as RST. Args: extension_category: the name of the extension category, e.g. com.acme. Returns: RST formatted extension category description.
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" # MASKED: FormatExtensionCategory function (lines 251-266) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. 
code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. """ if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference 
label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. 
field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. 
""" return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. 
if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
def FormatExtensionCategory(extension_category):
  """Format extension metadata as RST.

  Args:
    extension_category: the name of the extension_category, e.g. com.acme.

  Returns:
    RST formatted extension category description.
  """
  try:
    extensions = EXTENSION_CATEGORIES[extension_category]
  except KeyError as e:
    raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n")
  anchor = FormatAnchor('extension_category_' + extension_category)
  extensions = FormatExtensionList(sorted(extensions))
  return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions)
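To make the implementation above easier to check by eye, here is a minimal, self-contained sketch of what `FormatExtensionCategory` emits. The template layout is reconstructed from the flattened source earlier in this record, and the category and extension names are invented for illustration.

```python
import string

# Layout reconstructed from the flattened EXTENSION_CATEGORY_TEMPLATE above.
EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor
.. tip::
  This extension category has the following known extensions:

$extensions
""")

def FormatAnchor(label):
  """Format a label as an Envoy API RST anchor (copied from the module)."""
  return '.. _%s:\n\n' % label

def FormatExtensionList(items, prefix="extension", indent=2):
  """Render a bulleted :ref: list (copied from the module)."""
  indent = " " * indent
  formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items)
  return f"{formatted_list}\n{indent}\n"

# Hypothetical category index; the real one is built from EXTENSION_DB.
EXTENSION_CATEGORIES = {
    'envoy.filters.http': ['envoy.filters.http.router', 'envoy.filters.http.buffer'],
}

anchor = FormatAnchor('extension_category_' + 'envoy.filters.http')
extensions = FormatExtensionList(sorted(EXTENSION_CATEGORIES['envoy.filters.http']))
print(EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions))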
251
266
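The two bare integers above are this record's start_line and end_line fields; they match the `(lines 251-266)` span named by the `# MASKED:` sentinel in the masked_code field. A hedged sketch of how a consumer of this dataset might splice the implementation field back over that sentinel; the `unmask` helper below is mine, not part of the dataset.

```python
import re

def unmask(masked_code: str, implementation: str) -> str:
    """Replace the single '# MASKED: ...' sentinel line with the implementation."""
    # A lambda replacement avoids backslash-escape surprises in re.sub.
    return re.sub(r'(?m)^\s*# MASKED: .*$', lambda _: implementation,
                  masked_code, count=1)

masked = "def f():\n    pass\n\n# MASKED: g function (lines 4-5)\n"
impl = "def g():\n    return 1"
assert unmask(masked, impl) == "def f():\n    pass\n\ndef g():\n    return 1\n"
```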
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
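One detail worth calling out from the file_content above: at import time the module builds a reverse index from category name to extension names with `setdefault`. A standalone demo of that loop with a hypothetical extension DB (the real one is loaded from the path in `EXTENSION_DB_PATH`):

```python
# Hypothetical two-entry extension DB standing in for the real JSON file.
EXTENSION_DB = {
    'envoy.filters.http.buffer': {'categories': ['envoy.filters.http']},
    'envoy.filters.network.echo': {'categories': ['envoy.filters.network']},
}

# Reverse index: category name -> list of extension names, as in the module.
EXTENSION_CATEGORIES = {}
for _k, _v in EXTENSION_DB.items():
  for _cat in _v['categories']:
    EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k)

assert EXTENSION_CATEGORIES == {
    'envoy.filters.http': ['envoy.filters.http.buffer'],
    'envoy.filters.network': ['envoy.filters.network.echo'],
}
```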
FormatHeaderFromFile
Format RST header based on special file level title

Args:
  style: underline style, e.g. '=', '-'.
  source_code_info: SourceCodeInfo object.
  proto_name: If the file_level_comment does not contain a user specified
    title, use this as page title.

Returns:
  RST formatted header, and file level comment without page title strings.
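Per this docstring, the masked function returns an RST anchor plus an underlined page header (along with the stripped file-level comment). A self-contained illustration of the two helpers it composes, with bodies copied from the module visible in the previous record's file_content; the proto name and page title here are hypothetical.

```python
def FormatHeader(style, text):
  """Format RST header: text underlined by the style character (copied from the module)."""
  return '%s\n%s\n\n' % (text, style * len(text))

def FormatAnchor(label):
  """Format a label as an Envoy API RST anchor (copied from the module)."""
  return '.. _%s:\n\n' % label

# FileCrossRefLabel in the module prefixes the proto name with 'envoy_api_file_'.
anchor = FormatAnchor('envoy_api_file_envoy/config/example.proto')
print(anchor + FormatHeader('=', 'Example Page Title'))
```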
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) # MASKED: FormatHeaderFromFile function (lines 269-292) def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. 
""" if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. """ if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. 
_%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. 
if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. 
See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress:
warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
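A pattern that recurs throughout the code above is line-wise string transformation via MapLines, Indent, and IndentLines. The following is a minimal, self-contained sketch of those three helpers (the input string is hypothetical); note that IndentLines returns a lazy map object in Python 3, so the caller must consume it (e.g. join it) exactly once:

```python
import functools

def indent(spaces, line):
    # Mirrors Indent: prefix one line with the given number of spaces.
    return ' ' * spaces + line

def indent_lines(spaces, lines):
    # Mirrors IndentLines: a lazy map over many lines; join it exactly once.
    return map(functools.partial(indent, spaces), lines)

def map_lines(f, s):
    # Mirrors MapLines: apply f to every line of a flat string.
    return '\n'.join(f(line) for line in s.split('\n'))

body = 'first line\nsecond line'  # hypothetical input
print(map_lines(functools.partial(indent, 2), body))
print('\n'.join(indent_lines(4, body.split('\n'))))
```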
def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment
269
292
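The FormatHeaderFromFile implementation above (source lines 269-292 of the file below) mostly composes three small helpers. Here is a minimal, self-contained sketch, not the tool itself, that reproduces just that composition with the annotation and SourceCodeInfo machinery stubbed out; the proto file name and title are hypothetical:

```python
# Sketch of the header composition performed by FormatHeaderFromFile. The real
# function also strips [#protodoc-title:] annotations out of the file-level
# comment; that part is omitted here.

def format_anchor(label):
    # Mirrors FormatAnchor: an RST anchor directive for the label.
    return '.. _%s:\n\n' % label

def format_header(style, text):
    # Mirrors FormatHeader: text underlined with the style character.
    return '%s\n%s\n\n' % (text, style * len(text))

def header_for_file(proto_name, title=None, style='='):
    # The anchor label mirrors FileCrossRefLabel; the page title falls back
    # to the proto file name when no [#protodoc-title:] annotation exists.
    anchor = format_anchor('envoy_api_file_%s' % proto_name)
    return anchor + format_header(style, title or proto_name)

# Hypothetical inputs, for illustration only.
print(header_for_file('envoy/api/v2/route.proto', title='HTTP route components'))
```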
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
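The file_content above also defines a family of small RST link helpers (FormatInternalLink, FormatExternalLink, and the *CrossRefLabel naming scheme). A short, self-contained sketch of the markup they emit, using a hypothetical message name:

```python
# Sketch of the RST link helpers from the file above; the message name is a
# made-up example, not taken from the Envoy API.

def format_internal_link(text, ref):
    # Mirrors FormatInternalLink: a Sphinx :ref: role targeting an anchor.
    return ':ref:`%s <%s>`' % (text, ref)

def format_external_link(text, ref):
    # Mirrors FormatExternalLink: a plain RST hyperlink.
    return '`%s <%s>`_' % (text, ref)

msg = 'config.route.v3.Route'       # hypothetical message name
label = 'envoy_api_msg_%s' % msg    # mirrors MessageCrossRefLabel
print(format_internal_link(msg, label))
print(format_external_link('[%s proto]' % msg, 'https://github.com/envoyproxy/envoy'))
```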
FormatFieldTypeAsJson
Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type.
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment # MASKED: FormatFieldTypeAsJson function (lines 295-309) def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. 
""" lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. """ if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment 
strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. 
field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. 
""" return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. 
if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."'
295
309
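A usage sketch for the FormatFieldTypeAsJson implementation above, exercised against real FieldDescriptorProto messages from the protobuf runtime. The logic is inlined so the sketch runs standalone; the type_context stand-in (only its map_typenames attribute is read) and the field values are assumptions made for illustration:

```python
from types import SimpleNamespace
from google.protobuf import descriptor_pb2

def type_name_from_fqn(fqn):
    # Mirrors TypeNameFromFQN: drop the leading '.'.
    return fqn[1:]

def format_field_type_as_json(type_context, field):
    # Inlined copy of the recovered FormatFieldTypeAsJson logic above.
    if type_name_from_fqn(field.type_name) in type_context.map_typenames:
        return '"{...}"'
    if field.label == field.LABEL_REPEATED:
        return '[]'
    if field.type == field.TYPE_MESSAGE:
        return '"{...}"'
    return '"..."'

ctx = SimpleNamespace(map_typenames={})  # stand-in for a TypeContext
F = descriptor_pb2.FieldDescriptorProto
print(format_field_type_as_json(ctx, F(label=F.LABEL_REPEATED, type=F.TYPE_STRING)))  # []
print(format_field_type_as_json(ctx, F(label=F.LABEL_OPTIONAL, type=F.TYPE_MESSAGE,
                                       type_name='.envoy.Foo')))                      # "{...}"
print(format_field_type_as_json(ctx, F(label=F.LABEL_OPTIONAL, type=F.TYPE_UINT32)))  # "..."
```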
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
FormatMessageAsJson
Format a message definition DescriptorProto as a pseudo-JSON block.

Args:
  type_context: contextual information for message/enum/field.
  msg: message definition DescriptorProto.

Return:
  RST formatted pseudo-JSON string representation of message definition.
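For a message with one scalar field and one message-typed field, the returned string renders roughly as the following RST (the field names here are illustrative, not from a real proto):

```
.. code-block:: json

  {
    "name": "...",
    "config": "{...}"
  }
```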
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. 
""" if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' # MASKED: FormatMessageAsJson function (lines 312-331) def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. """ if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def 
MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. 
field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. 
""" return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. 
if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
def FormatMessageAsJson(type_context, msg):
  """Format a message definition DescriptorProto as a pseudo-JSON block.

  Args:
    type_context: contextual information for message/enum/field.
    msg: message definition DescriptorProto.

  Return:
    RST formatted pseudo-JSON string representation of message definition.
  """
  lines = []
  for index, field in enumerate(msg.field):
    field_type_context = type_context.ExtendField(index, field.name)
    leading_comment = field_type_context.leading_comment
    if HideNotImplemented(leading_comment):
      continue
    lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field)))

  if lines:
    return '.. code-block:: json\n\n  {\n' + ',\n'.join(IndentLines(4, lines)) + '\n  }\n\n'
  else:
    return '.. code-block:: json\n\n  {}\n\n'
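A sketch exercising the function with stub inputs (all `Fake*` names are assumptions; it presumes the module's `HideNotImplemented`, `IndentLines`, `TypeNameFromFQN`, and `FormatFieldTypeAsJson` are in scope, as in the file above):

```python
class FakeComment:
  annotations = {}  # no [#not-implemented-hide:] tag, so nothing is hidden

class FakeFieldContext:
  leading_comment = FakeComment()

class FakeTypeContext:
  map_typenames = {}

  def ExtendField(self, index, name):
    return FakeFieldContext()

class FakeField:
  LABEL_REPEATED = 3
  TYPE_MESSAGE = 11

  def __init__(self, name, ftype=0):
    self.name = name
    self.type_name = '.envoy.T'
    self.label = 0
    self.type = ftype

class FakeMsg:
  # one scalar-like field, one message-typed field
  field = [FakeField('name'), FakeField('config', ftype=11)]

print(FormatMessageAsJson(FakeTypeContext(), FakeMsg()))
# Expected output, roughly:
# .. code-block:: json
#
#   {
#     "name": "...",
#     "config": "{...}"
#   }
```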
312
331
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
FormatFieldType
Format a FieldDescriptorProto type description.

Adds cross-refs for message types.
TODO(htuch): Add cross-refs for enums as well.

Args:
  type_context: contextual information for message/enum/field.
  field: FieldDescriptor proto.
Return: RST formatted field type.
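Before the code, a hand-worked illustration of the renderings this function targets. The fully qualified type names are made-up examples, and the expected RST strings were derived by hand from the implementation further below, so treat this as a sketch rather than authoritative output:

```python
# Sketch: expected RST renderings for two representative field types,
# worked out by hand from FormatFieldType. The type names are
# illustrative examples, not taken from a real proto.
examples = {
    # Envoy message type -> internal Sphinx cross-reference.
    '.envoy.api.v2.core.Node':
        ':ref:`core.Node <envoy_api_msg_core.Node>`',
    # Well-known type -> external link into the protobuf reference docs.
    '.google.protobuf.Duration':
        '`Duration <https://developers.google.com/protocol-buffers/docs/'
        'reference/google.protobuf#duration>`_',
}
for fqn, rst in examples.items():
    print(fqn, '->', rst)
```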
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s # MASKED: FormatFieldType function (lines 379-435) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. 
Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
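The EXTENSION_* blocks near the top of the masked code above are plain instances of the standard library's string.Template. A minimal, self-contained sketch of the same substitution pattern, with the template text abbreviated and a made-up category and extension name:

```python
from string import Template

# Same substitution pattern as EXTENSION_CATEGORY_TEMPLATE above, with the
# template text abbreviated and a made-up category/extension name.
category_template = Template("""$anchor.. tip::
  This extension category has the following known extensions:

$extensions
""")

print(category_template.substitute(
    anchor='.. _extension_category_envoy.filters.http:\n\n',
    extensions='  - :ref:`envoy.filters.http.router '
               '<extension_envoy.filters.http.router>`'))
```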
def FormatFieldType(type_context, field):
  """Format a FieldDescriptorProto type description.

  Adds cross-refs for message types.
  TODO(htuch): Add cross-refs for enums as well.

  Args:
    type_context: contextual information for message/enum/field.
    field: FieldDescriptor proto.
  Return: RST formatted field type.
  """
  if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(
      ENVOY_PREFIX):
    type_name = NormalizeFieldTypeName(field.type_name)
    if field.type == field.TYPE_MESSAGE:
      if type_context.map_typenames and TypeNameFromFQN(
          field.type_name) in type_context.map_typenames:
        return 'map<%s, %s>' % tuple(
            map(functools.partial(FormatFieldType, type_context),
                type_context.map_typenames[TypeNameFromFQN(field.type_name)]))
      return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
    if field.type == field.TYPE_ENUM:
      return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
  elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
    wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
    return FormatExternalLink(
        wkt,
        'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' %
        wkt.lower())
  elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
    rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
    return FormatExternalLink(
        rpc,
        'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' %
        rpc.lower())
  elif field.type_name:
    return field.type_name
  pretty_type_names = {
      field.TYPE_DOUBLE: 'double',
      field.TYPE_FLOAT: 'float',
      field.TYPE_INT32: 'int32',
      field.TYPE_SFIXED32: 'int32',
      field.TYPE_SINT32: 'int32',
      field.TYPE_FIXED32: 'uint32',
      field.TYPE_UINT32: 'uint32',
      field.TYPE_INT64: 'int64',
      field.TYPE_SFIXED64: 'int64',
      field.TYPE_SINT64: 'int64',
      field.TYPE_FIXED64: 'uint64',
      field.TYPE_UINT64: 'uint64',
      field.TYPE_BOOL: 'bool',
      field.TYPE_STRING: 'string',
      field.TYPE_BYTES: 'bytes',
  }
  if field.type in pretty_type_names:
    return FormatExternalLink(pretty_type_names[field.type],
                              'https://developers.google.com/protocol-buffers/docs/proto#scalar')
  raise ProtodocError('Unknown field type ' + str(field.type))
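One detail worth noting in the implementation above: several protobuf wire encodings intentionally collapse to a single display name in pretty_type_names. A quick standalone check, with the enum numbers hard-coded to match FieldDescriptorProto.Type so the sketch avoids importing descriptor_pb2:

```python
# TYPE_INT32, TYPE_SFIXED32 and TYPE_SINT32 differ only in wire encoding,
# so the docs render all three as 'int32'. Values hard-coded to match
# FieldDescriptorProto.Type for this standalone sketch.
TYPE_INT32, TYPE_SFIXED32, TYPE_SINT32 = 5, 15, 17
pretty_type_names = {TYPE_INT32: 'int32', TYPE_SFIXED32: 'int32', TYPE_SINT32: 'int32'}
assert {pretty_type_names[t] for t in (TYPE_INT32, TYPE_SFIXED32, TYPE_SINT32)} == {'int32'}
```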
379
435
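The start_line/end_line pair above locates the masked span inside file_content. Assuming the numbers are 1-indexed and inclusive, as the `# MASKED: ... (lines 379-435)` marker suggests, a hypothetical consistency check could look like this:

```python
# Hypothetical helper, not part of any dataset tooling: recover the masked
# span from file_content via the recorded line numbers (assumed 1-indexed,
# inclusive) and compare it against the implementation field.
def masked_span(file_content, start_line, end_line):
    return '\n'.join(file_content.split('\n')[start_line - 1:end_line])

# Expected to hold for this row:
# masked_span(file_content, 379, 435) == implementation
```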
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
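Both environment variables this module reads at import time are effectively mandatory: `os.environ['ENVOY_BLOB_SHA']` raises `KeyError` when unset, and `EXTENSION_DB_PATH` is passed straight to `pathlib.Path(...).read_text()`, which fails on `None`. A minimal pre-flight guard one might wrap around the tool (a sketch, not part of the original plugin):

```python
import os
import sys

# Hypothetical guard: fail fast with a readable message instead of a
# KeyError/TypeError raised deep inside the module's import-time code.
for var in ('ENVOY_BLOB_SHA', 'EXTENSION_DB_PATH'):
    if not os.environ.get(var):
        sys.exit(f'protodoc requires the {var} environment variable to be set')
```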
FormatMessageAsDefinitionList
Format a DescriptorProto as RST definition list.

Args:
  type_context: contextual information for message/enum/field.
  msg: DescriptorProto.
  protodoc_manifest: tools.protodoc.Manifest for proto.

Returns:
  RST formatted definition list item.
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options # MASKED: FormatMessageAsDefinitionList function (lines 574-601) def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. 
""" anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. 
if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
```python
def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):
  """Format a DescriptorProto as RST definition list.

  Args:
    type_context: contextual information for message/enum/field.
    msg: DescriptorProto.
    protodoc_manifest: tools.protodoc.Manifest for proto.

  Returns:
    RST formatted definition list item.
  """
  type_context.oneof_fields = defaultdict(list)
  type_context.oneof_required = defaultdict(bool)
  type_context.oneof_names = defaultdict(list)
  for index, field in enumerate(msg.field):
    if field.HasField('oneof_index'):
      leading_comment = type_context.ExtendField(index, field.name).leading_comment
      if HideNotImplemented(leading_comment):
        continue
      type_context.oneof_fields[field.oneof_index].append((index, field.name))
  for index, oneof_decl in enumerate(msg.oneof_decl):
    if oneof_decl.options.HasExtension(validate_pb2.required):
      type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required]
    type_context.oneof_names[index] = oneof_decl.name
  return '\n'.join(
      FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name),
                                      field, protodoc_manifest)
      for index, field in enumerate(msg.field)) + '\n'
```
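The implementation's main subtlety is the oneof bookkeeping: three `defaultdict`s are hung off `type_context` so that `FormatFieldAsDefinitionListItem` can later decide between emitting "Precisely one of ... must be set." and "Only one of ... may be set.". A minimal, self-contained sketch of that grouping step, using `SimpleNamespace` stand-ins instead of the real protobuf descriptor objects:

```python
# Minimal sketch of the oneof grouping pattern used above, with hypothetical
# stand-ins for the protobuf descriptors (not the real Envoy types).
from collections import defaultdict
from types import SimpleNamespace

fields = [
    SimpleNamespace(name='tcp', oneof_index=0),
    SimpleNamespace(name='udp', oneof_index=0),
    SimpleNamespace(name='timeout', oneof_index=None),  # not in any oneof
]

oneof_fields = defaultdict(list)
for index, field in enumerate(fields):
    if field.oneof_index is not None:  # real code uses field.HasField('oneof_index')
        oneof_fields[field.oneof_index].append((index, field.name))

# oneof 0 now groups both transport fields, which is what lets the renderer
# emit a "Precisely one of tcp, udp must be set." style comment.
print(dict(oneof_fields))  # {0: [(0, 'tcp'), (1, 'udp')]}
```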
start_line: 574
end_line: 601
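Relating the columns: `start_line`/`end_line` give the masked function's position in the original file (574-601 here), while `masked_code` carries a single `# MASKED: ...` placeholder line in its place. Under that assumption, `file_content` can be reconstructed by locating the placeholder rather than indexing by the recorded line numbers, since masking changes the line count; a sketch:

```python
def reconstruct(masked_code: str, implementation: str) -> str:
    # Swap the single "# MASKED: ... (lines M-N)" placeholder line for the
    # implementation body. The recorded line numbers refer to the original
    # file, so we search for the marker instead of slicing by them.
    lines = masked_code.split('\n')
    for i, line in enumerate(lines):
        if line.lstrip().startswith('# MASKED:'):
            lines[i:i + 1] = implementation.split('\n')
            break
    return '\n'.join(lines)
```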
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
FormatEnumValueAsDefinitionListItem
Format an EnumValueDescriptorProto as RST definition list item.

Args:
  type_context: contextual information for message/enum/field.
  enum_value: EnumValueDescriptorProto.

Returns:
  RST formatted definition list item.
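From the helpers visible in `file_content` (`FormatAnchor`, `Indent`, `UNICODE_INVISIBLE_SEPARATOR`), the item this function renders for a default-valued (number == 0) enum constant has roughly the shape below; the enum and comment text are illustrative, not taken from any real API:

```python
# Illustrative shape of the rendered definition-list item for a hypothetical
# enum value; the real function derives each piece from the type context.
anchor = '.. _envoy_api_enum_value_config.Foo.BAR:\n\n'
comment = '*(DEFAULT)* ' + '\u2063' + 'The bar mode.'  # invisible separator joins the parts
item = anchor + 'BAR\n' + '  ' + comment  # MapLines(Indent, 2) indents the comment body
print(item)
```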
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' # MASKED: FormatEnumValueAsDefinitionListItem function (lines 604-621) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. 
if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
def FormatEnumValueAsDefinitionListItem(type_context, enum_value):
  """Format an EnumValueDescriptorProto as an RST definition list item.

  Args:
    type_context: contextual information for message/enum/field.
    enum_value: EnumValueDescriptorProto.

  Returns:
    RST formatted definition list item.
  """
  anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name)))
  default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else ''
  leading_comment = type_context.leading_comment
  formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
  if HideNotImplemented(leading_comment):
    return ''
  comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment
  return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment)
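
To make the return value concrete, here is a small self-contained sketch that copies the Indent and MapLines helpers from the module above and performs the same anchor/name/comment assembly as the final return statement. The enum value name Foo.BAR and the comment text are invented for illustration.

import functools

# Helpers copied verbatim from the module above.
def Indent(spaces, line):
  return ' ' * spaces + line

def MapLines(f, s):
  return '\n'.join(f(line) for line in s.split('\n'))

# Hypothetical stand-ins for what FormatAnchor(...) and
# FormatCommentWithAnnotations(...) would produce for an enum value
# Foo.BAR whose number is 0 (hence the *(DEFAULT)* marker).
anchor = '.. _envoy_api_enum_value_Foo.BAR:\n\n'
comment = '*(DEFAULT)* ' + u'\u2063' + 'Bar is the default mode.\n'

# Same assembly as the return statement above.
print(anchor + 'BAR' + '\n' + MapLines(functools.partial(Indent, 2), comment))

This prints the anchor directive, then the value name, then the comment indented two spaces beneath it, which is exactly the shape of an RST definition list item.
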
604
621
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. # See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto # for the underlying protos mentioned in this file. See # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. from collections import defaultdict import json import functools import os import pathlib import re import string import sys from google.protobuf import json_format from bazel_tools.tools.python.runfiles import runfiles import yaml # We have to do some evil things to sys.path due to the way that Python module # resolution works; we have both tools/ trees in bazel_tools and envoy. By # default, Bazel leaves us with a sys.path in which the @bazel_tools repository # takes precedence. Now that we're done with importing runfiles above, we can # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 from udpa.annotations import security_pb2 from udpa.annotations import status_pb2 from validate import validate_pb2 # Namespace prefix for Envoy core APIs. ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' # Namespace prefix for Envoy top-level APIs. ENVOY_PREFIX = '.envoy.' # Namespace prefix for WKTs. WKT_NAMESPACE_PREFIX = '.google.protobuf.' # Namespace prefix for RPCs. RPC_NAMESPACE_PREFIX = '.google.rpc.' # http://www.fileformat.info/info/unicode/char/2063/index.htm UNICODE_INVISIBLE_SEPARATOR = u'\u2063' # Template for data plane API URLs. DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format( os.environ['ENVOY_BLOB_SHA']) # Template for formating extension descriptions. EXTENSION_TEMPLATE = string.Template("""$anchor This extension may be referenced by the qualified name *$extension* .. note:: $status $security_posture """) # Template for formating extension's category/ies. EXTENSION_CATEGORIES_TEMPLATE = string.Template(""" .. tip:: $message: $categories """) # Template for formating an extension category. EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor .. tip:: This extension category has the following known extensions: $extensions """) # A map from the extension security postures (as defined in the # envoy_cc_extension build macro) to human readable text for extension docs. EXTENSION_SECURITY_POSTURES = { 'robust_to_untrusted_downstream': 'This extension is intended to be robust against untrusted downstream traffic. It ' 'assumes that the upstream is trusted.', 'robust_to_untrusted_downstream_and_upstream': 'This extension is intended to be robust against both untrusted downstream and ' 'upstream traffic.', 'requires_trusted_downstream_and_upstream': 'This extension is not hardened and should only be used in deployments' ' where both the downstream and upstream are trusted.', 'unknown': 'This extension has an unknown security posture and should only be ' 'used in deployments where both the downstream and upstream are ' 'trusted.', 'data_plane_agnostic': 'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.', } # A map from the extension status value to a human readable text for extension # docs. 
EXTENSION_STATUS_VALUES = { 'alpha': 'This extension is functional but has not had substantial production burn time, use only with this caveat.', 'wip': 'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.', } EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text()) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} for _k, _v in EXTENSION_DB.items(): for _cat in _v['categories']: EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) class ProtodocError(Exception): """Base error class for the protodoc module.""" def HideNotImplemented(comment): """Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?""" return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations def GithubUrl(type_context): """Obtain data plane API Github URL by path from a TypeContext. Args: type_context: type_context.TypeContext for node. Returns: A string with a corresponding data plane API GitHub Url. """ if type_context.location is not None: return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, type_context.location.span[0]) return '' def FormatCommentWithAnnotations(comment, type_name=''): """Format a comment string with additional RST for annotations. Args: comment: comment string. type_name: optional, 'message' or 'enum' may be specified for additional message/enum specific annotations. Returns: A string with additional RST from annotations. """ formatted_extension = '' if annotations.EXTENSION_ANNOTATION in comment.annotations: extension = comment.annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) formatted_extension_category = '' if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations: formatted_extension_category = FormatExtensionCategory( comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION]) comment = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n') return comment + formatted_extension + formatted_extension_category def MapLines(f, s): """Apply a function across each line in a flat string. Args: f: A string transform function for a line. s: A string consisting of potentially multiple lines. Returns: A flat string with f applied to each line. """ return '\n'.join(f(line) for line in s.split('\n')) def Indent(spaces, line): """Indent a string.""" return ' ' * spaces + line def IndentLines(spaces, lines): """Indent a list of strings.""" return map(functools.partial(Indent, spaces), lines) def FormatInternalLink(text, ref): return ':ref:`%s <%s>`' % (text, ref) def FormatExternalLink(text, ref): return '`%s <%s>`_' % (text, ref) def FormatHeader(style, text): """Format RST header. Args: style: underline style, e.g. '=', '-'. text: header text Returns: RST formatted header. """ return '%s\n%s\n\n' % (text, style * len(text)) def FormatExtension(extension): """Format extension metadata as RST. Args: extension: the name of the extension, e.g. com.acme.foo. Returns: RST formatted extension description. 
""" try: extension_metadata = EXTENSION_DB[extension] anchor = FormatAnchor('extension_' + extension) status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] extension = EXTENSION_TEMPLATE.substitute(anchor=anchor, extension=extension, status=status, security_posture=security_posture) categories = FormatExtensionList(extension_metadata["categories"], "extension_category") cat_or_cats = "categories" if len(categories) > 1 else "category" category_message = f"This extension extends and can be used with the following extension {cat_or_cats}" extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message, categories=categories) return f"{extension}\n\n{extension_category}" except KeyError as e: sys.stderr.write( '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n') exit(1) # Raising the error buries the above message in tracebacks. def FormatExtensionList(items, prefix="extension", indent=2): indent = " " * indent formatted_list = "\n".join(f"{indent}- :ref:`{ext} <{prefix}_{ext}>`" for ext in items) return f"{formatted_list}\n{indent}\n" def FormatExtensionCategory(extension_category): """Format extension metadata as RST. Args: extension_category: the name of the extension_category, e.g. com.acme. Returns: RST formatted extension category description. """ try: extensions = EXTENSION_CATEGORIES[extension_category] except KeyError as e: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") anchor = FormatAnchor('extension_category_' + extension_category) extensions = FormatExtensionList(sorted(extensions)) return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor, extensions=extensions) def FormatHeaderFromFile(style, source_code_info, proto_name): """Format RST header based on special file level title Args: style: underline style, e.g. '=', '-'. source_code_info: SourceCodeInfo object. proto_name: If the file_level_comment does not contain a user specified title, use this as page title. Returns: RST formatted header, and file level comment without page title strings. """ anchor = FormatAnchor(FileCrossRefLabel(proto_name)) stripped_comment = annotations.WithoutAnnotations( StripLeadingSpace('\n'.join(c + '\n' for c in source_code_info.file_level_comments))) formatted_extension = '' if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations: extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION] formatted_extension = FormatExtension(extension) if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations: return anchor + FormatHeader( style, source_code_info.file_level_annotations[ annotations.DOC_TITLE_ANNOTATION]) + formatted_extension, stripped_comment return anchor + FormatHeader(style, proto_name) + formatted_extension, stripped_comment def FormatFieldTypeAsJson(type_context, field): """Format FieldDescriptorProto.Type as a pseudo-JSON string. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted pseudo-JSON string representation of field type. """ if TypeNameFromFQN(field.type_name) in type_context.map_typenames: return '"{...}"' if field.label == field.LABEL_REPEATED: return '[]' if field.type == field.TYPE_MESSAGE: return '"{...}"' return '"..."' def FormatMessageAsJson(type_context, msg): """Format a message definition DescriptorProto as a pseudo-JSON block. 
Args: type_context: contextual information for message/enum/field. msg: message definition DescriptorProto. Return: RST formatted pseudo-JSON string representation of message definition. """ lines = [] for index, field in enumerate(msg.field): field_type_context = type_context.ExtendField(index, field.name) leading_comment = field_type_context.leading_comment if HideNotImplemented(leading_comment): continue lines.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field))) if lines: return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines(4, lines)) + '\n }\n\n' else: return '.. code-block:: json\n\n {}\n\n' def NormalizeFieldTypeName(field_fqn): """Normalize a fully qualified field type name, e.g. .envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: field_fqn: a fully qualified type name from FieldDescriptorProto.type_name. Return: Normalized type name. """ if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):] if field_fqn.startswith(ENVOY_PREFIX): return field_fqn[len(ENVOY_PREFIX):] return field_fqn def NormalizeTypeContextName(type_name): """Normalize a type name, e.g. envoy.foo.bar. Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX. Args: type_name: a name from a TypeContext. Return: Normalized type name. """ return NormalizeFieldTypeName(QualifyTypeName(type_name)) def QualifyTypeName(type_name): return '.' + type_name def TypeNameFromFQN(fqn): return fqn[1:] def FormatEmph(s): """RST format a string for emphasis.""" return '*%s*' % s def FormatFieldType(type_context, field): """Format a FieldDescriptorProto type description. Adds cross-refs for message types. TODO(htuch): Add cross-refs for enums as well. Args: type_context: contextual information for message/enum/field. field: FieldDescriptor proto. Return: RST formatted field type. 
""" if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith( ENVOY_PREFIX): type_name = NormalizeFieldTypeName(field.type_name) if field.type == field.TYPE_MESSAGE: if type_context.map_typenames and TypeNameFromFQN( field.type_name) in type_context.map_typenames: return 'map<%s, %s>' % tuple( map(functools.partial(FormatFieldType, type_context), type_context.map_typenames[TypeNameFromFQN(field.type_name)])) return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) if field.type == field.TYPE_ENUM: return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] return FormatExternalLink( wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' % wkt.lower()) elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] return FormatExternalLink( rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower()) elif field.type_name: return field.type_name pretty_type_names = { field.TYPE_DOUBLE: 'double', field.TYPE_FLOAT: 'float', field.TYPE_INT32: 'int32', field.TYPE_SFIXED32: 'int32', field.TYPE_SINT32: 'int32', field.TYPE_FIXED32: 'uint32', field.TYPE_UINT32: 'uint32', field.TYPE_INT64: 'int64', field.TYPE_SFIXED64: 'int64', field.TYPE_SINT64: 'int64', field.TYPE_FIXED64: 'uint64', field.TYPE_UINT64: 'uint64', field.TYPE_BOOL: 'bool', field.TYPE_STRING: 'string', field.TYPE_BYTES: 'bytes', } if field.type in pretty_type_names: return FormatExternalLink(pretty_type_names[field.type], 'https://developers.google.com/protocol-buffers/docs/proto#scalar') raise ProtodocError('Unknown field type ' + str(field.type)) def StripLeadingSpace(s): """Remove leading space in flat comment strings.""" return MapLines(lambda s: s[1:], s) def FileCrossRefLabel(msg_name): """File cross reference label.""" return 'envoy_api_file_%s' % msg_name def MessageCrossRefLabel(msg_name): """Message cross reference label.""" return 'envoy_api_msg_%s' % msg_name def EnumCrossRefLabel(enum_name): """Enum cross reference label.""" return 'envoy_api_enum_%s' % enum_name def FieldCrossRefLabel(field_name): """Field cross reference label.""" return 'envoy_api_field_%s' % field_name def EnumValueCrossRefLabel(enum_value_name): """Enum value cross reference label.""" return 'envoy_api_enum_value_%s' % enum_value_name def FormatAnchor(label): """Format a label as an Envoy API RST anchor.""" return '.. _%s:\n\n' % label def FormatSecurityOptions(security_option, field, type_context, edge_config): sections = [] if security_option.configure_for_untrusted_downstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.')) if security_option.configure_for_untrusted_upstream: sections.append( Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.')) if edge_config.note: sections.append(Indent(4, edge_config.note)) example_dict = json_format.MessageToDict(edge_config.example) validate_fragment.ValidateFragment(field.type_name[1:], example_dict) field_name = type_context.name.split('.')[-1] example = {field_name: example_dict} sections.append( Indent(4, 'Example configuration for untrusted environments:\n\n') + Indent(4, '.. code-block:: yaml\n\n') + '\n'.join(IndentLines(6, yaml.dump(example).split('\n')))) return '.. 
attention::\n' + '\n\n'.join(sections) def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest): """Format a FieldDescriptorProto as RST definition list item. Args: outer_type_context: contextual information for enclosing message. type_context: contextual information for message/enum/field. field: FieldDescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. """ field_annotations = [] anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name))) if field.options.HasExtension(validate_pb2.rules): rule = field.options.Extensions[validate_pb2.rules] if ((rule.HasField('message') and rule.message.required) or (rule.HasField('duration') and rule.duration.required) or (rule.HasField('string') and rule.string.min_len > 0) or (rule.HasField('string') and rule.string.min_bytes > 0) or (rule.HasField('repeated') and rule.repeated.min_items > 0)): field_annotations = ['*REQUIRED*'] leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' if field.HasField('oneof_index'): oneof_context = outer_type_context.ExtendOneof(field.oneof_index, type_context.oneof_names[field.oneof_index]) oneof_comment = oneof_context.leading_comment formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment) if HideNotImplemented(oneof_comment): return '' # If the oneof only has one field and marked required, mark the field as required. if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[ field.oneof_index]: field_annotations = ['*REQUIRED*'] if len(type_context.oneof_fields[field.oneof_index]) > 1: # Fields in oneof shouldn't be marked as required when we have oneof comment below it. field_annotations = [] oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ field.oneof_index] else '\nOnly one of %s may be set.\n' formatted_oneof_comment += oneof_template % ', '.join( FormatInternalLink( f, FieldCrossRefLabel(NormalizeTypeContextName( outer_type_context.ExtendField(i, f).name))) for i, f in type_context.oneof_fields[field.oneof_index]) else: formatted_oneof_comment = '' # If there is a udpa.annotations.security option, include it after the comment. if field.options.HasExtension(security_pb2.security): manifest_description = protodoc_manifest.fields.get(type_context.name) if not manifest_description: raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name) formatted_security_options = FormatSecurityOptions( field.options.Extensions[security_pb2.security], field, type_context, manifest_description.edge_config) else: formatted_security_options = '' pretty_label_names = { field.LABEL_OPTIONAL: '', field.LABEL_REPEATED: '**repeated** ', } comment = '(%s) ' % ', '.join( [pretty_label_names[field.label] + FormatFieldType(type_context, field)] + field_annotations) + formatted_leading_comment return anchor + field.name + '\n' + MapLines(functools.partial( Indent, 2), comment + formatted_oneof_comment) + formatted_security_options def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest): """Format a DescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. msg: DescriptorProto. protodoc_manifest: tools.protodoc.Manifest for proto. Returns: RST formatted definition list item. 
""" type_context.oneof_fields = defaultdict(list) type_context.oneof_required = defaultdict(bool) type_context.oneof_names = defaultdict(list) for index, field in enumerate(msg.field): if field.HasField('oneof_index'): leading_comment = type_context.ExtendField(index, field.name).leading_comment if HideNotImplemented(leading_comment): continue type_context.oneof_fields[field.oneof_index].append((index, field.name)) for index, oneof_decl in enumerate(msg.oneof_decl): if oneof_decl.options.HasExtension(validate_pb2.required): type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required] type_context.oneof_names[index] = oneof_decl.name return '\n'.join( FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name), field, protodoc_manifest) for index, field in enumerate(msg.field)) + '\n' def FormatEnumValueAsDefinitionListItem(type_context, enum_value): """Format a EnumValueDescriptorProto as RST definition list item. Args: type_context: contextual information for message/enum/field. enum_value: EnumValueDescriptorProto. Returns: RST formatted definition list item. """ anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name))) default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment) if HideNotImplemented(leading_comment): return '' comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), comment) def FormatEnumAsDefinitionList(type_context, enum): """Format a EnumDescriptorProto as RST definition list. Args: type_context: contextual information for message/enum/field. enum: DescriptorProto. Returns: RST formatted definition list item. """ return '\n'.join( FormatEnumValueAsDefinitionListItem(type_context.ExtendEnumValue(index, enum_value.name), enum_value) for index, enum_value in enumerate(enum.value)) + '\n' def FormatProtoAsBlockComment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), str(proto)) + '\n' class RstFormatVisitor(visitor.Visitor): """Visitor to generate a RST representation from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): r = runfiles.Create() with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: # Load as YAML, emit as JSON and then parse as proto to provide type # checking. 
protodoc_manifest_untyped = yaml.safe_load(f.read()) self.protodoc_manifest = manifest_pb2.Manifest() json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def VisitEnum(self, enum_proto, type_context): normal_enum_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type)) header = FormatHeader('-', 'Enum %s' % normal_enum_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList( type_context, enum_proto) def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): # Skip messages synthesized to represent map types. if msg_proto.options.map_entry: return '' normal_msg_type = NormalizeTypeContextName(type_context.name) anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type)) header = FormatHeader('-', normal_msg_type) github_url = GithubUrl(type_context) proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n' leading_comment = type_context.leading_comment formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message') if HideNotImplemented(leading_comment): return '' return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson( type_context, msg_proto) + FormatMessageAsDefinitionList( type_context, msg_proto, self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums) def VisitFile(self, file_proto, type_context, services, msgs, enums): has_messages = True if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums): has_messages = False # TODO(mattklein123): The logic in both the doc and transform tool around files without messages # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs # in the common case. if (has_messages and not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations and file_proto.name.startswith('envoy')): raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format( file_proto.name)) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name) # If there are no messages, we don't include in the doc tree (no support for # service rendering yet). We allow these files to be missing from the # toctrees. if not has_messages: header = ':orphan:\n\n' + header warnings = '' if file_proto.options.HasExtension(status_pb2.file_status): if file_proto.options.Extensions[status_pb2.file_status].work_in_progress: warnings += ('.. warning::\n This API is work-in-progress and is ' 'subject to breaking changes.\n\n') debug_proto = FormatProtoAsBlockComment(file_proto) return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto def Main(): plugin.Plugin([plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)]) if __name__ == '__main__': Main()
get_devilryrole_for_requestuser
Get the devilryrole for the requesting user on the current period (request.cradmin_instance). The return value is the same as for :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`, except that this method raises ValueError if it does not find a role.
from django_cradmin import crinstance, crapp from django_cradmin.crinstance import reverse_cradmin_url from devilry.apps.core.models import Period from devilry.devilry_account.models import PeriodPermissionGroup from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin from devilry.devilry_cradmin import devilry_crmenu from devilry.devilry_cradmin import devilry_crinstance from devilry.devilry_admin.views.period import admins from devilry.devilry_admin.views.period import createassignment from devilry.devilry_admin.views.period import examiners from devilry.devilry_admin.views.period import overview from devilry.devilry_admin.views.period import students from devilry.devilry_admin.views.period import edit from devilry.devilry_admin.views.period import overview_all_results from devilry.devilry_qualifiesforexam import cradmin_app as qualifiesforexam from devilry.devilry_admin.views.period.manage_tags import manage_tags class Menu(devilry_crmenu_admin.Menu): def build_menu(self): super(Menu, self).build_menu() period = self.request.cradmin_role self.add_role_menuitem_object() self.add_subject_breadcrumb_item(subject=period.subject) self.add_period_breadcrumb_item(period=period, active=True) def add_subject_breadcrumb_item(self, subject, active=False): if self.cradmin_instance.get_devilryrole_for_requestuser() == 'periodadmin': return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem( label=subject.short_name, url=reverse_cradmin_url( instanceid='devilry_admin_subject_for_periodadmin', appname='overview', roleid=subject.id, viewname=crapp.INDEXVIEW_NAME ), active=active )) else: return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem( label=subject.short_name, url=reverse_cradmin_url( instanceid='devilry_admin_subjectadmin', appname='overview', roleid=subject.id, viewname=crapp.INDEXVIEW_NAME ), active=active )) class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin): menuclass = Menu roleclass = Period apps = [ ('overview', overview.App), ('students', students.App), ('examiners', examiners.App), ('admins', admins.App), ('createassignment', createassignment.App), ('edit', edit.App), ('overview_all_results', overview_all_results.App), ('qualifiesforexam', qualifiesforexam.App), ('manage_tags', manage_tags.App), ] id = 'devilry_admin_periodadmin' rolefrontpage_appname = 'overview' def get_rolequeryset(self): return Period.objects.filter_user_is_admin(user=self.request.user)\ .order_by('-start_time') def get_titletext_for_role(self, role): """ Get a short title briefly describing the given ``role``. Remember that the role is n Period. """ period = role return period @classmethod def matches_urlpath(cls, urlpath): return urlpath.startswith('/devilry_admin/period') def __get_devilryrole_for_requestuser(self): period = self.request.cradmin_role devilryrole = PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period( user=self.request.user, period=period ) if devilryrole is None: raise ValueError('Could not find a devilryrole for request.user. This must be a bug in ' 'get_rolequeryset().') return devilryrole # MASKED: get_devilryrole_for_requestuser function (lines 98-109)
def get_devilryrole_for_requestuser(self):
    """
    Get the devilryrole for the requesting user on the current
    period (request.cradmin_instance).

    The return value is the same as for
    :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
    except that this method raises ValueError if it does not find a role.
    """
    if not hasattr(self, '_devilryrole_for_requestuser'):
        self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser()
    return self._devilryrole_for_requestuser
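
The body relies on a lazy, per-instance caching idiom: compute the role once, stash it on self, and reuse it on later calls. A minimal self-contained sketch of the same pattern follows; the class name and the role value are hypothetical.

class RoleHolder:
    def _lookup_role(self):
        # Stand-in for the expensive PeriodPermissionGroup query.
        return 'periodadmin'

    def get_role(self):
        # Compute once per instance, then serve the cached value,
        # mirroring get_devilryrole_for_requestuser above.
        if not hasattr(self, '_role'):
            self._role = self._lookup_role()
        return self._role

holder = RoleHolder()
assert holder.get_role() == 'periodadmin'  # first call computes
assert holder.get_role() == 'periodadmin'  # later calls hit the cache

Caching on the instance is safe here because the role cannot change within the lifetime of a single request's cradmin instance.
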
98
109
from django_cradmin import crinstance, crapp from django_cradmin.crinstance import reverse_cradmin_url from devilry.apps.core.models import Period from devilry.devilry_account.models import PeriodPermissionGroup from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin from devilry.devilry_cradmin import devilry_crmenu from devilry.devilry_cradmin import devilry_crinstance from devilry.devilry_admin.views.period import admins from devilry.devilry_admin.views.period import createassignment from devilry.devilry_admin.views.period import examiners from devilry.devilry_admin.views.period import overview from devilry.devilry_admin.views.period import students from devilry.devilry_admin.views.period import edit from devilry.devilry_admin.views.period import overview_all_results from devilry.devilry_qualifiesforexam import cradmin_app as qualifiesforexam from devilry.devilry_admin.views.period.manage_tags import manage_tags class Menu(devilry_crmenu_admin.Menu): def build_menu(self): super(Menu, self).build_menu() period = self.request.cradmin_role self.add_role_menuitem_object() self.add_subject_breadcrumb_item(subject=period.subject) self.add_period_breadcrumb_item(period=period, active=True) def add_subject_breadcrumb_item(self, subject, active=False): if self.cradmin_instance.get_devilryrole_for_requestuser() == 'periodadmin': return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem( label=subject.short_name, url=reverse_cradmin_url( instanceid='devilry_admin_subject_for_periodadmin', appname='overview', roleid=subject.id, viewname=crapp.INDEXVIEW_NAME ), active=active )) else: return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem( label=subject.short_name, url=reverse_cradmin_url( instanceid='devilry_admin_subjectadmin', appname='overview', roleid=subject.id, viewname=crapp.INDEXVIEW_NAME ), active=active )) class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin): menuclass = Menu roleclass = Period apps = [ ('overview', overview.App), ('students', students.App), ('examiners', examiners.App), ('admins', admins.App), ('createassignment', createassignment.App), ('edit', edit.App), ('overview_all_results', overview_all_results.App), ('qualifiesforexam', qualifiesforexam.App), ('manage_tags', manage_tags.App), ] id = 'devilry_admin_periodadmin' rolefrontpage_appname = 'overview' def get_rolequeryset(self): return Period.objects.filter_user_is_admin(user=self.request.user)\ .order_by('-start_time') def get_titletext_for_role(self, role): """ Get a short title briefly describing the given ``role``. Remember that the role is n Period. """ period = role return period @classmethod def matches_urlpath(cls, urlpath): return urlpath.startswith('/devilry_admin/period') def __get_devilryrole_for_requestuser(self): period = self.request.cradmin_role devilryrole = PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period( user=self.request.user, period=period ) if devilryrole is None: raise ValueError('Could not find a devilryrole for request.user. This must be a bug in ' 'get_rolequeryset().') return devilryrole def get_devilryrole_for_requestuser(self): """ Get the devilryrole for the requesting user on the current period (request.cradmin_instance). The return values is the same as for :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`, exept that this method raises ValueError if it does not find a role. 
""" if not hasattr(self, '_devilryrole_for_requestuser'): self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser() return self._devilryrole_for_requestuser
get_title
Return the proper title of this object. If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable, its return value will be returned.
"""Provides the MenuItem class.""" from pathlib import Path from typing import TYPE_CHECKING, Optional, Union from attr import attrs from ..action import ActionFunctionType from ..mixins import RegisterEventMixin if TYPE_CHECKING: from ..types import TitleFunction @attrs(auto_attribs=True) class MenuItem(RegisterEventMixin): """An item in a :class:`~earwax.menu.Menu`. This class is rarely used directly, instead :meth:`earwax.menu.Menu.add_item` or :meth:`earwax.menu.Menu.item` can be used to return an instance. :ivar ~earwax.MenuItem.func: The function which will be called when this item is activated. :ivar ~earwax.MenuItem.title: The title of this menu item. If this value is a callable, it should return a string which will be used as the title. :ivar ~earwax.MenuItem.select_sound_path: The path to a sound which should play when this menu item is selected. If this value is ``None`` (the default), then no sound will be heard unless the containing menu has its :attr:`~earwax.Menu.item_select_sound_path` attribute set to something that is not ``None``, or :attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not ``None``. :ivar ~earwax.MenuItem.activate_sound_path: The path to a sound which should play when this menu item is activated. If this value is ``None`` (the default), then no sound will be heard unless the containing menu has its :attr:`~earwax.Menu.item_activate_sound_path` attribute set to something that is not ``None``, or :attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not ``None``. """ func: ActionFunctionType title: Optional[Union[str, "TitleFunction"]] = None select_sound_path: Optional[Path] = None loop_select_sound: bool = False activate_sound_path: Optional[Path] = None def __attrs_post_init__(self) -> None: """Register events.""" self.register_event(self.on_selected) # MASKED: get_title function (lines 62-70) def on_selected(self) -> None: """Handle this menu item being selected.""" pass
def get_title(self) -> Optional[str]:
    """Return the proper title of this object.

    If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,
    its return value will be returned.
    """
    if callable(self.title):
        return self.title()
    return self.title
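
A quick standalone illustration of the string-or-callable pattern this method implements; the free function below mirrors the method body, and the function name and sample titles are invented for illustration.

from typing import Callable, Optional, Union

def get_title(title: Optional[Union[str, Callable[[], str]]]) -> Optional[str]:
    # Mirrors MenuItem.get_title: invoke callables, pass strings through.
    if callable(title):
        return title()
    return title

assert get_title('Main menu') == 'Main menu'
assert get_title(lambda: 'Items: 3') == 'Items: 3'  # dynamic title
assert get_title(None) is None

Accepting a zero-argument callable lets a menu title reflect state that changes between renders, without the menu item caching a stale string.
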
62
70
"""Provides the MenuItem class.""" from pathlib import Path from typing import TYPE_CHECKING, Optional, Union from attr import attrs from ..action import ActionFunctionType from ..mixins import RegisterEventMixin if TYPE_CHECKING: from ..types import TitleFunction @attrs(auto_attribs=True) class MenuItem(RegisterEventMixin): """An item in a :class:`~earwax.menu.Menu`. This class is rarely used directly, instead :meth:`earwax.menu.Menu.add_item` or :meth:`earwax.menu.Menu.item` can be used to return an instance. :ivar ~earwax.MenuItem.func: The function which will be called when this item is activated. :ivar ~earwax.MenuItem.title: The title of this menu item. If this value is a callable, it should return a string which will be used as the title. :ivar ~earwax.MenuItem.select_sound_path: The path to a sound which should play when this menu item is selected. If this value is ``None`` (the default), then no sound will be heard unless the containing menu has its :attr:`~earwax.Menu.item_select_sound_path` attribute set to something that is not ``None``, or :attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not ``None``. :ivar ~earwax.MenuItem.activate_sound_path: The path to a sound which should play when this menu item is activated. If this value is ``None`` (the default), then no sound will be heard unless the containing menu has its :attr:`~earwax.Menu.item_activate_sound_path` attribute set to something that is not ``None``, or :attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not ``None``. """ func: ActionFunctionType title: Optional[Union[str, "TitleFunction"]] = None select_sound_path: Optional[Path] = None loop_select_sound: bool = False activate_sound_path: Optional[Path] = None def __attrs_post_init__(self) -> None: """Register events.""" self.register_event(self.on_selected) def get_title(self) -> Optional[str]: """Return the proper title of this object. If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable, its return value will be returned. """ if callable(self.title): return self.title() return self.title def on_selected(self) -> None: """Handle this menu item being selected.""" pass
step
Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss.
from typing import Tuple
import math
import torch
from torch.optim.optimizer import Optimizer


def linear_warmup_and_cosine_protocol(
        f_values: Tuple[float, float, float],
        x_milestones: Tuple[int, int, int, int]):
    """
    There are 5 regions:
    1. constant at f0 for x < x0
    2. linear increase from f0 to f1 for x0 < x < x1
    3. constant at f1 for x1 < x < x2
    4. cosine protocol from f1 to f2 for x2 < x < x3
    5. constant at f2 for x > x3

    If you want only a linear ramp followed by a cosine decay, simply set:
    1. x0=0 (to eliminate the first constant piece)
    2. x2=x1 (to eliminate the second constant piece)
    3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)
    """
    assert x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3]

    def fn(step):
        if step <= x_milestones[0]:
            return float(f_values[0])
        elif (step > x_milestones[0]) and (step <= x_milestones[1]):
            m = float(f_values[1] - f_values[0]) / float(max(1, x_milestones[1] - x_milestones[0]))
            return float(f_values[0]) + m * float(step - x_milestones[0])
        elif (step > x_milestones[1]) and (step <= x_milestones[2]):
            return float(f_values[1])
        elif (step > x_milestones[2]) and (step <= x_milestones[3]):
            progress = float(step - x_milestones[2]) / float(max(1, x_milestones[3] - x_milestones[2]))  # in (0,1)
            tmp = 0.5 * (1.0 + math.cos(math.pi * progress))  # in (1,0)
            return float(f_values[2]) + tmp * float(f_values[1] - f_values[2])
        else:
            return float(f_values[2])

    return fn


class LARS(Optimizer):
    r"""
    Extends SGD in PyTorch with LARS scaling from the paper
    `Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)
        eps (float, optional): eps for division denominator (default: 1e-8)

    Example:
        >>> model = torch.nn.Linear(10, 1)
        >>> input = torch.Tensor(10)
        >>> target = torch.Tensor([1.])
        >>> loss_fn = lambda input, target: (input - target) ** 2
        >>> #
        >>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    Note:
        The application of momentum in the SGD part is modified according to
        the PyTorch standards. LARS scaling fits into the equation in the
        following fashion.

        .. math::
            \begin{aligned}
                g_{t+1} & = \text{lars\_lr} * (\beta * p_{t} + g_{t+1}), \\
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`\beta` denote the
        parameters, gradient, velocity, momentum, and weight decay respectively.
        The :math:`\text{lars\_lr}` is defined by Eq. 6 in the paper.
        The Nesterov version is analogously modified.

    .. warning::
        Parameters with weight decay set to 0 will automatically be excluded from
        layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
        and BYOL.
""" def __init__( self, params, lr=None, momentum=0, dampening=0, weight_decay=0, nesterov=False, trust_coefficient=0.001, eps=1e-8, ): if lr is None or lr < 0.0: raise ValueError(f"Invalid learning rate: {lr}") if momentum < 0.0: raise ValueError(f"Invalid momentum value: {momentum}") if weight_decay < 0.0: raise ValueError(f"Invalid weight_decay value: {weight_decay}") defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, trust_coefficient=trust_coefficient, eps=eps, ) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError("Nesterov momentum requires a momentum and zero dampening") super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) # MASKED: step function (lines 128-179)
@torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() # exclude scaling for params with 0 weight decay for group in self.param_groups: weight_decay = group["weight_decay"] momentum = group["momentum"] dampening = group["dampening"] nesterov = group["nesterov"] for p in group["params"]: if p.grad is None: continue d_p = p.grad p_norm = torch.norm(p.data) g_norm = torch.norm(p.grad.data) # lars scaling + weight decay part if weight_decay != 0: if p_norm != 0 and g_norm != 0: lars_lr = p_norm / (g_norm + p_norm * weight_decay + group["eps"]) lars_lr *= group["trust_coefficient"] d_p = d_p.add(p, alpha=weight_decay) d_p *= lars_lr # sgd part if momentum != 0: param_state = self.state[p] if "momentum_buffer" not in param_state: buf = param_state["momentum_buffer"] = torch.clone(d_p).detach() else: buf = param_state["momentum_buffer"] buf.mul_(momentum).add_(d_p, alpha=1 - dampening) if nesterov: d_p = d_p.add(buf, alpha=momentum) else: d_p = buf p.add_(d_p, alpha=-group["lr"]) return loss
128
179
from typing import Tuple
import math
import torch
from torch.optim.optimizer import Optimizer


def linear_warmup_and_cosine_protocol(
        f_values: Tuple[float, float, float],
        x_milestones: Tuple[int, int, int, int]):
    """
    There are 5 regions:
    1. constant at f0 for x < x0
    2. linear increase from f0 to f1 for x0 < x < x1
    3. constant at f1 for x1 < x < x2
    4. cosine protocol from f1 to f2 for x2 < x < x3
    5. constant at f2 for x > x3

    If you want only a linear ramp followed by a cosine decay, simply set:
    1. x0=0 (to eliminate the first constant piece)
    2. x2=x1 (to eliminate the second constant piece)
    3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)
    """
    assert x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3]

    def fn(step):
        if step <= x_milestones[0]:
            return float(f_values[0])
        elif (step > x_milestones[0]) and (step <= x_milestones[1]):
            m = float(f_values[1] - f_values[0]) / float(max(1, x_milestones[1] - x_milestones[0]))
            return float(f_values[0]) + m * float(step - x_milestones[0])
        elif (step > x_milestones[1]) and (step <= x_milestones[2]):
            return float(f_values[1])
        elif (step > x_milestones[2]) and (step <= x_milestones[3]):
            progress = float(step - x_milestones[2]) / float(max(1, x_milestones[3] - x_milestones[2]))  # in (0,1)
            tmp = 0.5 * (1.0 + math.cos(math.pi * progress))  # in (1,0)
            return float(f_values[2]) + tmp * float(f_values[1] - f_values[2])
        else:
            return float(f_values[2])

    return fn


class LARS(Optimizer):
    r"""
    Extends SGD in PyTorch with LARS scaling from the paper
    `Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)
        eps (float, optional): eps for division denominator (default: 1e-8)

    Example:
        >>> model = torch.nn.Linear(10, 1)
        >>> input = torch.Tensor(10)
        >>> target = torch.Tensor([1.])
        >>> loss_fn = lambda input, target: (input - target) ** 2
        >>> #
        >>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    Note:
        The application of momentum in the SGD part is modified according to
        the PyTorch standards. LARS scaling fits into the equation in the
        following fashion.

        .. math::
            \begin{aligned}
                g_{t+1} & = \text{lars\_lr} * (\beta * p_{t} + g_{t+1}), \\
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`\beta` denote the
        parameters, gradient, velocity, momentum, and weight decay respectively.
        The :math:`\text{lars\_lr}` is defined by Eq. 6 in the paper.
        The Nesterov version is analogously modified.

    .. warning::
        Parameters with weight decay set to 0 will automatically be excluded from
        layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
        and BYOL.
""" def __init__( self, params, lr=None, momentum=0, dampening=0, weight_decay=0, nesterov=False, trust_coefficient=0.001, eps=1e-8, ): if lr is None or lr < 0.0: raise ValueError(f"Invalid learning rate: {lr}") if momentum < 0.0: raise ValueError(f"Invalid momentum value: {momentum}") if weight_decay < 0.0: raise ValueError(f"Invalid weight_decay value: {weight_decay}") defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, trust_coefficient=trust_coefficient, eps=eps, ) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError("Nesterov momentum requires a momentum and zero dampening") super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() # exclude scaling for params with 0 weight decay for group in self.param_groups: weight_decay = group["weight_decay"] momentum = group["momentum"] dampening = group["dampening"] nesterov = group["nesterov"] for p in group["params"]: if p.grad is None: continue d_p = p.grad p_norm = torch.norm(p.data) g_norm = torch.norm(p.grad.data) # lars scaling + weight decay part if weight_decay != 0: if p_norm != 0 and g_norm != 0: lars_lr = p_norm / (g_norm + p_norm * weight_decay + group["eps"]) lars_lr *= group["trust_coefficient"] d_p = d_p.add(p, alpha=weight_decay) d_p *= lars_lr # sgd part if momentum != 0: param_state = self.state[p] if "momentum_buffer" not in param_state: buf = param_state["momentum_buffer"] = torch.clone(d_p).detach() else: buf = param_state["momentum_buffer"] buf.mul_(momentum).add_(d_p, alpha=1 - dampening) if nesterov: d_p = d_p.add(buf, alpha=momentum) else: d_p = buf p.add_(d_p, alpha=-group["lr"]) return loss
equation_checker
Check equation for balanced parentheses

Args:
   equation(string): String form of equation
Returns:
   bool: True if the parentheses are balanced, False otherwise
""" In this exercise you are going to apply what you learned about stacks with a real world problem. We will be using stacks to make sure the parentheses are balanced in mathematical expressions such as: ((3^2 + 8)*(5/2))/(2+6) In real life you can see this extend to many things such as text editor plugins and interactive development environments for all sorts of bracket completion checks. Take a string as an input and return `True` if it's parentheses are balanced or `False` if it is not. """ from typing import List class Stack: def __init__(self): self.items = [] def size(self): return len(self.items) def push(self, item): self.items.append(item) def pop(self): if self.size() == 0: return None else: return self.items.pop() # MASKED: equation_checker function (lines 32-51) def _equation_checker(equation): """ Check equation for balanced parentheses """ # not in the the spirit return equation.count("(") == equation.count(")") def udacity_equation_checker(equation): stack = Stack() for char in equation: if char == "(": stack.push(char) elif char == ")": if stack.pop() == None: return False return stack.size() == 0
def equation_checker(equation):
    """
    Check equation for balanced parentheses

    Args:
       equation(string): String form of equation
    Returns:
       bool: True if the parentheses are balanced, False otherwise
    """

    opening_parenthesis = Stack()
    closing_parenthesis = Stack()

    for char in equation:
        if char == "(":
            opening_parenthesis.push(char)
        elif char == ")":
            closing_parenthesis.push(char)

    return opening_parenthesis.size() == closing_parenthesis.size()
32
51
""" In this exercise you are going to apply what you learned about stacks with a real world problem. We will be using stacks to make sure the parentheses are balanced in mathematical expressions such as: ((3^2 + 8)*(5/2))/(2+6) In real life you can see this extend to many things such as text editor plugins and interactive development environments for all sorts of bracket completion checks. Take a string as an input and return `True` if it's parentheses are balanced or `False` if it is not. """ from typing import List class Stack: def __init__(self): self.items = [] def size(self): return len(self.items) def push(self, item): self.items.append(item) def pop(self): if self.size() == 0: return None else: return self.items.pop() def equation_checker(equation): """ Check equation for balanced parentheses Args: equation(string): String form of equation Returns: bool: Return if parentheses are balanced or not """ opening_parenthesis = Stack() closing_parenthesis = Stack() for _ in equation: if _ == "(": opening_parenthesis.push(_) elif _ == ")": closing_parenthesis.push(_) return opening_parenthesis.size() == closing_parenthesis.size() def _equation_checker(equation): """ Check equation for balanced parentheses """ # not in the the spirit return equation.count("(") == equation.count(")") def udacity_equation_checker(equation): stack = Stack() for char in equation: if char == "(": stack.push(char) elif char == ")": if stack.pop() == None: return False return stack.size() == 0
__handleInline
Process a string with inline patterns, replacing matched text with placeholders.

Keyword arguments:

* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with

Returns: String with placeholders.
import re from . import inlinepatterns from . import util from . import odict def build_treeprocessors(md_instance, **kwargs): """ Build the default treeprocessors for Markdown. """ treeprocessors = odict.OrderedDict() treeprocessors["inline"] = InlineProcessor(md_instance) treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance) return treeprocessors def isString(s): """ Check if it's string """ if not isinstance(s, util.AtomicString): return isinstance(s, str) return False class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Treeprocessor(Processor): """ Treeprocessors are run on the ElementTree object before serialization. Each Treeprocessor implements a "run" method that takes a pointer to an ElementTree, modifies it as necessary and returns an ElementTree object. Treeprocessors must extend markdown.Treeprocessor. """ def run(self, root): """ Subclasses of Treeprocessor should implement a `run` method, which takes a root ElementTree. This method can return another ElementTree object, and the existing root ElementTree will be replaced, or it can modify the current tree and return None. """ pass class InlineProcessor(Treeprocessor): """ A Treeprocessor that traverses a tree, applying inline patterns. """ def __init__(self, md): self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX self.__placeholder_suffix = util.ETX self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + len(self.__placeholder_suffix) self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.markdown = md def __makePlaceholder(self, type): """ Generate a placeholder """ id = "%04d" % len(self.stashed_nodes) hash = util.INLINE_PLACEHOLDER % id return hash, id def __findPlaceholder(self, data, index): """ Extract id from data string, start from index Keyword arguments: * data: string * index: index, from which we start search Returns: placeholder id and string index, after the found placeholder. """ m = self.__placeholder_re.search(data, index) if m: return m.group(1), m.end() else: return None, index + 1 def __stashNode(self, node, type): """ Add node to stash """ placeholder, id = self.__makePlaceholder(type) self.stashed_nodes[id] = node return placeholder # MASKED: __handleInline function (lines 92-113) def __processElementText(self, node, subnode, isText=True): """ Process placeholders in Element.text or Element.tail of Elements popped from self.stashed_nodes. Keywords arguments: * node: parent node * subnode: processing node * isText: bool variable, True - it's text, False - it's tail Returns: None """ if isText: text = subnode.text subnode.text = None else: text = subnode.tail subnode.tail = None childResult = self.__processPlaceholders(text, subnode) if not isText and node is not subnode: pos = node.getchildren().index(subnode) node.remove(subnode) else: pos = 0 childResult.reverse() for newChild in childResult: node.insert(pos, newChild) def __processPlaceholders(self, data, parent): """ Process string with placeholders and generate ElementTree tree. Keyword arguments: * data: string with placeholders instead of ElementTree elements. * parent: Element, which contains processing inline data Returns: list with ElementTree elements with applied inline patterns. 
""" def linkText(text): if text: if result: if result[-1].tail: result[-1].tail += text else: result[-1].tail = text else: if parent.text: parent.text += text else: parent.text = text result = [] strartIndex = 0 while data: index = data.find(self.__placeholder_prefix, strartIndex) if index != -1: id, phEndIndex = self.__findPlaceholder(data, index) if id in self.stashed_nodes: node = self.stashed_nodes.get(id) if index > 0: text = data[strartIndex:index] linkText(text) if not isString(node): # it's Element for child in [node] + node.getchildren(): if child.tail: if child.tail.strip(): self.__processElementText(node, child,False) if child.text: if child.text.strip(): self.__processElementText(child, child) else: # it's just a string linkText(node) strartIndex = phEndIndex continue strartIndex = phEndIndex result.append(node) else: # wrong placeholder end = index + len(self.__placeholder_prefix) linkText(data[strartIndex:end]) strartIndex = end else: text = data[strartIndex:] if isinstance(data, util.AtomicString): # We don't want to loose the AtomicString text = util.AtomicString(text) linkText(text) data = "" return result def __applyPattern(self, pattern, data, patternIndex, startIndex=0): """ Check if the line fits the pattern, create the necessary elements, add it to stashed_nodes. Keyword arguments: * data: the text to be processed * pattern: the pattern to be checked * patternIndex: index of current pattern * startIndex: string index, from which we start searching Returns: String with placeholders instead of ElementTree elements. """ match = pattern.getCompiledRegExp().match(data[startIndex:]) leftData = data[:startIndex] if not match: return data, False, 0 node = pattern.handleMatch(match) if node is None: return data, True, len(leftData)+match.span(len(match.groups()))[0] if not isString(node): if not isinstance(node.text, util.AtomicString): # We need to process current node too for child in [node] + node.getchildren(): if not isString(node): if child.text: child.text = self.__handleInline(child.text, patternIndex + 1) if child.tail: child.tail = self.__handleInline(child.tail, patternIndex) placeholder = self.__stashNode(node, pattern.type()) return "%s%s%s%s" % (leftData, match.group(1), placeholder, match.groups()[-1]), True, 0 def run(self, tree): """Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline paterns, instead of normal string, use subclass AtomicString: node.text = markdown.AtomicString("This will not be processed.") Arguments: * tree: ElementTree object, representing Markdown tree. Returns: ElementTree object with applied inline patterns. 
""" self.stashed_nodes = {} stack = [tree] while stack: currElement = stack.pop() insertQueue = [] for child in currElement.getchildren(): if child.text and not isinstance(child.text, util.AtomicString): text = child.text child.text = None lst = self.__processPlaceholders(self.__handleInline( text), child) stack += lst insertQueue.append((child, lst)) if child.tail: tail = self.__handleInline(child.tail) dumby = util.etree.Element('d') tailResult = self.__processPlaceholders(tail, dumby) if dumby.text: child.tail = dumby.text else: child.tail = None pos = currElement.getchildren().index(child) + 1 tailResult.reverse() for newChild in tailResult: currElement.insert(pos, newChild) if child.getchildren(): stack.append(child) for element, lst in insertQueue: if self.markdown.enable_attributes: if element.text: element.text = \ inlinepatterns.handleAttributes(element.text, element) i = 0 for newChild in lst: if self.markdown.enable_attributes: # Processing attributes if newChild.tail: newChild.tail = \ inlinepatterns.handleAttributes(newChild.tail, element) if newChild.text: newChild.text = \ inlinepatterns.handleAttributes(newChild.text, newChild) element.insert(i, newChild) i += 1 return tree class PrettifyTreeprocessor(Treeprocessor): """ Add linebreaks to the html document. """ def _prettifyETree(self, elem): """ Recursively add linebreaks to ElementTree children. """ i = "\n" if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: if (not elem.text or not elem.text.strip()) \ and len(elem) and util.isBlockLevel(elem[0].tag): elem.text = i for e in elem: if util.isBlockLevel(e.tag): self._prettifyETree(e) if not elem.tail or not elem.tail.strip(): elem.tail = i if not elem.tail or not elem.tail.strip(): elem.tail = i def run(self, root): """ Add linebreaks to ElementTree root object. """ self._prettifyETree(root) # Do <br />'s seperately as they are often in the middle of # inline content and missed by _prettifyETree. brs = root.getiterator('br') for br in brs: if not br.tail or not br.tail.strip(): br.tail = '\n' else: br.tail = '\n%s' % br.tail
def __handleInline(self, data, patternIndex=0):
    """
    Process a string with inline patterns, replacing matched text
    with placeholders.

    Keyword arguments:

    * data: A line of Markdown text
    * patternIndex: The index of the inlinePattern to start with

    Returns: String with placeholders.

    """
    if not isinstance(data, util.AtomicString):
        startIndex = 0
        while patternIndex < len(self.markdown.inlinePatterns):
            data, matched, startIndex = self.__applyPattern(
                self.markdown.inlinePatterns.value_for_index(patternIndex),
                data, patternIndex, startIndex)
            if not matched:
                patternIndex += 1
    return data
92
113
import re from . import inlinepatterns from . import util from . import odict def build_treeprocessors(md_instance, **kwargs): """ Build the default treeprocessors for Markdown. """ treeprocessors = odict.OrderedDict() treeprocessors["inline"] = InlineProcessor(md_instance) treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance) return treeprocessors def isString(s): """ Check if it's string """ if not isinstance(s, util.AtomicString): return isinstance(s, str) return False class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Treeprocessor(Processor): """ Treeprocessors are run on the ElementTree object before serialization. Each Treeprocessor implements a "run" method that takes a pointer to an ElementTree, modifies it as necessary and returns an ElementTree object. Treeprocessors must extend markdown.Treeprocessor. """ def run(self, root): """ Subclasses of Treeprocessor should implement a `run` method, which takes a root ElementTree. This method can return another ElementTree object, and the existing root ElementTree will be replaced, or it can modify the current tree and return None. """ pass class InlineProcessor(Treeprocessor): """ A Treeprocessor that traverses a tree, applying inline patterns. """ def __init__(self, md): self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX self.__placeholder_suffix = util.ETX self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + len(self.__placeholder_suffix) self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.markdown = md def __makePlaceholder(self, type): """ Generate a placeholder """ id = "%04d" % len(self.stashed_nodes) hash = util.INLINE_PLACEHOLDER % id return hash, id def __findPlaceholder(self, data, index): """ Extract id from data string, start from index Keyword arguments: * data: string * index: index, from which we start search Returns: placeholder id and string index, after the found placeholder. """ m = self.__placeholder_re.search(data, index) if m: return m.group(1), m.end() else: return None, index + 1 def __stashNode(self, node, type): """ Add node to stash """ placeholder, id = self.__makePlaceholder(type) self.stashed_nodes[id] = node return placeholder def __handleInline(self, data, patternIndex=0): """ Process string with inline patterns and replace it with placeholders Keyword arguments: * data: A line of Markdown text * patternIndex: The index of the inlinePattern to start with Returns: String with placeholders. """ if not isinstance(data, util.AtomicString): startIndex = 0 while patternIndex < len(self.markdown.inlinePatterns): data, matched, startIndex = self.__applyPattern( self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex) if not matched: patternIndex += 1 return data def __processElementText(self, node, subnode, isText=True): """ Process placeholders in Element.text or Element.tail of Elements popped from self.stashed_nodes. 
Keywords arguments: * node: parent node * subnode: processing node * isText: bool variable, True - it's text, False - it's tail Returns: None """ if isText: text = subnode.text subnode.text = None else: text = subnode.tail subnode.tail = None childResult = self.__processPlaceholders(text, subnode) if not isText and node is not subnode: pos = node.getchildren().index(subnode) node.remove(subnode) else: pos = 0 childResult.reverse() for newChild in childResult: node.insert(pos, newChild) def __processPlaceholders(self, data, parent): """ Process string with placeholders and generate ElementTree tree. Keyword arguments: * data: string with placeholders instead of ElementTree elements. * parent: Element, which contains processing inline data Returns: list with ElementTree elements with applied inline patterns. """ def linkText(text): if text: if result: if result[-1].tail: result[-1].tail += text else: result[-1].tail = text else: if parent.text: parent.text += text else: parent.text = text result = [] strartIndex = 0 while data: index = data.find(self.__placeholder_prefix, strartIndex) if index != -1: id, phEndIndex = self.__findPlaceholder(data, index) if id in self.stashed_nodes: node = self.stashed_nodes.get(id) if index > 0: text = data[strartIndex:index] linkText(text) if not isString(node): # it's Element for child in [node] + node.getchildren(): if child.tail: if child.tail.strip(): self.__processElementText(node, child,False) if child.text: if child.text.strip(): self.__processElementText(child, child) else: # it's just a string linkText(node) strartIndex = phEndIndex continue strartIndex = phEndIndex result.append(node) else: # wrong placeholder end = index + len(self.__placeholder_prefix) linkText(data[strartIndex:end]) strartIndex = end else: text = data[strartIndex:] if isinstance(data, util.AtomicString): # We don't want to loose the AtomicString text = util.AtomicString(text) linkText(text) data = "" return result def __applyPattern(self, pattern, data, patternIndex, startIndex=0): """ Check if the line fits the pattern, create the necessary elements, add it to stashed_nodes. Keyword arguments: * data: the text to be processed * pattern: the pattern to be checked * patternIndex: index of current pattern * startIndex: string index, from which we start searching Returns: String with placeholders instead of ElementTree elements. """ match = pattern.getCompiledRegExp().match(data[startIndex:]) leftData = data[:startIndex] if not match: return data, False, 0 node = pattern.handleMatch(match) if node is None: return data, True, len(leftData)+match.span(len(match.groups()))[0] if not isString(node): if not isinstance(node.text, util.AtomicString): # We need to process current node too for child in [node] + node.getchildren(): if not isString(node): if child.text: child.text = self.__handleInline(child.text, patternIndex + 1) if child.tail: child.tail = self.__handleInline(child.tail, patternIndex) placeholder = self.__stashNode(node, pattern.type()) return "%s%s%s%s" % (leftData, match.group(1), placeholder, match.groups()[-1]), True, 0 def run(self, tree): """Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline paterns, instead of normal string, use subclass AtomicString: node.text = markdown.AtomicString("This will not be processed.") Arguments: * tree: ElementTree object, representing Markdown tree. 
Returns: ElementTree object with applied inline patterns. """ self.stashed_nodes = {} stack = [tree] while stack: currElement = stack.pop() insertQueue = [] for child in currElement.getchildren(): if child.text and not isinstance(child.text, util.AtomicString): text = child.text child.text = None lst = self.__processPlaceholders(self.__handleInline( text), child) stack += lst insertQueue.append((child, lst)) if child.tail: tail = self.__handleInline(child.tail) dumby = util.etree.Element('d') tailResult = self.__processPlaceholders(tail, dumby) if dumby.text: child.tail = dumby.text else: child.tail = None pos = currElement.getchildren().index(child) + 1 tailResult.reverse() for newChild in tailResult: currElement.insert(pos, newChild) if child.getchildren(): stack.append(child) for element, lst in insertQueue: if self.markdown.enable_attributes: if element.text: element.text = \ inlinepatterns.handleAttributes(element.text, element) i = 0 for newChild in lst: if self.markdown.enable_attributes: # Processing attributes if newChild.tail: newChild.tail = \ inlinepatterns.handleAttributes(newChild.tail, element) if newChild.text: newChild.text = \ inlinepatterns.handleAttributes(newChild.text, newChild) element.insert(i, newChild) i += 1 return tree class PrettifyTreeprocessor(Treeprocessor): """ Add linebreaks to the html document. """ def _prettifyETree(self, elem): """ Recursively add linebreaks to ElementTree children. """ i = "\n" if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: if (not elem.text or not elem.text.strip()) \ and len(elem) and util.isBlockLevel(elem[0].tag): elem.text = i for e in elem: if util.isBlockLevel(e.tag): self._prettifyETree(e) if not elem.tail or not elem.tail.strip(): elem.tail = i if not elem.tail or not elem.tail.strip(): elem.tail = i def run(self, root): """ Add linebreaks to ElementTree root object. """ self._prettifyETree(root) # Do <br />'s seperately as they are often in the middle of # inline content and missed by _prettifyETree. brs = root.getiterator('br') for br in brs: if not br.tail or not br.tail.strip(): br.tail = '\n' else: br.tail = '\n%s' % br.tail
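`__handleInline` is private, but its effect is visible through the package's public entry point: inline patterns are what turn `*emphasis*` and backticks into `<em>` and `<code>` elements. A small sketch using the Python-Markdown package (output formatting may vary by version):

```python
# Observing the inline treeprocessor through the public API.
import markdown

html = markdown.markdown("Some *emphasised* and `literal` text")
print(html)
# Typically: <p>Some <em>emphasised</em> and <code>literal</code> text</p>
```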
__processPlaceholders
Process string with placeholders and generate ElementTree tree. Keyword arguments: * data: string with placeholders instead of ElementTree elements. * parent: Element, which contains processing inline data Returns: list with ElementTree elements with applied inline patterns.
import re from . import inlinepatterns from . import util from . import odict def build_treeprocessors(md_instance, **kwargs): """ Build the default treeprocessors for Markdown. """ treeprocessors = odict.OrderedDict() treeprocessors["inline"] = InlineProcessor(md_instance) treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance) return treeprocessors def isString(s): """ Check if it's string """ if not isinstance(s, util.AtomicString): return isinstance(s, str) return False class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Treeprocessor(Processor): """ Treeprocessors are run on the ElementTree object before serialization. Each Treeprocessor implements a "run" method that takes a pointer to an ElementTree, modifies it as necessary and returns an ElementTree object. Treeprocessors must extend markdown.Treeprocessor. """ def run(self, root): """ Subclasses of Treeprocessor should implement a `run` method, which takes a root ElementTree. This method can return another ElementTree object, and the existing root ElementTree will be replaced, or it can modify the current tree and return None. """ pass class InlineProcessor(Treeprocessor): """ A Treeprocessor that traverses a tree, applying inline patterns. """ def __init__(self, md): self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX self.__placeholder_suffix = util.ETX self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + len(self.__placeholder_suffix) self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.markdown = md def __makePlaceholder(self, type): """ Generate a placeholder """ id = "%04d" % len(self.stashed_nodes) hash = util.INLINE_PLACEHOLDER % id return hash, id def __findPlaceholder(self, data, index): """ Extract id from data string, start from index Keyword arguments: * data: string * index: index, from which we start search Returns: placeholder id and string index, after the found placeholder. """ m = self.__placeholder_re.search(data, index) if m: return m.group(1), m.end() else: return None, index + 1 def __stashNode(self, node, type): """ Add node to stash """ placeholder, id = self.__makePlaceholder(type) self.stashed_nodes[id] = node return placeholder def __handleInline(self, data, patternIndex=0): """ Process string with inline patterns and replace it with placeholders Keyword arguments: * data: A line of Markdown text * patternIndex: The index of the inlinePattern to start with Returns: String with placeholders. """ if not isinstance(data, util.AtomicString): startIndex = 0 while patternIndex < len(self.markdown.inlinePatterns): data, matched, startIndex = self.__applyPattern( self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex) if not matched: patternIndex += 1 return data def __processElementText(self, node, subnode, isText=True): """ Process placeholders in Element.text or Element.tail of Elements popped from self.stashed_nodes. 
Keywords arguments: * node: parent node * subnode: processing node * isText: bool variable, True - it's text, False - it's tail Returns: None """ if isText: text = subnode.text subnode.text = None else: text = subnode.tail subnode.tail = None childResult = self.__processPlaceholders(text, subnode) if not isText and node is not subnode: pos = node.getchildren().index(subnode) node.remove(subnode) else: pos = 0 childResult.reverse() for newChild in childResult: node.insert(pos, newChild) # MASKED: __processPlaceholders function (lines 148-214) def __applyPattern(self, pattern, data, patternIndex, startIndex=0): """ Check if the line fits the pattern, create the necessary elements, add it to stashed_nodes. Keyword arguments: * data: the text to be processed * pattern: the pattern to be checked * patternIndex: index of current pattern * startIndex: string index, from which we start searching Returns: String with placeholders instead of ElementTree elements. """ match = pattern.getCompiledRegExp().match(data[startIndex:]) leftData = data[:startIndex] if not match: return data, False, 0 node = pattern.handleMatch(match) if node is None: return data, True, len(leftData)+match.span(len(match.groups()))[0] if not isString(node): if not isinstance(node.text, util.AtomicString): # We need to process current node too for child in [node] + node.getchildren(): if not isString(node): if child.text: child.text = self.__handleInline(child.text, patternIndex + 1) if child.tail: child.tail = self.__handleInline(child.tail, patternIndex) placeholder = self.__stashNode(node, pattern.type()) return "%s%s%s%s" % (leftData, match.group(1), placeholder, match.groups()[-1]), True, 0 def run(self, tree): """Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline paterns, instead of normal string, use subclass AtomicString: node.text = markdown.AtomicString("This will not be processed.") Arguments: * tree: ElementTree object, representing Markdown tree. Returns: ElementTree object with applied inline patterns. """ self.stashed_nodes = {} stack = [tree] while stack: currElement = stack.pop() insertQueue = [] for child in currElement.getchildren(): if child.text and not isinstance(child.text, util.AtomicString): text = child.text child.text = None lst = self.__processPlaceholders(self.__handleInline( text), child) stack += lst insertQueue.append((child, lst)) if child.tail: tail = self.__handleInline(child.tail) dumby = util.etree.Element('d') tailResult = self.__processPlaceholders(tail, dumby) if dumby.text: child.tail = dumby.text else: child.tail = None pos = currElement.getchildren().index(child) + 1 tailResult.reverse() for newChild in tailResult: currElement.insert(pos, newChild) if child.getchildren(): stack.append(child) for element, lst in insertQueue: if self.markdown.enable_attributes: if element.text: element.text = \ inlinepatterns.handleAttributes(element.text, element) i = 0 for newChild in lst: if self.markdown.enable_attributes: # Processing attributes if newChild.tail: newChild.tail = \ inlinepatterns.handleAttributes(newChild.tail, element) if newChild.text: newChild.text = \ inlinepatterns.handleAttributes(newChild.text, newChild) element.insert(i, newChild) i += 1 return tree class PrettifyTreeprocessor(Treeprocessor): """ Add linebreaks to the html document. 
""" def _prettifyETree(self, elem): """ Recursively add linebreaks to ElementTree children. """ i = "\n" if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: if (not elem.text or not elem.text.strip()) \ and len(elem) and util.isBlockLevel(elem[0].tag): elem.text = i for e in elem: if util.isBlockLevel(e.tag): self._prettifyETree(e) if not elem.tail or not elem.tail.strip(): elem.tail = i if not elem.tail or not elem.tail.strip(): elem.tail = i def run(self, root): """ Add linebreaks to ElementTree root object. """ self._prettifyETree(root) # Do <br />'s seperately as they are often in the middle of # inline content and missed by _prettifyETree. brs = root.getiterator('br') for br in brs: if not br.tail or not br.tail.strip(): br.tail = '\n' else: br.tail = '\n%s' % br.tail
def __processPlaceholders(self, data, parent):
    """
    Process string with placeholders and generate ElementTree tree.

    Keyword arguments:

    * data: string with placeholders instead of ElementTree elements.
    * parent: Element, which contains processing inline data

    Returns: list with ElementTree elements with applied inline patterns.

    """
    def linkText(text):
        if text:
            if result:
                if result[-1].tail:
                    result[-1].tail += text
                else:
                    result[-1].tail = text
            else:
                if parent.text:
                    parent.text += text
                else:
                    parent.text = text
    result = []
    startIndex = 0
    while data:
        index = data.find(self.__placeholder_prefix, startIndex)
        if index != -1:
            id, phEndIndex = self.__findPlaceholder(data, index)

            if id in self.stashed_nodes:
                node = self.stashed_nodes.get(id)

                if index > 0:
                    text = data[startIndex:index]
                    linkText(text)

                if not isString(node):  # it's an Element
                    for child in [node] + node.getchildren():
                        if child.tail:
                            if child.tail.strip():
                                self.__processElementText(node, child, False)
                        if child.text:
                            if child.text.strip():
                                self.__processElementText(child, child)
                else:  # it's just a string
                    linkText(node)
                    startIndex = phEndIndex
                    continue

                startIndex = phEndIndex
                result.append(node)

            else:  # wrong placeholder
                end = index + len(self.__placeholder_prefix)
                linkText(data[startIndex:end])
                startIndex = end
        else:
            text = data[startIndex:]
            if isinstance(data, util.AtomicString):
                # We don't want to lose the AtomicString
                text = util.AtomicString(text)
            linkText(text)
            data = ""

    return result
148
214
import re from . import inlinepatterns from . import util from . import odict def build_treeprocessors(md_instance, **kwargs): """ Build the default treeprocessors for Markdown. """ treeprocessors = odict.OrderedDict() treeprocessors["inline"] = InlineProcessor(md_instance) treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance) return treeprocessors def isString(s): """ Check if it's string """ if not isinstance(s, util.AtomicString): return isinstance(s, str) return False class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Treeprocessor(Processor): """ Treeprocessors are run on the ElementTree object before serialization. Each Treeprocessor implements a "run" method that takes a pointer to an ElementTree, modifies it as necessary and returns an ElementTree object. Treeprocessors must extend markdown.Treeprocessor. """ def run(self, root): """ Subclasses of Treeprocessor should implement a `run` method, which takes a root ElementTree. This method can return another ElementTree object, and the existing root ElementTree will be replaced, or it can modify the current tree and return None. """ pass class InlineProcessor(Treeprocessor): """ A Treeprocessor that traverses a tree, applying inline patterns. """ def __init__(self, md): self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX self.__placeholder_suffix = util.ETX self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + len(self.__placeholder_suffix) self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.markdown = md def __makePlaceholder(self, type): """ Generate a placeholder """ id = "%04d" % len(self.stashed_nodes) hash = util.INLINE_PLACEHOLDER % id return hash, id def __findPlaceholder(self, data, index): """ Extract id from data string, start from index Keyword arguments: * data: string * index: index, from which we start search Returns: placeholder id and string index, after the found placeholder. """ m = self.__placeholder_re.search(data, index) if m: return m.group(1), m.end() else: return None, index + 1 def __stashNode(self, node, type): """ Add node to stash """ placeholder, id = self.__makePlaceholder(type) self.stashed_nodes[id] = node return placeholder def __handleInline(self, data, patternIndex=0): """ Process string with inline patterns and replace it with placeholders Keyword arguments: * data: A line of Markdown text * patternIndex: The index of the inlinePattern to start with Returns: String with placeholders. """ if not isinstance(data, util.AtomicString): startIndex = 0 while patternIndex < len(self.markdown.inlinePatterns): data, matched, startIndex = self.__applyPattern( self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex) if not matched: patternIndex += 1 return data def __processElementText(self, node, subnode, isText=True): """ Process placeholders in Element.text or Element.tail of Elements popped from self.stashed_nodes. 
Keywords arguments: * node: parent node * subnode: processing node * isText: bool variable, True - it's text, False - it's tail Returns: None """ if isText: text = subnode.text subnode.text = None else: text = subnode.tail subnode.tail = None childResult = self.__processPlaceholders(text, subnode) if not isText and node is not subnode: pos = node.getchildren().index(subnode) node.remove(subnode) else: pos = 0 childResult.reverse() for newChild in childResult: node.insert(pos, newChild) def __processPlaceholders(self, data, parent): """ Process string with placeholders and generate ElementTree tree. Keyword arguments: * data: string with placeholders instead of ElementTree elements. * parent: Element, which contains processing inline data Returns: list with ElementTree elements with applied inline patterns. """ def linkText(text): if text: if result: if result[-1].tail: result[-1].tail += text else: result[-1].tail = text else: if parent.text: parent.text += text else: parent.text = text result = [] strartIndex = 0 while data: index = data.find(self.__placeholder_prefix, strartIndex) if index != -1: id, phEndIndex = self.__findPlaceholder(data, index) if id in self.stashed_nodes: node = self.stashed_nodes.get(id) if index > 0: text = data[strartIndex:index] linkText(text) if not isString(node): # it's Element for child in [node] + node.getchildren(): if child.tail: if child.tail.strip(): self.__processElementText(node, child,False) if child.text: if child.text.strip(): self.__processElementText(child, child) else: # it's just a string linkText(node) strartIndex = phEndIndex continue strartIndex = phEndIndex result.append(node) else: # wrong placeholder end = index + len(self.__placeholder_prefix) linkText(data[strartIndex:end]) strartIndex = end else: text = data[strartIndex:] if isinstance(data, util.AtomicString): # We don't want to loose the AtomicString text = util.AtomicString(text) linkText(text) data = "" return result def __applyPattern(self, pattern, data, patternIndex, startIndex=0): """ Check if the line fits the pattern, create the necessary elements, add it to stashed_nodes. Keyword arguments: * data: the text to be processed * pattern: the pattern to be checked * patternIndex: index of current pattern * startIndex: string index, from which we start searching Returns: String with placeholders instead of ElementTree elements. """ match = pattern.getCompiledRegExp().match(data[startIndex:]) leftData = data[:startIndex] if not match: return data, False, 0 node = pattern.handleMatch(match) if node is None: return data, True, len(leftData)+match.span(len(match.groups()))[0] if not isString(node): if not isinstance(node.text, util.AtomicString): # We need to process current node too for child in [node] + node.getchildren(): if not isString(node): if child.text: child.text = self.__handleInline(child.text, patternIndex + 1) if child.tail: child.tail = self.__handleInline(child.tail, patternIndex) placeholder = self.__stashNode(node, pattern.type()) return "%s%s%s%s" % (leftData, match.group(1), placeholder, match.groups()[-1]), True, 0 def run(self, tree): """Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline paterns, instead of normal string, use subclass AtomicString: node.text = markdown.AtomicString("This will not be processed.") Arguments: * tree: ElementTree object, representing Markdown tree. 
Returns: ElementTree object with applied inline patterns. """ self.stashed_nodes = {} stack = [tree] while stack: currElement = stack.pop() insertQueue = [] for child in currElement.getchildren(): if child.text and not isinstance(child.text, util.AtomicString): text = child.text child.text = None lst = self.__processPlaceholders(self.__handleInline( text), child) stack += lst insertQueue.append((child, lst)) if child.tail: tail = self.__handleInline(child.tail) dumby = util.etree.Element('d') tailResult = self.__processPlaceholders(tail, dumby) if dumby.text: child.tail = dumby.text else: child.tail = None pos = currElement.getchildren().index(child) + 1 tailResult.reverse() for newChild in tailResult: currElement.insert(pos, newChild) if child.getchildren(): stack.append(child) for element, lst in insertQueue: if self.markdown.enable_attributes: if element.text: element.text = \ inlinepatterns.handleAttributes(element.text, element) i = 0 for newChild in lst: if self.markdown.enable_attributes: # Processing attributes if newChild.tail: newChild.tail = \ inlinepatterns.handleAttributes(newChild.tail, element) if newChild.text: newChild.text = \ inlinepatterns.handleAttributes(newChild.text, newChild) element.insert(i, newChild) i += 1 return tree class PrettifyTreeprocessor(Treeprocessor): """ Add linebreaks to the html document. """ def _prettifyETree(self, elem): """ Recursively add linebreaks to ElementTree children. """ i = "\n" if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: if (not elem.text or not elem.text.strip()) \ and len(elem) and util.isBlockLevel(elem[0].tag): elem.text = i for e in elem: if util.isBlockLevel(e.tag): self._prettifyETree(e) if not elem.tail or not elem.tail.strip(): elem.tail = i if not elem.tail or not elem.tail.strip(): elem.tail = i def run(self, root): """ Add linebreaks to ElementTree root object. """ self._prettifyETree(root) # Do <br />'s seperately as they are often in the middle of # inline content and missed by _prettifyETree. brs = root.getiterator('br') for br in brs: if not br.tail or not br.tail.strip(): br.tail = '\n' else: br.tail = '\n%s' % br.tail
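The scan loop in `__processPlaceholders` can be summarised by a self-contained sketch: find the placeholder prefix, read the id up to the suffix, and splice in the stashed node, attaching any surrounding text. The constants and the tuple-based output below are simplifications of `util.INLINE_PLACEHOLDER_*` and the real ElementTree handling:

```python
# Simplified sketch of the placeholder scan; not the real implementation.
PREFIX, SUFFIX = "\x02", "\x03"  # stand-ins for the real placeholder markers

def scan_placeholders(data: str, stash: dict) -> list:
    result, start = [], 0
    while start < len(data):
        index = data.find(PREFIX, start)
        if index == -1:                       # no more placeholders
            result.append(("text", data[start:]))
            break
        if index > start:                     # text before the placeholder
            result.append(("text", data[start:index]))
        end = data.find(SUFFIX, index)        # placeholder id ends here
        result.append(("node", stash[data[index + 1:end]]))
        start = end + 1
    return result

stash = {"0000": "<em>hi</em>"}
print(scan_placeholders(f"before {PREFIX}0000{SUFFIX} after", stash))
# [('text', 'before '), ('node', '<em>hi</em>'), ('text', ' after')]
```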
run
Apply inline patterns to a parsed Markdown tree.

Iterate over the ElementTree, find elements with inline tags, apply inline
patterns and append newly created Elements to the tree. If you don't want
your data to be processed by inline patterns, use the AtomicString subclass
instead of a normal string:

    node.text = markdown.AtomicString("This will not be processed.")

Arguments:

* tree: ElementTree object, representing a Markdown tree.

Returns: ElementTree object with applied inline patterns.
import re from . import inlinepatterns from . import util from . import odict def build_treeprocessors(md_instance, **kwargs): """ Build the default treeprocessors for Markdown. """ treeprocessors = odict.OrderedDict() treeprocessors["inline"] = InlineProcessor(md_instance) treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance) return treeprocessors def isString(s): """ Check if it's string """ if not isinstance(s, util.AtomicString): return isinstance(s, str) return False class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Treeprocessor(Processor): """ Treeprocessors are run on the ElementTree object before serialization. Each Treeprocessor implements a "run" method that takes a pointer to an ElementTree, modifies it as necessary and returns an ElementTree object. Treeprocessors must extend markdown.Treeprocessor. """ def run(self, root): """ Subclasses of Treeprocessor should implement a `run` method, which takes a root ElementTree. This method can return another ElementTree object, and the existing root ElementTree will be replaced, or it can modify the current tree and return None. """ pass class InlineProcessor(Treeprocessor): """ A Treeprocessor that traverses a tree, applying inline patterns. """ def __init__(self, md): self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX self.__placeholder_suffix = util.ETX self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + len(self.__placeholder_suffix) self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.markdown = md def __makePlaceholder(self, type): """ Generate a placeholder """ id = "%04d" % len(self.stashed_nodes) hash = util.INLINE_PLACEHOLDER % id return hash, id def __findPlaceholder(self, data, index): """ Extract id from data string, start from index Keyword arguments: * data: string * index: index, from which we start search Returns: placeholder id and string index, after the found placeholder. """ m = self.__placeholder_re.search(data, index) if m: return m.group(1), m.end() else: return None, index + 1 def __stashNode(self, node, type): """ Add node to stash """ placeholder, id = self.__makePlaceholder(type) self.stashed_nodes[id] = node return placeholder def __handleInline(self, data, patternIndex=0): """ Process string with inline patterns and replace it with placeholders Keyword arguments: * data: A line of Markdown text * patternIndex: The index of the inlinePattern to start with Returns: String with placeholders. """ if not isinstance(data, util.AtomicString): startIndex = 0 while patternIndex < len(self.markdown.inlinePatterns): data, matched, startIndex = self.__applyPattern( self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex) if not matched: patternIndex += 1 return data def __processElementText(self, node, subnode, isText=True): """ Process placeholders in Element.text or Element.tail of Elements popped from self.stashed_nodes. 
Keywords arguments: * node: parent node * subnode: processing node * isText: bool variable, True - it's text, False - it's tail Returns: None """ if isText: text = subnode.text subnode.text = None else: text = subnode.tail subnode.tail = None childResult = self.__processPlaceholders(text, subnode) if not isText and node is not subnode: pos = node.getchildren().index(subnode) node.remove(subnode) else: pos = 0 childResult.reverse() for newChild in childResult: node.insert(pos, newChild) def __processPlaceholders(self, data, parent): """ Process string with placeholders and generate ElementTree tree. Keyword arguments: * data: string with placeholders instead of ElementTree elements. * parent: Element, which contains processing inline data Returns: list with ElementTree elements with applied inline patterns. """ def linkText(text): if text: if result: if result[-1].tail: result[-1].tail += text else: result[-1].tail = text else: if parent.text: parent.text += text else: parent.text = text result = [] strartIndex = 0 while data: index = data.find(self.__placeholder_prefix, strartIndex) if index != -1: id, phEndIndex = self.__findPlaceholder(data, index) if id in self.stashed_nodes: node = self.stashed_nodes.get(id) if index > 0: text = data[strartIndex:index] linkText(text) if not isString(node): # it's Element for child in [node] + node.getchildren(): if child.tail: if child.tail.strip(): self.__processElementText(node, child,False) if child.text: if child.text.strip(): self.__processElementText(child, child) else: # it's just a string linkText(node) strartIndex = phEndIndex continue strartIndex = phEndIndex result.append(node) else: # wrong placeholder end = index + len(self.__placeholder_prefix) linkText(data[strartIndex:end]) strartIndex = end else: text = data[strartIndex:] if isinstance(data, util.AtomicString): # We don't want to loose the AtomicString text = util.AtomicString(text) linkText(text) data = "" return result def __applyPattern(self, pattern, data, patternIndex, startIndex=0): """ Check if the line fits the pattern, create the necessary elements, add it to stashed_nodes. Keyword arguments: * data: the text to be processed * pattern: the pattern to be checked * patternIndex: index of current pattern * startIndex: string index, from which we start searching Returns: String with placeholders instead of ElementTree elements. """ match = pattern.getCompiledRegExp().match(data[startIndex:]) leftData = data[:startIndex] if not match: return data, False, 0 node = pattern.handleMatch(match) if node is None: return data, True, len(leftData)+match.span(len(match.groups()))[0] if not isString(node): if not isinstance(node.text, util.AtomicString): # We need to process current node too for child in [node] + node.getchildren(): if not isString(node): if child.text: child.text = self.__handleInline(child.text, patternIndex + 1) if child.tail: child.tail = self.__handleInline(child.tail, patternIndex) placeholder = self.__stashNode(node, pattern.type()) return "%s%s%s%s" % (leftData, match.group(1), placeholder, match.groups()[-1]), True, 0 # MASKED: run function (lines 260-327) class PrettifyTreeprocessor(Treeprocessor): """ Add linebreaks to the html document. """ def _prettifyETree(self, elem): """ Recursively add linebreaks to ElementTree children. 
""" i = "\n" if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: if (not elem.text or not elem.text.strip()) \ and len(elem) and util.isBlockLevel(elem[0].tag): elem.text = i for e in elem: if util.isBlockLevel(e.tag): self._prettifyETree(e) if not elem.tail or not elem.tail.strip(): elem.tail = i if not elem.tail or not elem.tail.strip(): elem.tail = i def run(self, root): """ Add linebreaks to ElementTree root object. """ self._prettifyETree(root) # Do <br />'s seperately as they are often in the middle of # inline content and missed by _prettifyETree. brs = root.getiterator('br') for br in brs: if not br.tail or not br.tail.strip(): br.tail = '\n' else: br.tail = '\n%s' % br.tail
def run(self, tree):
    """Apply inline patterns to a parsed Markdown tree.

    Iterate over the ElementTree, find elements with inline tags, apply
    inline patterns and append newly created Elements to the tree. If you
    don't want your data to be processed by inline patterns, use the
    AtomicString subclass instead of a normal string:

        node.text = markdown.AtomicString("This will not be processed.")

    Arguments:

    * tree: ElementTree object, representing a Markdown tree.

    Returns: ElementTree object with applied inline patterns.

    """
    self.stashed_nodes = {}

    stack = [tree]

    while stack:
        currElement = stack.pop()
        insertQueue = []
        for child in currElement.getchildren():
            if child.text and not isinstance(child.text, util.AtomicString):
                text = child.text
                child.text = None
                lst = self.__processPlaceholders(
                    self.__handleInline(text), child)
                stack += lst
                insertQueue.append((child, lst))
            if child.tail:
                tail = self.__handleInline(child.tail)
                dumby = util.etree.Element('d')
                tailResult = self.__processPlaceholders(tail, dumby)
                if dumby.text:
                    child.tail = dumby.text
                else:
                    child.tail = None
                pos = currElement.getchildren().index(child) + 1
                tailResult.reverse()
                for newChild in tailResult:
                    currElement.insert(pos, newChild)
            if child.getchildren():
                stack.append(child)

        for element, lst in insertQueue:
            if self.markdown.enable_attributes:
                if element.text:
                    element.text = \
                        inlinepatterns.handleAttributes(element.text,
                                                        element)
            i = 0
            for newChild in lst:
                if self.markdown.enable_attributes:
                    # Processing attributes
                    if newChild.tail:
                        newChild.tail = \
                            inlinepatterns.handleAttributes(newChild.tail,
                                                            element)
                    if newChild.text:
                        newChild.text = \
                            inlinepatterns.handleAttributes(newChild.text,
                                                            newChild)
                element.insert(i, newChild)
                i += 1
    return tree
260
327
import re from . import inlinepatterns from . import util from . import odict def build_treeprocessors(md_instance, **kwargs): """ Build the default treeprocessors for Markdown. """ treeprocessors = odict.OrderedDict() treeprocessors["inline"] = InlineProcessor(md_instance) treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance) return treeprocessors def isString(s): """ Check if it's string """ if not isinstance(s, util.AtomicString): return isinstance(s, str) return False class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Treeprocessor(Processor): """ Treeprocessors are run on the ElementTree object before serialization. Each Treeprocessor implements a "run" method that takes a pointer to an ElementTree, modifies it as necessary and returns an ElementTree object. Treeprocessors must extend markdown.Treeprocessor. """ def run(self, root): """ Subclasses of Treeprocessor should implement a `run` method, which takes a root ElementTree. This method can return another ElementTree object, and the existing root ElementTree will be replaced, or it can modify the current tree and return None. """ pass class InlineProcessor(Treeprocessor): """ A Treeprocessor that traverses a tree, applying inline patterns. """ def __init__(self, md): self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX self.__placeholder_suffix = util.ETX self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + len(self.__placeholder_suffix) self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.markdown = md def __makePlaceholder(self, type): """ Generate a placeholder """ id = "%04d" % len(self.stashed_nodes) hash = util.INLINE_PLACEHOLDER % id return hash, id def __findPlaceholder(self, data, index): """ Extract id from data string, start from index Keyword arguments: * data: string * index: index, from which we start search Returns: placeholder id and string index, after the found placeholder. """ m = self.__placeholder_re.search(data, index) if m: return m.group(1), m.end() else: return None, index + 1 def __stashNode(self, node, type): """ Add node to stash """ placeholder, id = self.__makePlaceholder(type) self.stashed_nodes[id] = node return placeholder def __handleInline(self, data, patternIndex=0): """ Process string with inline patterns and replace it with placeholders Keyword arguments: * data: A line of Markdown text * patternIndex: The index of the inlinePattern to start with Returns: String with placeholders. """ if not isinstance(data, util.AtomicString): startIndex = 0 while patternIndex < len(self.markdown.inlinePatterns): data, matched, startIndex = self.__applyPattern( self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex) if not matched: patternIndex += 1 return data def __processElementText(self, node, subnode, isText=True): """ Process placeholders in Element.text or Element.tail of Elements popped from self.stashed_nodes. 
Keywords arguments: * node: parent node * subnode: processing node * isText: bool variable, True - it's text, False - it's tail Returns: None """ if isText: text = subnode.text subnode.text = None else: text = subnode.tail subnode.tail = None childResult = self.__processPlaceholders(text, subnode) if not isText and node is not subnode: pos = node.getchildren().index(subnode) node.remove(subnode) else: pos = 0 childResult.reverse() for newChild in childResult: node.insert(pos, newChild) def __processPlaceholders(self, data, parent): """ Process string with placeholders and generate ElementTree tree. Keyword arguments: * data: string with placeholders instead of ElementTree elements. * parent: Element, which contains processing inline data Returns: list with ElementTree elements with applied inline patterns. """ def linkText(text): if text: if result: if result[-1].tail: result[-1].tail += text else: result[-1].tail = text else: if parent.text: parent.text += text else: parent.text = text result = [] strartIndex = 0 while data: index = data.find(self.__placeholder_prefix, strartIndex) if index != -1: id, phEndIndex = self.__findPlaceholder(data, index) if id in self.stashed_nodes: node = self.stashed_nodes.get(id) if index > 0: text = data[strartIndex:index] linkText(text) if not isString(node): # it's Element for child in [node] + node.getchildren(): if child.tail: if child.tail.strip(): self.__processElementText(node, child,False) if child.text: if child.text.strip(): self.__processElementText(child, child) else: # it's just a string linkText(node) strartIndex = phEndIndex continue strartIndex = phEndIndex result.append(node) else: # wrong placeholder end = index + len(self.__placeholder_prefix) linkText(data[strartIndex:end]) strartIndex = end else: text = data[strartIndex:] if isinstance(data, util.AtomicString): # We don't want to loose the AtomicString text = util.AtomicString(text) linkText(text) data = "" return result def __applyPattern(self, pattern, data, patternIndex, startIndex=0): """ Check if the line fits the pattern, create the necessary elements, add it to stashed_nodes. Keyword arguments: * data: the text to be processed * pattern: the pattern to be checked * patternIndex: index of current pattern * startIndex: string index, from which we start searching Returns: String with placeholders instead of ElementTree elements. """ match = pattern.getCompiledRegExp().match(data[startIndex:]) leftData = data[:startIndex] if not match: return data, False, 0 node = pattern.handleMatch(match) if node is None: return data, True, len(leftData)+match.span(len(match.groups()))[0] if not isString(node): if not isinstance(node.text, util.AtomicString): # We need to process current node too for child in [node] + node.getchildren(): if not isString(node): if child.text: child.text = self.__handleInline(child.text, patternIndex + 1) if child.tail: child.tail = self.__handleInline(child.tail, patternIndex) placeholder = self.__stashNode(node, pattern.type()) return "%s%s%s%s" % (leftData, match.group(1), placeholder, match.groups()[-1]), True, 0 def run(self, tree): """Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline paterns, instead of normal string, use subclass AtomicString: node.text = markdown.AtomicString("This will not be processed.") Arguments: * tree: ElementTree object, representing Markdown tree. 
Returns: ElementTree object with applied inline patterns. """ self.stashed_nodes = {} stack = [tree] while stack: currElement = stack.pop() insertQueue = [] for child in currElement.getchildren(): if child.text and not isinstance(child.text, util.AtomicString): text = child.text child.text = None lst = self.__processPlaceholders(self.__handleInline( text), child) stack += lst insertQueue.append((child, lst)) if child.tail: tail = self.__handleInline(child.tail) dumby = util.etree.Element('d') tailResult = self.__processPlaceholders(tail, dumby) if dumby.text: child.tail = dumby.text else: child.tail = None pos = currElement.getchildren().index(child) + 1 tailResult.reverse() for newChild in tailResult: currElement.insert(pos, newChild) if child.getchildren(): stack.append(child) for element, lst in insertQueue: if self.markdown.enable_attributes: if element.text: element.text = \ inlinepatterns.handleAttributes(element.text, element) i = 0 for newChild in lst: if self.markdown.enable_attributes: # Processing attributes if newChild.tail: newChild.tail = \ inlinepatterns.handleAttributes(newChild.tail, element) if newChild.text: newChild.text = \ inlinepatterns.handleAttributes(newChild.text, newChild) element.insert(i, newChild) i += 1 return tree class PrettifyTreeprocessor(Treeprocessor): """ Add linebreaks to the html document. """ def _prettifyETree(self, elem): """ Recursively add linebreaks to ElementTree children. """ i = "\n" if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: if (not elem.text or not elem.text.strip()) \ and len(elem) and util.isBlockLevel(elem[0].tag): elem.text = i for e in elem: if util.isBlockLevel(e.tag): self._prettifyETree(e) if not elem.tail or not elem.tail.strip(): elem.tail = i if not elem.tail or not elem.tail.strip(): elem.tail = i def run(self, root): """ Add linebreaks to ElementTree root object. """ self._prettifyETree(root) # Do <br />'s seperately as they are often in the middle of # inline content and missed by _prettifyETree. brs = root.getiterator('br') for br in brs: if not br.tail or not br.tail.strip(): br.tail = '\n' else: br.tail = '\n%s' % br.tail
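The `PrettifyTreeprocessor` in this file only touches block-level tags. Here is a self-contained approximation of `_prettifyETree`, with `util.isBlockLevel` replaced by a hard-coded set (an assumption made for the sketch):

```python
import xml.etree.ElementTree as etree

BLOCK = {"div", "p", "ul", "ol", "li", "blockquote"}  # stand-in for util.isBlockLevel

def prettify(elem):
    # Mirrors _prettifyETree: newline before the first block-level child,
    # recurse into block-level children, newline after the element itself.
    i = "\n"
    if elem.tag in BLOCK and elem.tag not in ("code", "pre"):
        if (not elem.text or not elem.text.strip()) \
                and len(elem) and elem[0].tag in BLOCK:
            elem.text = i
        for e in elem:
            if e.tag in BLOCK:
                prettify(e)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    if not elem.tail or not elem.tail.strip():
        elem.tail = i

root = etree.fromstring("<div><p>a</p><p>b</p></div>")
prettify(root)
print(etree.tostring(root).decode())  # newlines now separate the block elements
```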
__init__
This method adds additional items to the `self.general` dictionary
which are only needed for validations using datasets.

Arguments:
- `valName`: String which identifies individual validation instances
- `alignment`: `Alignment` instance to validate
- `config`: `BetterConfigParser` instance which includes the configuration
  of the validations
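To picture the net effect on `self.general`, here is a rough, self-contained sketch of the defaults-plus-section merge using plain `configparser` (the section name and option values are hypothetical; the real work happens in `BetterConfigParser.getResultingSection`):

```python
import configparser

defaults = {"runRange": "", "JSON": "", "dasinstance": "prod/global"}
mandatories = {"dataset", "maxevents"}

cfg = configparser.ConfigParser()
cfg.read_string("""
[myvalidation:testRun]
dataset   = /MinimumBias/Run2018A-TkAlMinBias-v1/ALCARECO
maxevents = 100000
""")

general = dict(defaults)                           # start from the class defaults
general.update(cfg.items("myvalidation:testRun"))  # section options override them
missing = mandatories - set(general)
if missing:
    raise ValueError("missing mandatory options: %s" % sorted(missing))
print(general["dataset"], general["maxevents"], general["dasinstance"])
```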
from __future__ import print_function from __future__ import absolute_import from builtins import range from abc import ABCMeta, abstractmethod, abstractproperty import os import re import json from . import globalDictionaries from . import configTemplates from .dataset import Dataset from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring from .TkAlExceptions import AllInOneError from six import with_metaclass class ValidationMetaClass(ABCMeta): sets = ["mandatories", "optionals", "needpackages"] dicts = ["defaults"] def __new__(cls, clsname, bases, dct): for setname in cls.sets: if setname not in dct: dct[setname] = set() dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) for dictname in cls.dicts: if dictname not in dct: dct[dictname] = {} for base in bases: if not hasattr(base, dictname): continue newdict = getattr(base, dictname) for key in set(newdict) & set(dct[dictname]): if newdict[key] != dct[dictname][key]: raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) dct[dictname].update(newdict) for setname in cls.sets: #e.g. removemandatories, used in preexistingvalidation #use with caution if "remove"+setname not in dct: dct["remove"+setname] = set() dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) dct[setname] -= dct["remove"+setname] return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) class GenericValidation(with_metaclass(ValidationMetaClass,object)): defaultReferenceName = "DEFAULT" mandatories = set() defaults = { "cmssw": os.environ['CMSSW_BASE'], "parallelJobs": "1", "jobid": "", "needsproxy": "false", } needpackages = {"Alignment/OfflineValidation"} optionals = {"jobmode"} def __init__(self, valName, alignment, config): import random self.name = valName self.alignmentToValidate = alignment self.general = config.getGeneral() self.randomWorkdirPart = "%0i"%random.randint(1,10e9) self.configFiles = [] self.config = config self.jobid = "" theUpdate = config.getResultingSection(self.valType+":"+self.name, defaultDict = self.defaults, demandPars = self.mandatories) self.general.update(theUpdate) self.jobmode = self.general["jobmode"] self.NJobs = int(self.general["parallelJobs"]) self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") # limit maximum number of parallel jobs to 40 # (each output file is approximately 20MB) maximumNumberJobs = 40 if self.NJobs > maximumNumberJobs: msg = ("Maximum allowed number of parallel jobs " +str(maximumNumberJobs)+" exceeded!!!") raise AllInOneError(msg) if self.NJobs > 1 and not isinstance(self, ParallelValidation): raise AllInOneError("Parallel jobs not implemented for {}!\n" "Please set parallelJobs = 1.".format(type(self).__name__)) self.jobid = self.general["jobid"] if self.jobid: try: #make sure it's actually a valid jobid output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) if "is not found" in output: raise RuntimeError except RuntimeError: raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) self.cmssw = self.general["cmssw"] badcharacters = r"\'" for character in badcharacters: if character in self.cmssw: raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" "path name. 
If you really have it in such a ridiculously named location,\n" "try making a symbolic link somewhere with a decent name.") try: os.listdir(self.cmssw) except OSError: raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') if self.cmssw == os.environ["CMSSW_BASE"]: self.scramarch = os.environ["SCRAM_ARCH"] self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] else: command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') commandoutput = getCommandOutput2(command).split('\n') self.cmssw = commandoutput[0] self.scramarch = commandoutput[1] self.cmsswreleasebase = commandoutput[2] self.packages = {} for package in self.needpackages: for placetolook in self.cmssw, self.cmsswreleasebase: pkgpath = os.path.join(placetolook, "src", package) if os.path.exists(pkgpath): self.packages[package] = pkgpath break else: raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) self.AutoAlternates = True if config.has_option("alternateTemplates","AutoAlternates"): try: self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) except ValueError: raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals ignoreOpts = [] config.checkInput(self.valType+":"+self.name, knownSimpleOptions = knownOpts, ignoreOptions = ignoreOpts) def getRepMap(self, alignment = None): from .plottingOptions import PlottingOptions if alignment == None: alignment = self.alignmentToValidate try: result = PlottingOptions(self.config, self.valType) except KeyError: result = {} result.update(alignment.getRepMap()) result.update(self.general) result.update({ "workdir": os.path.join(self.general["workdir"], self.randomWorkdirPart), "datadir": self.general["datadir"], "logdir": self.general["logdir"], "CommandLineTemplate": ("#run configfile and post-proccess it\n" "cmsRun %(cfgFile)s\n" "%(postProcess)s "), "CMSSW_BASE": self.cmssw, "SCRAM_ARCH": self.scramarch, "CMSSW_RELEASE_BASE": self.cmsswreleasebase, "alignmentName": alignment.name, "condLoad": alignment.getConditions(), "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, }) result.update(self.packages) return result @abstractproperty def filesToCompare(self): pass def getCompareStrings( self, requestId = None, plain = False ): result = {} repMap = self.getRepMap().copy() for validationId in self.filesToCompare: repMap["file"] = self.filesToCompare[ validationId ] if repMap["file"].startswith( "/castor/" ): repMap["file"] = "rfio:%(file)s"%repMap elif repMap["file"].startswith( "/store/" ): repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if plain: result[validationId]=repMap["file"] else: result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap if requestId == None: return result else: if not "." in requestId: requestId += ".%s"%self.defaultReferenceName if not requestId.split(".")[-1] in result: msg = ("could not find %s in reference Objects!" 
%requestId.split(".")[-1]) raise AllInOneError(msg) return result[ requestId.split(".")[-1] ] def createFiles(self, fileContents, path, repMap = None, repMaps = None): """repMap: single map for all files repMaps: a dict, with the filenames as the keys""" if repMap is not None and repMaps is not None: raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") result = [] for fileName in fileContents: filePath = os.path.join(path, fileName) result.append(filePath) for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): theFile = open( filePathi, "w" ) fileContentsi = fileContents[ fileName ] if repMaps is not None: repMap = repMaps[fileName] if repMap is not None: repMap.update({"nIndex": str(i)}) fileContentsi = replaceByMap(fileContentsi, repMap) theFile.write( fileContentsi ) theFile.close() return result def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): self.configFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) if not schedule == None: schedule = [os.path.join( path, cfgName) for cfgName in schedule] for cfgName in schedule: if not cfgName in self.configFiles: msg = ("scheduled %s missing in generated configfiles: %s" %(cfgName, self.configFiles)) raise AllInOneError(msg) for cfgName in self.configFiles: if not cfgName in schedule: msg = ("generated configuration %s not scheduled: %s" %(cfgName, schedule)) raise AllInOneError(msg) self.configFiles = schedule return self.configFiles def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): self.scriptFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) for script in self.scriptFiles: for scriptwithindex in addIndex(script, self.NJobs): os.chmod(scriptwithindex,0o755) return self.scriptFiles def createCrabCfg(self, fileContents, path ): if self.NJobs > 1: msg = ("jobmode 'crab' not supported for parallel validation." " Please set parallelJobs = 1.") raise AllInOneError(msg) self.crabConfigFiles = self.createFiles(fileContents, path) return self.crabConfigFiles class GenericValidationData(GenericValidation): """ Subclass of `GenericValidation` which is the base for validations using datasets. 
""" needParentFiles = False mandatories = {"dataset", "maxevents"} defaults = { "runRange": "", "firstRun": "", "lastRun": "", "begin": "", "end": "", "JSON": "", "dasinstance": "prod/global", "ttrhbuilder":"WithAngleAndTemplate", "usepixelqualityflag": "True", } optionals = {"magneticfield"} # MASKED: __init__ function (lines 271-396) def getRepMap(self, alignment = None): result = super(GenericValidationData, self).getRepMap(alignment) outputfile = os.path.expandvars(replaceByMap( "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) , result)) resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) , result)) result.update({ "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", "resultFiles": addIndex(resultfile, self.NJobs), "finalResultFile": resultfile, "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", "outputFiles": addIndex(outputfile, self.NJobs), "finalOutputFile": outputfile, "ProcessName": self.ProcessName, "Bookkeeping": self.Bookkeeping, "LoadBasicModules": self.LoadBasicModules, "TrackSelectionRefitting": self.TrackSelectionRefitting, "ValidationConfig": self.ValidationTemplate, "FileOutputTemplate": self.FileOutputTemplate, "DefinePath": self.DefinePath, }) return result @property def cfgName(self): return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, self.alignmentToValidate.name ) @abstractproperty def ProcessName(self): pass @property def cfgTemplate(self): return configTemplates.cfgTemplate @abstractproperty def ValidationTemplate(self): pass @property def filesToCompare(self): return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} def createConfiguration(self, path ): repMap = self.getRepMap() cfgs = {self.cfgName: self.cfgTemplate} super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): scriptName = "%s.%s.%s.sh"%(self.scriptBaseName, self.name, self.alignmentToValidate.name ) if repMap is None and repMaps is None: repMap = self.getRepMap() repMap["CommandLine"]="" for cfg in self.configFiles: repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), "postProcess":"" } scripts = {scriptName: template} return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, repMap = repMap, repMaps = repMaps) def createCrabCfg(self, path, crabCfgBaseName): """ Method which creates a `crab.cfg` for a validation on datasets. Arguments: - `path`: Path at which the file will be stored. - `crabCfgBaseName`: String which depends on the actual type of validation calling this method. """ crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name, self.alignmentToValidate.name ) repMap = self.getRepMap() repMap["script"] = "dummy_script.sh" # repMap["crabOutputDir"] = os.path.basename( path ) repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0] self.crabWorkingDir = repMap["crabWorkingDir"] repMap["numberOfJobs"] = self.general["parallelJobs"] repMap["cfgFile"] = self.configFiles[0] repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1] if self.dataset.dataType() == "mc": repMap["McOrData"] = "events = .oO[nEvents]Oo." 
elif self.dataset.dataType() == "data": repMap["McOrData"] = "lumis = -1" if self.jobmode.split( ',' )[0] == "crab": print ("For jobmode 'crab' the parameter 'maxevents' will be " "ignored and all events will be processed.") else: raise AllInOneError("Unknown data type! Can't run in crab mode") crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate, repMap ) } return super(GenericValidationData, self).createCrabCfg( crabCfg, path ) @property def Bookkeeping(self): return configTemplates.Bookkeeping @property def LoadBasicModules(self): return configTemplates.LoadBasicModules @abstractproperty def TrackSelectionRefitting(self): pass @property def FileOutputTemplate(self): return configTemplates.FileOutputTemplate @abstractproperty def DefinePath(self): pass class GenericValidationData_CTSR(GenericValidationData): #common track selection and refitting defaults = { "momentumconstraint": "None", "openmasswindow": "False", "cosmicsdecomode": "True", "removetrackhitfiltercommands": "", "appendtrackhitfiltercommands": "", } def getRepMap(self, alignment=None): result = super(GenericValidationData_CTSR, self).getRepMap(alignment) from .trackSplittingValidation import TrackSplittingValidation result.update({ "ValidationSequence": self.ValidationSequence, "istracksplitting": str(isinstance(self, TrackSplittingValidation)), "cosmics0T": str(self.cosmics0T), "use_d0cut": str(self.use_d0cut), "ispvvalidation": str(self.isPVValidation) }) commands = [] for removeorappend in "remove", "append": optionname = removeorappend + "trackhitfiltercommands" if result[optionname]: for command in result[optionname].split(","): command = command.strip() commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) result["trackhitfiltercommands"] = "\n".join(commands) return result @property def use_d0cut(self): return "Cosmics" not in self.general["trackcollection"] #use it for collisions only @property def isPVValidation(self): return False # only for PV Validation sequence @property def TrackSelectionRefitting(self): return configTemplates.CommonTrackSelectionRefitting @property def DefinePath(self): return configTemplates.DefinePath_CommonSelectionRefitting @abstractproperty def ValidationSequence(self): pass @property def cosmics0T(self): if "Cosmics" not in self.general["trackcollection"]: return False Bfield = self.dataset.magneticFieldForRun() if Bfield < 0.5: return True if isinstance(Bfield, str): if "unknown " in Bfield: msg = Bfield.replace("unknown ","",1) elif Bfield == "unknown": msg = "Can't get the B field for %s." % self.dataset.name() else: msg = "B field = {}???".format(Bfield) raise AllInOneError(msg + "\n" "To use this dataset, specify magneticfield = [value] in your .ini config file.") return False class ParallelValidation(GenericValidation): @classmethod def initMerge(cls): return "" @abstractmethod def appendToMerge(self): pass @classmethod def doInitMerge(cls): from .plottingOptions import PlottingOptions result = cls.initMerge() result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result def doMerge(self): result = self.appendToMerge() if result[-1] != "\n": result += "\n" result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" " xrdcp -f .oO[finalOutputFile]Oo. 
root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" "fi\n" "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" " mergeRetCode=${tmpMergeRetCode}\n" "fi\n") result = replaceByMap(result, self.getRepMap()) return result class ValidationWithPlots(GenericValidation): @classmethod def runPlots(cls, validations): return ("cp .oO[plottingscriptpath]Oo. .\n" "root -x -b -q .oO[plottingscriptname]Oo.++") @abstractmethod def appendToPlots(self): pass @abstractmethod def plottingscriptname(cls): """override with a classmethod""" @abstractmethod def plottingscripttemplate(cls): """override with a classmethod""" @abstractmethod def plotsdirname(cls): """override with a classmethod""" @classmethod def doRunPlots(cls, validations): from .plottingOptions import PlottingOptions cls.createPlottingScript(validations) result = cls.runPlots(validations) result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result @classmethod def createPlottingScript(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) repmap["PlottingInstantiation"] = "\n".join( replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") for v in validations ) plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) with open(filename, 'w') as f: f.write(plottingscript) class ValidationWithPlotsSummaryBase(ValidationWithPlots): class SummaryItem(object): def __init__(self, name, values, format=None, latexname=None, latexformat=None): """ name: name of the summary item, goes on top of the column values: value for each alignment (in order of rows) format: python format string (default: {:.3g}, meaning up to 3 significant digits) latexname: name in latex form, e.g. 
if name=sigma you might want latexname=\sigma (default: name) latexformat: format for latex (default: format) """ if format is None: format = "{:.3g}" if latexname is None: latexname = name if latexformat is None: latexformat = format self.__name = name self.__values = values self.__format = format self.__latexname = latexname self.__latexformat = latexformat def name(self, latex=False): if latex: return self.__latexname else: return self.__name def format(self, value, latex=False): if latex: fmt = self.__latexformat else: fmt = self.__format if re.match(".*[{][^}]*[fg][}].*", fmt): value = float(value) return fmt.format(value) def values(self, latex=False): result = [self.format(v, latex=latex) for v in self.__values] return result def value(self, i, latex): return self.values(latex)[i] @abstractmethod def getsummaryitems(cls, folder): """override with a classmethod that returns a list of SummaryItems based on the plots saved in folder""" __summaryitems = None __lastfolder = None @classmethod def summaryitemsstring(cls, folder=None, latex=False, transpose=True): if folder is None: folder = cls.plotsdirname() if folder.startswith( "/castor/" ): folder = "rfio:%(file)s"%repMap elif folder.startswith( "/store/" ): folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if cls.__summaryitems is None or cls.__lastfolder != folder: cls.__lastfolder = folder cls.__summaryitems = cls.getsummaryitems(folder) summaryitems = cls.__summaryitems if not summaryitems: raise AllInOneError("No summary items!") size = {len(_.values(latex)) for _ in summaryitems} if len(size) != 1: raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) size = size.pop() if transpose: columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) else: columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] if latex: join = " & " else: join = " " row = join.join("{{:{}}}".format(width) for width in columnwidths) if transpose: rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] else: rows = [] rows.append(row.format(*(_.name for _ in summaryitems))) for i in range(size): rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) if latex: join = " \\\\\n" else: join = "\n" result = join.join(rows) if latex: result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" + result + "\n" + r"\end{tabular}") return result @classmethod def printsummaryitems(cls, *args, **kwargs): print(cls.summaryitemsstring(*args, **kwargs)) @classmethod def writesummaryitems(cls, filename, *args, **kwargs): with open(filename, "w") as f: f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): @classmethod def getsummaryitems(cls, folder): result = [] with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: for line in f: split = line.rstrip("\n").split("\t") kwargs = {} for thing in split[:]: if thing.startswith("format="): kwargs["format"] = thing.replace("format=", "", 1) split.remove(thing) if thing.startswith("latexname="): kwargs["latexname"] = thing.replace("latexname=", "", 1) split.remove(thing) if thing.startswith("latexformat="): kwargs["latexformat"] = thing.replace("latexformat=", "", 1) split.remove(thing) name = split[0] values = split[1:] result.append(cls.SummaryItem(name, values, **kwargs)) return result class 
ValidationWithComparison(GenericValidation): @classmethod def doComparison(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) comparison = replaceByMap(cls.comparisontemplate(), repmap) return comparison @classmethod def comparisontemplate(cls): return configTemplates.compareAlignmentsExecution @classmethod def comparealignmentspath(cls): return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." @abstractmethod def comparealignmentsname(cls): """classmethod""" class ValidationForPresentation(ValidationWithPlots): @abstractmethod def presentationsubsections(cls): """classmethod"""
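Throughout this framework, templates carry `.oO[key]Oo.` tokens that `replaceByMap` (from `helperFunctions`) expands using the repMap dictionaries built above. A rough re-implementation of the convention, for illustration only (the real helper also handles nested structures and error reporting):

```python
import re

TOKEN = re.compile(r"\.oO\[(\w+)\]Oo\.")

def replace_by_map_sketch(template, repmap):
    # Re-substitute until the string stabilizes, so values that themselves
    # contain .oO[...]Oo. tokens get expanded too.
    previous = None
    while previous != template:
        previous = template
        template = TOKEN.sub(lambda m: str(repmap.get(m.group(1), m.group(0))), template)
    return template

print(replace_by_map_sketch("%s_%s_.oO[name]Oo..root" % ("Validation", "testRun"),
                            {"name": "align1"}))
# -> Validation_testRun_align1.root
```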
def __init__(self, valName, alignment, config): """ This method adds additional items to the `self.general` dictionary which are only needed for validations using datasets. Arguments: - `valName`: String which identifies individual validation instances - `alignment`: `Alignment` instance to validate - `config`: `BetterConfigParser` instance which includes the configuration of the validations """ super(GenericValidationData, self).__init__(valName, alignment, config) # if maxevents is not specified, cannot calculate number of events for # each parallel job, and therefore running only a single job if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: msg = ("Maximum number of events (maxevents) not specified: " "cannot use parallel jobs.") raise AllInOneError(msg) if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: msg = ("maxevents has to be divisible by parallelJobs") raise AllInOneError(msg) tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" and self.general["firstRun"] == "" and self.general["lastRun"] == "" and self.general["begin"] == "" and self.general["end"] == "") if self.general["dataset"] not in globalDictionaries.usedDatasets: globalDictionaries.usedDatasets[self.general["dataset"]] = {} if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} Bfield = self.general.get("magneticfield", None) if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: dataset = Dataset( self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, dasinstance = self.general["dasinstance"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] self.general["magneticField"] = self.dataset.magneticField() self.general["defaultMagneticField"] = "MagneticField" if self.general["magneticField"] == "unknown": print("Could not get the magnetic field for this dataset.") print("Using the default: ", self.general["defaultMagneticField"]) self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' 
if not self.jobmode.split( ',' )[0] == "crab": try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], parent = self.needParentFiles ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str(e) raise AllInOneError(msg) else: if self.dataset.predefined(): msg = ("For jobmode 'crab' you cannot use predefined datasets " "(in your case: '%s')."%( self.dataset.name() )) raise AllInOneError( msg ) try: theUpdate = config.getResultingSection(self.valType+":"+self.name, demandPars = ["parallelJobs"]) except AllInOneError as e: msg = str(e)[:-1]+" when using 'jobmode: crab'." raise AllInOneError(msg) self.general.update(theUpdate) if self.general["begin"] or self.general["end"]: ( self.general["begin"], self.general["end"], self.general["firstRun"], self.general["lastRun"] ) = self.dataset.convertTimeToRun( firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], shortTuple = False) if self.general["begin"] == None: self.general["begin"] = "" if self.general["end"] == None: self.general["end"] = "" self.general["firstRun"] = str( self.general["firstRun"] ) self.general["lastRun"] = str( self.general["lastRun"] ) if ( not self.general["firstRun"] ) and \ ( self.general["end"] or self.general["lastRun"] ): self.general["firstRun"] = str( self.dataset.runList()[0]["run_number"]) if ( not self.general["lastRun"] ) and \ ( self.general["begin"] or self.general["firstRun"] ): self.general["lastRun"] = str( self.dataset.runList()[-1]["run_number"]) if self.general["firstRun"] and self.general["lastRun"]: if int(self.general["firstRun"]) > int(self.general["lastRun"]): msg = ( "The lower time/runrange limit ('begin'/'firstRun') " "chosen is greater than the upper time/runrange limit " "('end'/'lastRun').") raise AllInOneError( msg ) self.general["runRange"] = (self.general["firstRun"] + '-' + self.general["lastRun"]) try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], crab = True ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str( e ) raise AllInOneError( msg ) self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag")
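One detail worth flagging in the `maxevents` guard above: `int(...)/NJobs != float(...)/NJobs` only detects a remainder under Python 2 floor division; with Python 3's true division both sides are always equal, so the check silently passes. A version-independent remainder test looks like this:

```python
def check_divisible(maxevents, njobs):
    # Modulo works identically in Python 2 and 3, unlike the
    # int()/float() division comparison used above.
    return maxevents % njobs == 0

print(check_divisible(100000, 8))  # True
print(check_divisible(100001, 8))  # False; Py3 true division would miss this
```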
271
396
from __future__ import print_function from __future__ import absolute_import from builtins import range from abc import ABCMeta, abstractmethod, abstractproperty import os import re import json from . import globalDictionaries from . import configTemplates from .dataset import Dataset from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring from .TkAlExceptions import AllInOneError from six import with_metaclass class ValidationMetaClass(ABCMeta): sets = ["mandatories", "optionals", "needpackages"] dicts = ["defaults"] def __new__(cls, clsname, bases, dct): for setname in cls.sets: if setname not in dct: dct[setname] = set() dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) for dictname in cls.dicts: if dictname not in dct: dct[dictname] = {} for base in bases: if not hasattr(base, dictname): continue newdict = getattr(base, dictname) for key in set(newdict) & set(dct[dictname]): if newdict[key] != dct[dictname][key]: raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) dct[dictname].update(newdict) for setname in cls.sets: #e.g. removemandatories, used in preexistingvalidation #use with caution if "remove"+setname not in dct: dct["remove"+setname] = set() dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) dct[setname] -= dct["remove"+setname] return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) class GenericValidation(with_metaclass(ValidationMetaClass,object)): defaultReferenceName = "DEFAULT" mandatories = set() defaults = { "cmssw": os.environ['CMSSW_BASE'], "parallelJobs": "1", "jobid": "", "needsproxy": "false", } needpackages = {"Alignment/OfflineValidation"} optionals = {"jobmode"} def __init__(self, valName, alignment, config): import random self.name = valName self.alignmentToValidate = alignment self.general = config.getGeneral() self.randomWorkdirPart = "%0i"%random.randint(1,10e9) self.configFiles = [] self.config = config self.jobid = "" theUpdate = config.getResultingSection(self.valType+":"+self.name, defaultDict = self.defaults, demandPars = self.mandatories) self.general.update(theUpdate) self.jobmode = self.general["jobmode"] self.NJobs = int(self.general["parallelJobs"]) self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") # limit maximum number of parallel jobs to 40 # (each output file is approximately 20MB) maximumNumberJobs = 40 if self.NJobs > maximumNumberJobs: msg = ("Maximum allowed number of parallel jobs " +str(maximumNumberJobs)+" exceeded!!!") raise AllInOneError(msg) if self.NJobs > 1 and not isinstance(self, ParallelValidation): raise AllInOneError("Parallel jobs not implemented for {}!\n" "Please set parallelJobs = 1.".format(type(self).__name__)) self.jobid = self.general["jobid"] if self.jobid: try: #make sure it's actually a valid jobid output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) if "is not found" in output: raise RuntimeError except RuntimeError: raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) self.cmssw = self.general["cmssw"] badcharacters = r"\'" for character in badcharacters: if character in self.cmssw: raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" "path name. 
If you really have it in such a ridiculously named location,\n" "try making a symbolic link somewhere with a decent name.") try: os.listdir(self.cmssw) except OSError: raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') if self.cmssw == os.environ["CMSSW_BASE"]: self.scramarch = os.environ["SCRAM_ARCH"] self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] else: command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') commandoutput = getCommandOutput2(command).split('\n') self.cmssw = commandoutput[0] self.scramarch = commandoutput[1] self.cmsswreleasebase = commandoutput[2] self.packages = {} for package in self.needpackages: for placetolook in self.cmssw, self.cmsswreleasebase: pkgpath = os.path.join(placetolook, "src", package) if os.path.exists(pkgpath): self.packages[package] = pkgpath break else: raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) self.AutoAlternates = True if config.has_option("alternateTemplates","AutoAlternates"): try: self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) except ValueError: raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals ignoreOpts = [] config.checkInput(self.valType+":"+self.name, knownSimpleOptions = knownOpts, ignoreOptions = ignoreOpts) def getRepMap(self, alignment = None): from .plottingOptions import PlottingOptions if alignment == None: alignment = self.alignmentToValidate try: result = PlottingOptions(self.config, self.valType) except KeyError: result = {} result.update(alignment.getRepMap()) result.update(self.general) result.update({ "workdir": os.path.join(self.general["workdir"], self.randomWorkdirPart), "datadir": self.general["datadir"], "logdir": self.general["logdir"], "CommandLineTemplate": ("#run configfile and post-proccess it\n" "cmsRun %(cfgFile)s\n" "%(postProcess)s "), "CMSSW_BASE": self.cmssw, "SCRAM_ARCH": self.scramarch, "CMSSW_RELEASE_BASE": self.cmsswreleasebase, "alignmentName": alignment.name, "condLoad": alignment.getConditions(), "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, }) result.update(self.packages) return result @abstractproperty def filesToCompare(self): pass def getCompareStrings( self, requestId = None, plain = False ): result = {} repMap = self.getRepMap().copy() for validationId in self.filesToCompare: repMap["file"] = self.filesToCompare[ validationId ] if repMap["file"].startswith( "/castor/" ): repMap["file"] = "rfio:%(file)s"%repMap elif repMap["file"].startswith( "/store/" ): repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if plain: result[validationId]=repMap["file"] else: result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap if requestId == None: return result else: if not "." in requestId: requestId += ".%s"%self.defaultReferenceName if not requestId.split(".")[-1] in result: msg = ("could not find %s in reference Objects!" 
%requestId.split(".")[-1]) raise AllInOneError(msg) return result[ requestId.split(".")[-1] ] def createFiles(self, fileContents, path, repMap = None, repMaps = None): """repMap: single map for all files repMaps: a dict, with the filenames as the keys""" if repMap is not None and repMaps is not None: raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") result = [] for fileName in fileContents: filePath = os.path.join(path, fileName) result.append(filePath) for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): theFile = open( filePathi, "w" ) fileContentsi = fileContents[ fileName ] if repMaps is not None: repMap = repMaps[fileName] if repMap is not None: repMap.update({"nIndex": str(i)}) fileContentsi = replaceByMap(fileContentsi, repMap) theFile.write( fileContentsi ) theFile.close() return result def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): self.configFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) if not schedule == None: schedule = [os.path.join( path, cfgName) for cfgName in schedule] for cfgName in schedule: if not cfgName in self.configFiles: msg = ("scheduled %s missing in generated configfiles: %s" %(cfgName, self.configFiles)) raise AllInOneError(msg) for cfgName in self.configFiles: if not cfgName in schedule: msg = ("generated configuration %s not scheduled: %s" %(cfgName, schedule)) raise AllInOneError(msg) self.configFiles = schedule return self.configFiles def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): self.scriptFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) for script in self.scriptFiles: for scriptwithindex in addIndex(script, self.NJobs): os.chmod(scriptwithindex,0o755) return self.scriptFiles def createCrabCfg(self, fileContents, path ): if self.NJobs > 1: msg = ("jobmode 'crab' not supported for parallel validation." " Please set parallelJobs = 1.") raise AllInOneError(msg) self.crabConfigFiles = self.createFiles(fileContents, path) return self.crabConfigFiles class GenericValidationData(GenericValidation): """ Subclass of `GenericValidation` which is the base for validations using datasets. """ needParentFiles = False mandatories = {"dataset", "maxevents"} defaults = { "runRange": "", "firstRun": "", "lastRun": "", "begin": "", "end": "", "JSON": "", "dasinstance": "prod/global", "ttrhbuilder":"WithAngleAndTemplate", "usepixelqualityflag": "True", } optionals = {"magneticfield"} def __init__(self, valName, alignment, config): """ This method adds additional items to the `self.general` dictionary which are only needed for validations using datasets. 
Arguments: - `valName`: String which identifies individual validation instances - `alignment`: `Alignment` instance to validate - `config`: `BetterConfigParser` instance which includes the configuration of the validations """ super(GenericValidationData, self).__init__(valName, alignment, config) # if maxevents is not specified, cannot calculate number of events for # each parallel job, and therefore running only a single job if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: msg = ("Maximum number of events (maxevents) not specified: " "cannot use parallel jobs.") raise AllInOneError(msg) if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: msg = ("maxevents has to be divisible by parallelJobs") raise AllInOneError(msg) tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" and self.general["firstRun"] == "" and self.general["lastRun"] == "" and self.general["begin"] == "" and self.general["end"] == "") if self.general["dataset"] not in globalDictionaries.usedDatasets: globalDictionaries.usedDatasets[self.general["dataset"]] = {} if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} Bfield = self.general.get("magneticfield", None) if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: dataset = Dataset( self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, dasinstance = self.general["dasinstance"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] self.general["magneticField"] = self.dataset.magneticField() self.general["defaultMagneticField"] = "MagneticField" if self.general["magneticField"] == "unknown": print("Could not get the magnetic field for this dataset.") print("Using the default: ", self.general["defaultMagneticField"]) self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' if not self.jobmode.split( ',' )[0] == "crab": try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], parent = self.needParentFiles ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str(e) raise AllInOneError(msg) else: if self.dataset.predefined(): msg = ("For jobmode 'crab' you cannot use predefined datasets " "(in your case: '%s')."%( self.dataset.name() )) raise AllInOneError( msg ) try: theUpdate = config.getResultingSection(self.valType+":"+self.name, demandPars = ["parallelJobs"]) except AllInOneError as e: msg = str(e)[:-1]+" when using 'jobmode: crab'." 
raise AllInOneError(msg) self.general.update(theUpdate) if self.general["begin"] or self.general["end"]: ( self.general["begin"], self.general["end"], self.general["firstRun"], self.general["lastRun"] ) = self.dataset.convertTimeToRun( firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], shortTuple = False) if self.general["begin"] == None: self.general["begin"] = "" if self.general["end"] == None: self.general["end"] = "" self.general["firstRun"] = str( self.general["firstRun"] ) self.general["lastRun"] = str( self.general["lastRun"] ) if ( not self.general["firstRun"] ) and \ ( self.general["end"] or self.general["lastRun"] ): self.general["firstRun"] = str( self.dataset.runList()[0]["run_number"]) if ( not self.general["lastRun"] ) and \ ( self.general["begin"] or self.general["firstRun"] ): self.general["lastRun"] = str( self.dataset.runList()[-1]["run_number"]) if self.general["firstRun"] and self.general["lastRun"]: if int(self.general["firstRun"]) > int(self.general["lastRun"]): msg = ( "The lower time/runrange limit ('begin'/'firstRun') " "chosen is greater than the upper time/runrange limit " "('end'/'lastRun').") raise AllInOneError( msg ) self.general["runRange"] = (self.general["firstRun"] + '-' + self.general["lastRun"]) try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], crab = True ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str( e ) raise AllInOneError( msg ) self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag") def getRepMap(self, alignment = None): result = super(GenericValidationData, self).getRepMap(alignment) outputfile = os.path.expandvars(replaceByMap( "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) , result)) resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) , result)) result.update({ "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", "resultFiles": addIndex(resultfile, self.NJobs), "finalResultFile": resultfile, "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", "outputFiles": addIndex(outputfile, self.NJobs), "finalOutputFile": outputfile, "ProcessName": self.ProcessName, "Bookkeeping": self.Bookkeeping, "LoadBasicModules": self.LoadBasicModules, "TrackSelectionRefitting": self.TrackSelectionRefitting, "ValidationConfig": self.ValidationTemplate, "FileOutputTemplate": self.FileOutputTemplate, "DefinePath": self.DefinePath, }) return result @property def cfgName(self): return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, self.alignmentToValidate.name ) @abstractproperty def ProcessName(self): pass @property def cfgTemplate(self): return configTemplates.cfgTemplate @abstractproperty def ValidationTemplate(self): pass @property def filesToCompare(self): return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} def createConfiguration(self, path ): repMap = self.getRepMap() cfgs = {self.cfgName: self.cfgTemplate} super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): scriptName = 
"%s.%s.%s.sh"%(self.scriptBaseName, self.name, self.alignmentToValidate.name ) if repMap is None and repMaps is None: repMap = self.getRepMap() repMap["CommandLine"]="" for cfg in self.configFiles: repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), "postProcess":"" } scripts = {scriptName: template} return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, repMap = repMap, repMaps = repMaps) def createCrabCfg(self, path, crabCfgBaseName): """ Method which creates a `crab.cfg` for a validation on datasets. Arguments: - `path`: Path at which the file will be stored. - `crabCfgBaseName`: String which depends on the actual type of validation calling this method. """ crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name, self.alignmentToValidate.name ) repMap = self.getRepMap() repMap["script"] = "dummy_script.sh" # repMap["crabOutputDir"] = os.path.basename( path ) repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0] self.crabWorkingDir = repMap["crabWorkingDir"] repMap["numberOfJobs"] = self.general["parallelJobs"] repMap["cfgFile"] = self.configFiles[0] repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1] if self.dataset.dataType() == "mc": repMap["McOrData"] = "events = .oO[nEvents]Oo." elif self.dataset.dataType() == "data": repMap["McOrData"] = "lumis = -1" if self.jobmode.split( ',' )[0] == "crab": print ("For jobmode 'crab' the parameter 'maxevents' will be " "ignored and all events will be processed.") else: raise AllInOneError("Unknown data type! Can't run in crab mode") crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate, repMap ) } return super(GenericValidationData, self).createCrabCfg( crabCfg, path ) @property def Bookkeeping(self): return configTemplates.Bookkeeping @property def LoadBasicModules(self): return configTemplates.LoadBasicModules @abstractproperty def TrackSelectionRefitting(self): pass @property def FileOutputTemplate(self): return configTemplates.FileOutputTemplate @abstractproperty def DefinePath(self): pass class GenericValidationData_CTSR(GenericValidationData): #common track selection and refitting defaults = { "momentumconstraint": "None", "openmasswindow": "False", "cosmicsdecomode": "True", "removetrackhitfiltercommands": "", "appendtrackhitfiltercommands": "", } def getRepMap(self, alignment=None): result = super(GenericValidationData_CTSR, self).getRepMap(alignment) from .trackSplittingValidation import TrackSplittingValidation result.update({ "ValidationSequence": self.ValidationSequence, "istracksplitting": str(isinstance(self, TrackSplittingValidation)), "cosmics0T": str(self.cosmics0T), "use_d0cut": str(self.use_d0cut), "ispvvalidation": str(self.isPVValidation) }) commands = [] for removeorappend in "remove", "append": optionname = removeorappend + "trackhitfiltercommands" if result[optionname]: for command in result[optionname].split(","): command = command.strip() commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) result["trackhitfiltercommands"] = "\n".join(commands) return result @property def use_d0cut(self): return "Cosmics" not in self.general["trackcollection"] #use it for collisions only @property def isPVValidation(self): return False # only for PV Validation sequence @property def TrackSelectionRefitting(self): return configTemplates.CommonTrackSelectionRefitting @property def DefinePath(self): return 
configTemplates.DefinePath_CommonSelectionRefitting @abstractproperty def ValidationSequence(self): pass @property def cosmics0T(self): if "Cosmics" not in self.general["trackcollection"]: return False Bfield = self.dataset.magneticFieldForRun() if Bfield < 0.5: return True if isinstance(Bfield, str): if "unknown " in Bfield: msg = Bfield.replace("unknown ","",1) elif Bfield == "unknown": msg = "Can't get the B field for %s." % self.dataset.name() else: msg = "B field = {}???".format(Bfield) raise AllInOneError(msg + "\n" "To use this dataset, specify magneticfield = [value] in your .ini config file.") return False class ParallelValidation(GenericValidation): @classmethod def initMerge(cls): return "" @abstractmethod def appendToMerge(self): pass @classmethod def doInitMerge(cls): from .plottingOptions import PlottingOptions result = cls.initMerge() result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result def doMerge(self): result = self.appendToMerge() if result[-1] != "\n": result += "\n" result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" " xrdcp -f .oO[finalOutputFile]Oo. root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" "fi\n" "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" " mergeRetCode=${tmpMergeRetCode}\n" "fi\n") result = replaceByMap(result, self.getRepMap()) return result class ValidationWithPlots(GenericValidation): @classmethod def runPlots(cls, validations): return ("cp .oO[plottingscriptpath]Oo. .\n" "root -x -b -q .oO[plottingscriptname]Oo.++") @abstractmethod def appendToPlots(self): pass @abstractmethod def plottingscriptname(cls): """override with a classmethod""" @abstractmethod def plottingscripttemplate(cls): """override with a classmethod""" @abstractmethod def plotsdirname(cls): """override with a classmethod""" @classmethod def doRunPlots(cls, validations): from .plottingOptions import PlottingOptions cls.createPlottingScript(validations) result = cls.runPlots(validations) result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result @classmethod def createPlottingScript(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) repmap["PlottingInstantiation"] = "\n".join( replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") for v in validations ) plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) with open(filename, 'w') as f: f.write(plottingscript) class ValidationWithPlotsSummaryBase(ValidationWithPlots): class SummaryItem(object): def __init__(self, name, values, format=None, latexname=None, latexformat=None): """ name: name of the summary item, goes on top of the column values: value for each alignment (in order of rows) format: python format string (default: {:.3g}, meaning up to 3 significant digits) latexname: name in latex form, e.g. 
if name=sigma you might want latexname=\sigma (default: name) latexformat: format for latex (default: format) """ if format is None: format = "{:.3g}" if latexname is None: latexname = name if latexformat is None: latexformat = format self.__name = name self.__values = values self.__format = format self.__latexname = latexname self.__latexformat = latexformat def name(self, latex=False): if latex: return self.__latexname else: return self.__name def format(self, value, latex=False): if latex: fmt = self.__latexformat else: fmt = self.__format if re.match(".*[{][^}]*[fg][}].*", fmt): value = float(value) return fmt.format(value) def values(self, latex=False): result = [self.format(v, latex=latex) for v in self.__values] return result def value(self, i, latex): return self.values(latex)[i] @abstractmethod def getsummaryitems(cls, folder): """override with a classmethod that returns a list of SummaryItems based on the plots saved in folder""" __summaryitems = None __lastfolder = None @classmethod def summaryitemsstring(cls, folder=None, latex=False, transpose=True): if folder is None: folder = cls.plotsdirname() if folder.startswith( "/castor/" ): folder = "rfio:%(file)s"%repMap elif folder.startswith( "/store/" ): folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if cls.__summaryitems is None or cls.__lastfolder != folder: cls.__lastfolder = folder cls.__summaryitems = cls.getsummaryitems(folder) summaryitems = cls.__summaryitems if not summaryitems: raise AllInOneError("No summary items!") size = {len(_.values(latex)) for _ in summaryitems} if len(size) != 1: raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) size = size.pop() if transpose: columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) else: columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] if latex: join = " & " else: join = " " row = join.join("{{:{}}}".format(width) for width in columnwidths) if transpose: rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] else: rows = [] rows.append(row.format(*(_.name for _ in summaryitems))) for i in range(size): rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) if latex: join = " \\\\\n" else: join = "\n" result = join.join(rows) if latex: result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" + result + "\n" + r"\end{tabular}") return result @classmethod def printsummaryitems(cls, *args, **kwargs): print(cls.summaryitemsstring(*args, **kwargs)) @classmethod def writesummaryitems(cls, filename, *args, **kwargs): with open(filename, "w") as f: f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): @classmethod def getsummaryitems(cls, folder): result = [] with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: for line in f: split = line.rstrip("\n").split("\t") kwargs = {} for thing in split[:]: if thing.startswith("format="): kwargs["format"] = thing.replace("format=", "", 1) split.remove(thing) if thing.startswith("latexname="): kwargs["latexname"] = thing.replace("latexname=", "", 1) split.remove(thing) if thing.startswith("latexformat="): kwargs["latexformat"] = thing.replace("latexformat=", "", 1) split.remove(thing) name = split[0] values = split[1:] result.append(cls.SummaryItem(name, values, **kwargs)) return result class 
ValidationWithComparison(GenericValidation): @classmethod def doComparison(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) comparison = replaceByMap(cls.comparisontemplate(), repmap) return comparison @classmethod def comparisontemplate(cls): return configTemplates.compareAlignmentsExecution @classmethod def comparealignmentspath(cls): return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." @abstractmethod def comparealignmentsname(cls): """classmethod""" class ValidationForPresentation(ValidationWithPlots): @abstractmethod def presentationsubsections(cls): """classmethod"""
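The `getsummaryitems` parser visible near the end of the row above reads tab-separated `<ClassName>Summary.txt` files. A hypothetical sample line (all values invented) and what the parser turns it into:

```python
# Invented sample line: tab-separated, item name first, then values, with
# optional "format=", "latexname=" and "latexformat=" tokens anywhere after
# the name (they are stripped out before the values are collected).
line = "sigma\t0.123\t0.456\tlatexname=\\sigma\tformat={:.3g}"
# getsummaryitems would build from this:
#   SummaryItem("sigma", ["0.123", "0.456"],
#               format="{:.3g}", latexname="\\sigma")
```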
createCrabCfg
Method which creates a `crab.cfg` for a validation on datasets. Arguments: - `path`: Path at which the file will be stored. - `crabCfgBaseName`: String which depends on the actual type of validation calling this method.
from __future__ import print_function from __future__ import absolute_import from builtins import range from abc import ABCMeta, abstractmethod, abstractproperty import os import re import json from . import globalDictionaries from . import configTemplates from .dataset import Dataset from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring from .TkAlExceptions import AllInOneError from six import with_metaclass class ValidationMetaClass(ABCMeta): sets = ["mandatories", "optionals", "needpackages"] dicts = ["defaults"] def __new__(cls, clsname, bases, dct): for setname in cls.sets: if setname not in dct: dct[setname] = set() dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) for dictname in cls.dicts: if dictname not in dct: dct[dictname] = {} for base in bases: if not hasattr(base, dictname): continue newdict = getattr(base, dictname) for key in set(newdict) & set(dct[dictname]): if newdict[key] != dct[dictname][key]: raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) dct[dictname].update(newdict) for setname in cls.sets: #e.g. removemandatories, used in preexistingvalidation #use with caution if "remove"+setname not in dct: dct["remove"+setname] = set() dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) dct[setname] -= dct["remove"+setname] return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) class GenericValidation(with_metaclass(ValidationMetaClass,object)): defaultReferenceName = "DEFAULT" mandatories = set() defaults = { "cmssw": os.environ['CMSSW_BASE'], "parallelJobs": "1", "jobid": "", "needsproxy": "false", } needpackages = {"Alignment/OfflineValidation"} optionals = {"jobmode"} def __init__(self, valName, alignment, config): import random self.name = valName self.alignmentToValidate = alignment self.general = config.getGeneral() self.randomWorkdirPart = "%0i"%random.randint(1,10e9) self.configFiles = [] self.config = config self.jobid = "" theUpdate = config.getResultingSection(self.valType+":"+self.name, defaultDict = self.defaults, demandPars = self.mandatories) self.general.update(theUpdate) self.jobmode = self.general["jobmode"] self.NJobs = int(self.general["parallelJobs"]) self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") # limit maximum number of parallel jobs to 40 # (each output file is approximately 20MB) maximumNumberJobs = 40 if self.NJobs > maximumNumberJobs: msg = ("Maximum allowed number of parallel jobs " +str(maximumNumberJobs)+" exceeded!!!") raise AllInOneError(msg) if self.NJobs > 1 and not isinstance(self, ParallelValidation): raise AllInOneError("Parallel jobs not implemented for {}!\n" "Please set parallelJobs = 1.".format(type(self).__name__)) self.jobid = self.general["jobid"] if self.jobid: try: #make sure it's actually a valid jobid output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) if "is not found" in output: raise RuntimeError except RuntimeError: raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) self.cmssw = self.general["cmssw"] badcharacters = r"\'" for character in badcharacters: if character in self.cmssw: raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" "path name. 
If you really have it in such a ridiculously named location,\n" "try making a symbolic link somewhere with a decent name.") try: os.listdir(self.cmssw) except OSError: raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') if self.cmssw == os.environ["CMSSW_BASE"]: self.scramarch = os.environ["SCRAM_ARCH"] self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] else: command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') commandoutput = getCommandOutput2(command).split('\n') self.cmssw = commandoutput[0] self.scramarch = commandoutput[1] self.cmsswreleasebase = commandoutput[2] self.packages = {} for package in self.needpackages: for placetolook in self.cmssw, self.cmsswreleasebase: pkgpath = os.path.join(placetolook, "src", package) if os.path.exists(pkgpath): self.packages[package] = pkgpath break else: raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) self.AutoAlternates = True if config.has_option("alternateTemplates","AutoAlternates"): try: self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) except ValueError: raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals ignoreOpts = [] config.checkInput(self.valType+":"+self.name, knownSimpleOptions = knownOpts, ignoreOptions = ignoreOpts) def getRepMap(self, alignment = None): from .plottingOptions import PlottingOptions if alignment == None: alignment = self.alignmentToValidate try: result = PlottingOptions(self.config, self.valType) except KeyError: result = {} result.update(alignment.getRepMap()) result.update(self.general) result.update({ "workdir": os.path.join(self.general["workdir"], self.randomWorkdirPart), "datadir": self.general["datadir"], "logdir": self.general["logdir"], "CommandLineTemplate": ("#run configfile and post-proccess it\n" "cmsRun %(cfgFile)s\n" "%(postProcess)s "), "CMSSW_BASE": self.cmssw, "SCRAM_ARCH": self.scramarch, "CMSSW_RELEASE_BASE": self.cmsswreleasebase, "alignmentName": alignment.name, "condLoad": alignment.getConditions(), "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, }) result.update(self.packages) return result @abstractproperty def filesToCompare(self): pass def getCompareStrings( self, requestId = None, plain = False ): result = {} repMap = self.getRepMap().copy() for validationId in self.filesToCompare: repMap["file"] = self.filesToCompare[ validationId ] if repMap["file"].startswith( "/castor/" ): repMap["file"] = "rfio:%(file)s"%repMap elif repMap["file"].startswith( "/store/" ): repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if plain: result[validationId]=repMap["file"] else: result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap if requestId == None: return result else: if not "." in requestId: requestId += ".%s"%self.defaultReferenceName if not requestId.split(".")[-1] in result: msg = ("could not find %s in reference Objects!" 
%requestId.split(".")[-1]) raise AllInOneError(msg) return result[ requestId.split(".")[-1] ] def createFiles(self, fileContents, path, repMap = None, repMaps = None): """repMap: single map for all files repMaps: a dict, with the filenames as the keys""" if repMap is not None and repMaps is not None: raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") result = [] for fileName in fileContents: filePath = os.path.join(path, fileName) result.append(filePath) for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): theFile = open( filePathi, "w" ) fileContentsi = fileContents[ fileName ] if repMaps is not None: repMap = repMaps[fileName] if repMap is not None: repMap.update({"nIndex": str(i)}) fileContentsi = replaceByMap(fileContentsi, repMap) theFile.write( fileContentsi ) theFile.close() return result def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): self.configFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) if not schedule == None: schedule = [os.path.join( path, cfgName) for cfgName in schedule] for cfgName in schedule: if not cfgName in self.configFiles: msg = ("scheduled %s missing in generated configfiles: %s" %(cfgName, self.configFiles)) raise AllInOneError(msg) for cfgName in self.configFiles: if not cfgName in schedule: msg = ("generated configuration %s not scheduled: %s" %(cfgName, schedule)) raise AllInOneError(msg) self.configFiles = schedule return self.configFiles def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): self.scriptFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) for script in self.scriptFiles: for scriptwithindex in addIndex(script, self.NJobs): os.chmod(scriptwithindex,0o755) return self.scriptFiles def createCrabCfg(self, fileContents, path ): if self.NJobs > 1: msg = ("jobmode 'crab' not supported for parallel validation." " Please set parallelJobs = 1.") raise AllInOneError(msg) self.crabConfigFiles = self.createFiles(fileContents, path) return self.crabConfigFiles class GenericValidationData(GenericValidation): """ Subclass of `GenericValidation` which is the base for validations using datasets. """ needParentFiles = False mandatories = {"dataset", "maxevents"} defaults = { "runRange": "", "firstRun": "", "lastRun": "", "begin": "", "end": "", "JSON": "", "dasinstance": "prod/global", "ttrhbuilder":"WithAngleAndTemplate", "usepixelqualityflag": "True", } optionals = {"magneticfield"} def __init__(self, valName, alignment, config): """ This method adds additional items to the `self.general` dictionary which are only needed for validations using datasets. 
Arguments: - `valName`: String which identifies individual validation instances - `alignment`: `Alignment` instance to validate - `config`: `BetterConfigParser` instance which includes the configuration of the validations """ super(GenericValidationData, self).__init__(valName, alignment, config) # if maxevents is not specified, cannot calculate number of events for # each parallel job, and therefore running only a single job if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: msg = ("Maximum number of events (maxevents) not specified: " "cannot use parallel jobs.") raise AllInOneError(msg) if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: msg = ("maxevents has to be divisible by parallelJobs") raise AllInOneError(msg) tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" and self.general["firstRun"] == "" and self.general["lastRun"] == "" and self.general["begin"] == "" and self.general["end"] == "") if self.general["dataset"] not in globalDictionaries.usedDatasets: globalDictionaries.usedDatasets[self.general["dataset"]] = {} if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} Bfield = self.general.get("magneticfield", None) if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: dataset = Dataset( self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, dasinstance = self.general["dasinstance"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] self.general["magneticField"] = self.dataset.magneticField() self.general["defaultMagneticField"] = "MagneticField" if self.general["magneticField"] == "unknown": print("Could not get the magnetic field for this dataset.") print("Using the default: ", self.general["defaultMagneticField"]) self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' if not self.jobmode.split( ',' )[0] == "crab": try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], parent = self.needParentFiles ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str(e) raise AllInOneError(msg) else: if self.dataset.predefined(): msg = ("For jobmode 'crab' you cannot use predefined datasets " "(in your case: '%s')."%( self.dataset.name() )) raise AllInOneError( msg ) try: theUpdate = config.getResultingSection(self.valType+":"+self.name, demandPars = ["parallelJobs"]) except AllInOneError as e: msg = str(e)[:-1]+" when using 'jobmode: crab'." 
raise AllInOneError(msg) self.general.update(theUpdate) if self.general["begin"] or self.general["end"]: ( self.general["begin"], self.general["end"], self.general["firstRun"], self.general["lastRun"] ) = self.dataset.convertTimeToRun( firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], shortTuple = False) if self.general["begin"] == None: self.general["begin"] = "" if self.general["end"] == None: self.general["end"] = "" self.general["firstRun"] = str( self.general["firstRun"] ) self.general["lastRun"] = str( self.general["lastRun"] ) if ( not self.general["firstRun"] ) and \ ( self.general["end"] or self.general["lastRun"] ): self.general["firstRun"] = str( self.dataset.runList()[0]["run_number"]) if ( not self.general["lastRun"] ) and \ ( self.general["begin"] or self.general["firstRun"] ): self.general["lastRun"] = str( self.dataset.runList()[-1]["run_number"]) if self.general["firstRun"] and self.general["lastRun"]: if int(self.general["firstRun"]) > int(self.general["lastRun"]): msg = ( "The lower time/runrange limit ('begin'/'firstRun') " "chosen is greater than the upper time/runrange limit " "('end'/'lastRun').") raise AllInOneError( msg ) self.general["runRange"] = (self.general["firstRun"] + '-' + self.general["lastRun"]) try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], crab = True ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str( e ) raise AllInOneError( msg ) self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag") def getRepMap(self, alignment = None): result = super(GenericValidationData, self).getRepMap(alignment) outputfile = os.path.expandvars(replaceByMap( "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) , result)) resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) , result)) result.update({ "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", "resultFiles": addIndex(resultfile, self.NJobs), "finalResultFile": resultfile, "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", "outputFiles": addIndex(outputfile, self.NJobs), "finalOutputFile": outputfile, "ProcessName": self.ProcessName, "Bookkeeping": self.Bookkeeping, "LoadBasicModules": self.LoadBasicModules, "TrackSelectionRefitting": self.TrackSelectionRefitting, "ValidationConfig": self.ValidationTemplate, "FileOutputTemplate": self.FileOutputTemplate, "DefinePath": self.DefinePath, }) return result @property def cfgName(self): return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, self.alignmentToValidate.name ) @abstractproperty def ProcessName(self): pass @property def cfgTemplate(self): return configTemplates.cfgTemplate @abstractproperty def ValidationTemplate(self): pass @property def filesToCompare(self): return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} def createConfiguration(self, path ): repMap = self.getRepMap() cfgs = {self.cfgName: self.cfgTemplate} super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): scriptName = 
"%s.%s.%s.sh"%(self.scriptBaseName, self.name, self.alignmentToValidate.name ) if repMap is None and repMaps is None: repMap = self.getRepMap() repMap["CommandLine"]="" for cfg in self.configFiles: repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), "postProcess":"" } scripts = {scriptName: template} return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, repMap = repMap, repMaps = repMaps) # MASKED: createCrabCfg function (lines 462-492) @property def Bookkeeping(self): return configTemplates.Bookkeeping @property def LoadBasicModules(self): return configTemplates.LoadBasicModules @abstractproperty def TrackSelectionRefitting(self): pass @property def FileOutputTemplate(self): return configTemplates.FileOutputTemplate @abstractproperty def DefinePath(self): pass class GenericValidationData_CTSR(GenericValidationData): #common track selection and refitting defaults = { "momentumconstraint": "None", "openmasswindow": "False", "cosmicsdecomode": "True", "removetrackhitfiltercommands": "", "appendtrackhitfiltercommands": "", } def getRepMap(self, alignment=None): result = super(GenericValidationData_CTSR, self).getRepMap(alignment) from .trackSplittingValidation import TrackSplittingValidation result.update({ "ValidationSequence": self.ValidationSequence, "istracksplitting": str(isinstance(self, TrackSplittingValidation)), "cosmics0T": str(self.cosmics0T), "use_d0cut": str(self.use_d0cut), "ispvvalidation": str(self.isPVValidation) }) commands = [] for removeorappend in "remove", "append": optionname = removeorappend + "trackhitfiltercommands" if result[optionname]: for command in result[optionname].split(","): command = command.strip() commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) result["trackhitfiltercommands"] = "\n".join(commands) return result @property def use_d0cut(self): return "Cosmics" not in self.general["trackcollection"] #use it for collisions only @property def isPVValidation(self): return False # only for PV Validation sequence @property def TrackSelectionRefitting(self): return configTemplates.CommonTrackSelectionRefitting @property def DefinePath(self): return configTemplates.DefinePath_CommonSelectionRefitting @abstractproperty def ValidationSequence(self): pass @property def cosmics0T(self): if "Cosmics" not in self.general["trackcollection"]: return False Bfield = self.dataset.magneticFieldForRun() if Bfield < 0.5: return True if isinstance(Bfield, str): if "unknown " in Bfield: msg = Bfield.replace("unknown ","",1) elif Bfield == "unknown": msg = "Can't get the B field for %s." % self.dataset.name() else: msg = "B field = {}???".format(Bfield) raise AllInOneError(msg + "\n" "To use this dataset, specify magneticfield = [value] in your .ini config file.") return False class ParallelValidation(GenericValidation): @classmethod def initMerge(cls): return "" @abstractmethod def appendToMerge(self): pass @classmethod def doInitMerge(cls): from .plottingOptions import PlottingOptions result = cls.initMerge() result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result def doMerge(self): result = self.appendToMerge() if result[-1] != "\n": result += "\n" result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" " xrdcp -f .oO[finalOutputFile]Oo. 
root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" "fi\n" "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" " mergeRetCode=${tmpMergeRetCode}\n" "fi\n") result = replaceByMap(result, self.getRepMap()) return result class ValidationWithPlots(GenericValidation): @classmethod def runPlots(cls, validations): return ("cp .oO[plottingscriptpath]Oo. .\n" "root -x -b -q .oO[plottingscriptname]Oo.++") @abstractmethod def appendToPlots(self): pass @abstractmethod def plottingscriptname(cls): """override with a classmethod""" @abstractmethod def plottingscripttemplate(cls): """override with a classmethod""" @abstractmethod def plotsdirname(cls): """override with a classmethod""" @classmethod def doRunPlots(cls, validations): from .plottingOptions import PlottingOptions cls.createPlottingScript(validations) result = cls.runPlots(validations) result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result @classmethod def createPlottingScript(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) repmap["PlottingInstantiation"] = "\n".join( replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") for v in validations ) plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) with open(filename, 'w') as f: f.write(plottingscript) class ValidationWithPlotsSummaryBase(ValidationWithPlots): class SummaryItem(object): def __init__(self, name, values, format=None, latexname=None, latexformat=None): """ name: name of the summary item, goes on top of the column values: value for each alignment (in order of rows) format: python format string (default: {:.3g}, meaning up to 3 significant digits) latexname: name in latex form, e.g. 
if name=sigma you might want latexname=\sigma (default: name) latexformat: format for latex (default: format) """ if format is None: format = "{:.3g}" if latexname is None: latexname = name if latexformat is None: latexformat = format self.__name = name self.__values = values self.__format = format self.__latexname = latexname self.__latexformat = latexformat def name(self, latex=False): if latex: return self.__latexname else: return self.__name def format(self, value, latex=False): if latex: fmt = self.__latexformat else: fmt = self.__format if re.match(".*[{][^}]*[fg][}].*", fmt): value = float(value) return fmt.format(value) def values(self, latex=False): result = [self.format(v, latex=latex) for v in self.__values] return result def value(self, i, latex): return self.values(latex)[i] @abstractmethod def getsummaryitems(cls, folder): """override with a classmethod that returns a list of SummaryItems based on the plots saved in folder""" __summaryitems = None __lastfolder = None @classmethod def summaryitemsstring(cls, folder=None, latex=False, transpose=True): if folder is None: folder = cls.plotsdirname() if folder.startswith( "/castor/" ): folder = "rfio:%(file)s"%repMap elif folder.startswith( "/store/" ): folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if cls.__summaryitems is None or cls.__lastfolder != folder: cls.__lastfolder = folder cls.__summaryitems = cls.getsummaryitems(folder) summaryitems = cls.__summaryitems if not summaryitems: raise AllInOneError("No summary items!") size = {len(_.values(latex)) for _ in summaryitems} if len(size) != 1: raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) size = size.pop() if transpose: columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) else: columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] if latex: join = " & " else: join = " " row = join.join("{{:{}}}".format(width) for width in columnwidths) if transpose: rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] else: rows = [] rows.append(row.format(*(_.name for _ in summaryitems))) for i in range(size): rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) if latex: join = " \\\\\n" else: join = "\n" result = join.join(rows) if latex: result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" + result + "\n" + r"\end{tabular}") return result @classmethod def printsummaryitems(cls, *args, **kwargs): print(cls.summaryitemsstring(*args, **kwargs)) @classmethod def writesummaryitems(cls, filename, *args, **kwargs): with open(filename, "w") as f: f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): @classmethod def getsummaryitems(cls, folder): result = [] with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: for line in f: split = line.rstrip("\n").split("\t") kwargs = {} for thing in split[:]: if thing.startswith("format="): kwargs["format"] = thing.replace("format=", "", 1) split.remove(thing) if thing.startswith("latexname="): kwargs["latexname"] = thing.replace("latexname=", "", 1) split.remove(thing) if thing.startswith("latexformat="): kwargs["latexformat"] = thing.replace("latexformat=", "", 1) split.remove(thing) name = split[0] values = split[1:] result.append(cls.SummaryItem(name, values, **kwargs)) return result class 
ValidationWithComparison(GenericValidation): @classmethod def doComparison(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) comparison = replaceByMap(cls.comparisontemplate(), repmap) return comparison @classmethod def comparisontemplate(cls): return configTemplates.compareAlignmentsExecution @classmethod def comparealignmentspath(cls): return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." @abstractmethod def comparealignmentsname(cls): """classmethod""" class ValidationForPresentation(ValidationWithPlots): @abstractmethod def presentationsubsections(cls): """classmethod"""
    def createCrabCfg(self, path, crabCfgBaseName):
        """
        Method which creates a `crab.cfg` for a validation on datasets.

        Arguments:
        - `path`: Path at which the file will be stored.
        - `crabCfgBaseName`: String which depends on the actual type of
                             validation calling this method.
        """
        crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name,
                                            self.alignmentToValidate.name )
        repMap = self.getRepMap()
        repMap["script"] = "dummy_script.sh"
        # repMap["crabOutputDir"] = os.path.basename( path )
        repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0]
        self.crabWorkingDir = repMap["crabWorkingDir"]
        repMap["numberOfJobs"] = self.general["parallelJobs"]
        repMap["cfgFile"] = self.configFiles[0]
        repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1]
        if self.dataset.dataType() == "mc":
            repMap["McOrData"] = "events = .oO[nEvents]Oo."
        elif self.dataset.dataType() == "data":
            repMap["McOrData"] = "lumis = -1"
            if self.jobmode.split( ',' )[0] == "crab":
                print ("For jobmode 'crab' the parameter 'maxevents' will be "
                       "ignored and all events will be processed.")
        else:
            raise AllInOneError("Unknown data type! Can't run in crab mode")
        crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate,
                                              repMap ) }
        return super(GenericValidationData, self).createCrabCfg( crabCfg, path )
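The `repMap["queue"]` line above assumes a jobmode string of the form `<mode>,-q <queue>`. A minimal sketch of that split (the jobmode value is invented):

```python
# Invented example value, e.g. from an .ini line "jobmode: crab,-q cmscaf1nd";
# this only demonstrates the split logic used for repMap["queue"] above.
jobmode = "crab,-q cmscaf1nd"
queue = jobmode.split(',')[1].split('-q')[1]
print(repr(queue))  # ' cmscaf1nd' -- note the leading space survives
```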
462
492
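In this dataset row, `masked_code` carries a single `# MASKED: ...` marker standing in for lines `start_line`..`end_line` (462-492 here) of `file_content`. A toy sketch of reassembling the file; the strings below are short stand-ins, not the real fields:

```python
# Toy stand-ins for the row's fields, kept short for illustration.
masked_code = ("class GenericValidationData(GenericValidation):\n"
               "    # MASKED: createCrabCfg function (lines 462-492)\n")
implementation = ("    def createCrabCfg(self, path, crabCfgBaseName):\n"
                  "        ...\n")
marker = "    # MASKED: createCrabCfg function (lines 462-492)\n"
restored = masked_code.replace(marker, implementation, 1)
print(restored)
```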
from __future__ import print_function from __future__ import absolute_import from builtins import range from abc import ABCMeta, abstractmethod, abstractproperty import os import re import json from . import globalDictionaries from . import configTemplates from .dataset import Dataset from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring from .TkAlExceptions import AllInOneError from six import with_metaclass class ValidationMetaClass(ABCMeta): sets = ["mandatories", "optionals", "needpackages"] dicts = ["defaults"] def __new__(cls, clsname, bases, dct): for setname in cls.sets: if setname not in dct: dct[setname] = set() dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) for dictname in cls.dicts: if dictname not in dct: dct[dictname] = {} for base in bases: if not hasattr(base, dictname): continue newdict = getattr(base, dictname) for key in set(newdict) & set(dct[dictname]): if newdict[key] != dct[dictname][key]: raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) dct[dictname].update(newdict) for setname in cls.sets: #e.g. removemandatories, used in preexistingvalidation #use with caution if "remove"+setname not in dct: dct["remove"+setname] = set() dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) dct[setname] -= dct["remove"+setname] return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) class GenericValidation(with_metaclass(ValidationMetaClass,object)): defaultReferenceName = "DEFAULT" mandatories = set() defaults = { "cmssw": os.environ['CMSSW_BASE'], "parallelJobs": "1", "jobid": "", "needsproxy": "false", } needpackages = {"Alignment/OfflineValidation"} optionals = {"jobmode"} def __init__(self, valName, alignment, config): import random self.name = valName self.alignmentToValidate = alignment self.general = config.getGeneral() self.randomWorkdirPart = "%0i"%random.randint(1,10e9) self.configFiles = [] self.config = config self.jobid = "" theUpdate = config.getResultingSection(self.valType+":"+self.name, defaultDict = self.defaults, demandPars = self.mandatories) self.general.update(theUpdate) self.jobmode = self.general["jobmode"] self.NJobs = int(self.general["parallelJobs"]) self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") # limit maximum number of parallel jobs to 40 # (each output file is approximately 20MB) maximumNumberJobs = 40 if self.NJobs > maximumNumberJobs: msg = ("Maximum allowed number of parallel jobs " +str(maximumNumberJobs)+" exceeded!!!") raise AllInOneError(msg) if self.NJobs > 1 and not isinstance(self, ParallelValidation): raise AllInOneError("Parallel jobs not implemented for {}!\n" "Please set parallelJobs = 1.".format(type(self).__name__)) self.jobid = self.general["jobid"] if self.jobid: try: #make sure it's actually a valid jobid output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) if "is not found" in output: raise RuntimeError except RuntimeError: raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) self.cmssw = self.general["cmssw"] badcharacters = r"\'" for character in badcharacters: if character in self.cmssw: raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" "path name. 
If you really have it in such a ridiculously named location,\n" "try making a symbolic link somewhere with a decent name.") try: os.listdir(self.cmssw) except OSError: raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') if self.cmssw == os.environ["CMSSW_BASE"]: self.scramarch = os.environ["SCRAM_ARCH"] self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] else: command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') commandoutput = getCommandOutput2(command).split('\n') self.cmssw = commandoutput[0] self.scramarch = commandoutput[1] self.cmsswreleasebase = commandoutput[2] self.packages = {} for package in self.needpackages: for placetolook in self.cmssw, self.cmsswreleasebase: pkgpath = os.path.join(placetolook, "src", package) if os.path.exists(pkgpath): self.packages[package] = pkgpath break else: raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) self.AutoAlternates = True if config.has_option("alternateTemplates","AutoAlternates"): try: self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) except ValueError: raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals ignoreOpts = [] config.checkInput(self.valType+":"+self.name, knownSimpleOptions = knownOpts, ignoreOptions = ignoreOpts) def getRepMap(self, alignment = None): from .plottingOptions import PlottingOptions if alignment == None: alignment = self.alignmentToValidate try: result = PlottingOptions(self.config, self.valType) except KeyError: result = {} result.update(alignment.getRepMap()) result.update(self.general) result.update({ "workdir": os.path.join(self.general["workdir"], self.randomWorkdirPart), "datadir": self.general["datadir"], "logdir": self.general["logdir"], "CommandLineTemplate": ("#run configfile and post-proccess it\n" "cmsRun %(cfgFile)s\n" "%(postProcess)s "), "CMSSW_BASE": self.cmssw, "SCRAM_ARCH": self.scramarch, "CMSSW_RELEASE_BASE": self.cmsswreleasebase, "alignmentName": alignment.name, "condLoad": alignment.getConditions(), "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, }) result.update(self.packages) return result @abstractproperty def filesToCompare(self): pass def getCompareStrings( self, requestId = None, plain = False ): result = {} repMap = self.getRepMap().copy() for validationId in self.filesToCompare: repMap["file"] = self.filesToCompare[ validationId ] if repMap["file"].startswith( "/castor/" ): repMap["file"] = "rfio:%(file)s"%repMap elif repMap["file"].startswith( "/store/" ): repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if plain: result[validationId]=repMap["file"] else: result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap if requestId == None: return result else: if not "." in requestId: requestId += ".%s"%self.defaultReferenceName if not requestId.split(".")[-1] in result: msg = ("could not find %s in reference Objects!" 
%requestId.split(".")[-1]) raise AllInOneError(msg) return result[ requestId.split(".")[-1] ] def createFiles(self, fileContents, path, repMap = None, repMaps = None): """repMap: single map for all files repMaps: a dict, with the filenames as the keys""" if repMap is not None and repMaps is not None: raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") result = [] for fileName in fileContents: filePath = os.path.join(path, fileName) result.append(filePath) for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): theFile = open( filePathi, "w" ) fileContentsi = fileContents[ fileName ] if repMaps is not None: repMap = repMaps[fileName] if repMap is not None: repMap.update({"nIndex": str(i)}) fileContentsi = replaceByMap(fileContentsi, repMap) theFile.write( fileContentsi ) theFile.close() return result def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): self.configFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) if not schedule == None: schedule = [os.path.join( path, cfgName) for cfgName in schedule] for cfgName in schedule: if not cfgName in self.configFiles: msg = ("scheduled %s missing in generated configfiles: %s" %(cfgName, self.configFiles)) raise AllInOneError(msg) for cfgName in self.configFiles: if not cfgName in schedule: msg = ("generated configuration %s not scheduled: %s" %(cfgName, schedule)) raise AllInOneError(msg) self.configFiles = schedule return self.configFiles def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): self.scriptFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) for script in self.scriptFiles: for scriptwithindex in addIndex(script, self.NJobs): os.chmod(scriptwithindex,0o755) return self.scriptFiles def createCrabCfg(self, fileContents, path ): if self.NJobs > 1: msg = ("jobmode 'crab' not supported for parallel validation." " Please set parallelJobs = 1.") raise AllInOneError(msg) self.crabConfigFiles = self.createFiles(fileContents, path) return self.crabConfigFiles class GenericValidationData(GenericValidation): """ Subclass of `GenericValidation` which is the base for validations using datasets. """ needParentFiles = False mandatories = {"dataset", "maxevents"} defaults = { "runRange": "", "firstRun": "", "lastRun": "", "begin": "", "end": "", "JSON": "", "dasinstance": "prod/global", "ttrhbuilder":"WithAngleAndTemplate", "usepixelqualityflag": "True", } optionals = {"magneticfield"} def __init__(self, valName, alignment, config): """ This method adds additional items to the `self.general` dictionary which are only needed for validations using datasets. 
Arguments: - `valName`: String which identifies individual validation instances - `alignment`: `Alignment` instance to validate - `config`: `BetterConfigParser` instance which includes the configuration of the validations """ super(GenericValidationData, self).__init__(valName, alignment, config) # if maxevents is not specified, cannot calculate number of events for # each parallel job, and therefore running only a single job if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: msg = ("Maximum number of events (maxevents) not specified: " "cannot use parallel jobs.") raise AllInOneError(msg) if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: msg = ("maxevents has to be divisible by parallelJobs") raise AllInOneError(msg) tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" and self.general["firstRun"] == "" and self.general["lastRun"] == "" and self.general["begin"] == "" and self.general["end"] == "") if self.general["dataset"] not in globalDictionaries.usedDatasets: globalDictionaries.usedDatasets[self.general["dataset"]] = {} if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} Bfield = self.general.get("magneticfield", None) if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: dataset = Dataset( self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, dasinstance = self.general["dasinstance"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] self.general["magneticField"] = self.dataset.magneticField() self.general["defaultMagneticField"] = "MagneticField" if self.general["magneticField"] == "unknown": print("Could not get the magnetic field for this dataset.") print("Using the default: ", self.general["defaultMagneticField"]) self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' if not self.jobmode.split( ',' )[0] == "crab": try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], parent = self.needParentFiles ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str(e) raise AllInOneError(msg) else: if self.dataset.predefined(): msg = ("For jobmode 'crab' you cannot use predefined datasets " "(in your case: '%s')."%( self.dataset.name() )) raise AllInOneError( msg ) try: theUpdate = config.getResultingSection(self.valType+":"+self.name, demandPars = ["parallelJobs"]) except AllInOneError as e: msg = str(e)[:-1]+" when using 'jobmode: crab'." 
raise AllInOneError(msg) self.general.update(theUpdate) if self.general["begin"] or self.general["end"]: ( self.general["begin"], self.general["end"], self.general["firstRun"], self.general["lastRun"] ) = self.dataset.convertTimeToRun( firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], shortTuple = False) if self.general["begin"] == None: self.general["begin"] = "" if self.general["end"] == None: self.general["end"] = "" self.general["firstRun"] = str( self.general["firstRun"] ) self.general["lastRun"] = str( self.general["lastRun"] ) if ( not self.general["firstRun"] ) and \ ( self.general["end"] or self.general["lastRun"] ): self.general["firstRun"] = str( self.dataset.runList()[0]["run_number"]) if ( not self.general["lastRun"] ) and \ ( self.general["begin"] or self.general["firstRun"] ): self.general["lastRun"] = str( self.dataset.runList()[-1]["run_number"]) if self.general["firstRun"] and self.general["lastRun"]: if int(self.general["firstRun"]) > int(self.general["lastRun"]): msg = ( "The lower time/runrange limit ('begin'/'firstRun') " "chosen is greater than the upper time/runrange limit " "('end'/'lastRun').") raise AllInOneError( msg ) self.general["runRange"] = (self.general["firstRun"] + '-' + self.general["lastRun"]) try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], crab = True ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str( e ) raise AllInOneError( msg ) self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag") def getRepMap(self, alignment = None): result = super(GenericValidationData, self).getRepMap(alignment) outputfile = os.path.expandvars(replaceByMap( "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) , result)) resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) , result)) result.update({ "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", "resultFiles": addIndex(resultfile, self.NJobs), "finalResultFile": resultfile, "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", "outputFiles": addIndex(outputfile, self.NJobs), "finalOutputFile": outputfile, "ProcessName": self.ProcessName, "Bookkeeping": self.Bookkeeping, "LoadBasicModules": self.LoadBasicModules, "TrackSelectionRefitting": self.TrackSelectionRefitting, "ValidationConfig": self.ValidationTemplate, "FileOutputTemplate": self.FileOutputTemplate, "DefinePath": self.DefinePath, }) return result @property def cfgName(self): return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, self.alignmentToValidate.name ) @abstractproperty def ProcessName(self): pass @property def cfgTemplate(self): return configTemplates.cfgTemplate @abstractproperty def ValidationTemplate(self): pass @property def filesToCompare(self): return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} def createConfiguration(self, path ): repMap = self.getRepMap() cfgs = {self.cfgName: self.cfgTemplate} super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): scriptName = 
"%s.%s.%s.sh"%(self.scriptBaseName, self.name, self.alignmentToValidate.name ) if repMap is None and repMaps is None: repMap = self.getRepMap() repMap["CommandLine"]="" for cfg in self.configFiles: repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), "postProcess":"" } scripts = {scriptName: template} return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, repMap = repMap, repMaps = repMaps) def createCrabCfg(self, path, crabCfgBaseName): """ Method which creates a `crab.cfg` for a validation on datasets. Arguments: - `path`: Path at which the file will be stored. - `crabCfgBaseName`: String which depends on the actual type of validation calling this method. """ crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name, self.alignmentToValidate.name ) repMap = self.getRepMap() repMap["script"] = "dummy_script.sh" # repMap["crabOutputDir"] = os.path.basename( path ) repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0] self.crabWorkingDir = repMap["crabWorkingDir"] repMap["numberOfJobs"] = self.general["parallelJobs"] repMap["cfgFile"] = self.configFiles[0] repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1] if self.dataset.dataType() == "mc": repMap["McOrData"] = "events = .oO[nEvents]Oo." elif self.dataset.dataType() == "data": repMap["McOrData"] = "lumis = -1" if self.jobmode.split( ',' )[0] == "crab": print ("For jobmode 'crab' the parameter 'maxevents' will be " "ignored and all events will be processed.") else: raise AllInOneError("Unknown data type! Can't run in crab mode") crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate, repMap ) } return super(GenericValidationData, self).createCrabCfg( crabCfg, path ) @property def Bookkeeping(self): return configTemplates.Bookkeeping @property def LoadBasicModules(self): return configTemplates.LoadBasicModules @abstractproperty def TrackSelectionRefitting(self): pass @property def FileOutputTemplate(self): return configTemplates.FileOutputTemplate @abstractproperty def DefinePath(self): pass class GenericValidationData_CTSR(GenericValidationData): #common track selection and refitting defaults = { "momentumconstraint": "None", "openmasswindow": "False", "cosmicsdecomode": "True", "removetrackhitfiltercommands": "", "appendtrackhitfiltercommands": "", } def getRepMap(self, alignment=None): result = super(GenericValidationData_CTSR, self).getRepMap(alignment) from .trackSplittingValidation import TrackSplittingValidation result.update({ "ValidationSequence": self.ValidationSequence, "istracksplitting": str(isinstance(self, TrackSplittingValidation)), "cosmics0T": str(self.cosmics0T), "use_d0cut": str(self.use_d0cut), "ispvvalidation": str(self.isPVValidation) }) commands = [] for removeorappend in "remove", "append": optionname = removeorappend + "trackhitfiltercommands" if result[optionname]: for command in result[optionname].split(","): command = command.strip() commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) result["trackhitfiltercommands"] = "\n".join(commands) return result @property def use_d0cut(self): return "Cosmics" not in self.general["trackcollection"] #use it for collisions only @property def isPVValidation(self): return False # only for PV Validation sequence @property def TrackSelectionRefitting(self): return configTemplates.CommonTrackSelectionRefitting @property def DefinePath(self): return 
configTemplates.DefinePath_CommonSelectionRefitting @abstractproperty def ValidationSequence(self): pass @property def cosmics0T(self): if "Cosmics" not in self.general["trackcollection"]: return False Bfield = self.dataset.magneticFieldForRun() if Bfield < 0.5: return True if isinstance(Bfield, str): if "unknown " in Bfield: msg = Bfield.replace("unknown ","",1) elif Bfield == "unknown": msg = "Can't get the B field for %s." % self.dataset.name() else: msg = "B field = {}???".format(Bfield) raise AllInOneError(msg + "\n" "To use this dataset, specify magneticfield = [value] in your .ini config file.") return False class ParallelValidation(GenericValidation): @classmethod def initMerge(cls): return "" @abstractmethod def appendToMerge(self): pass @classmethod def doInitMerge(cls): from .plottingOptions import PlottingOptions result = cls.initMerge() result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result def doMerge(self): result = self.appendToMerge() if result[-1] != "\n": result += "\n" result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" " xrdcp -f .oO[finalOutputFile]Oo. root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" "fi\n" "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" " mergeRetCode=${tmpMergeRetCode}\n" "fi\n") result = replaceByMap(result, self.getRepMap()) return result class ValidationWithPlots(GenericValidation): @classmethod def runPlots(cls, validations): return ("cp .oO[plottingscriptpath]Oo. .\n" "root -x -b -q .oO[plottingscriptname]Oo.++") @abstractmethod def appendToPlots(self): pass @abstractmethod def plottingscriptname(cls): """override with a classmethod""" @abstractmethod def plottingscripttemplate(cls): """override with a classmethod""" @abstractmethod def plotsdirname(cls): """override with a classmethod""" @classmethod def doRunPlots(cls, validations): from .plottingOptions import PlottingOptions cls.createPlottingScript(validations) result = cls.runPlots(validations) result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result @classmethod def createPlottingScript(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) repmap["PlottingInstantiation"] = "\n".join( replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") for v in validations ) plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) with open(filename, 'w') as f: f.write(plottingscript) class ValidationWithPlotsSummaryBase(ValidationWithPlots): class SummaryItem(object): def __init__(self, name, values, format=None, latexname=None, latexformat=None): """ name: name of the summary item, goes on top of the column values: value for each alignment (in order of rows) format: python format string (default: {:.3g}, meaning up to 3 significant digits) latexname: name in latex form, e.g. 
if name=sigma you might want latexname=\sigma (default: name) latexformat: format for latex (default: format) """ if format is None: format = "{:.3g}" if latexname is None: latexname = name if latexformat is None: latexformat = format self.__name = name self.__values = values self.__format = format self.__latexname = latexname self.__latexformat = latexformat def name(self, latex=False): if latex: return self.__latexname else: return self.__name def format(self, value, latex=False): if latex: fmt = self.__latexformat else: fmt = self.__format if re.match(".*[{][^}]*[fg][}].*", fmt): value = float(value) return fmt.format(value) def values(self, latex=False): result = [self.format(v, latex=latex) for v in self.__values] return result def value(self, i, latex): return self.values(latex)[i] @abstractmethod def getsummaryitems(cls, folder): """override with a classmethod that returns a list of SummaryItems based on the plots saved in folder""" __summaryitems = None __lastfolder = None @classmethod def summaryitemsstring(cls, folder=None, latex=False, transpose=True): if folder is None: folder = cls.plotsdirname() if folder.startswith( "/castor/" ): folder = "rfio:%(file)s"%repMap elif folder.startswith( "/store/" ): folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if cls.__summaryitems is None or cls.__lastfolder != folder: cls.__lastfolder = folder cls.__summaryitems = cls.getsummaryitems(folder) summaryitems = cls.__summaryitems if not summaryitems: raise AllInOneError("No summary items!") size = {len(_.values(latex)) for _ in summaryitems} if len(size) != 1: raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) size = size.pop() if transpose: columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) else: columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] if latex: join = " & " else: join = " " row = join.join("{{:{}}}".format(width) for width in columnwidths) if transpose: rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] else: rows = [] rows.append(row.format(*(_.name for _ in summaryitems))) for i in range(size): rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) if latex: join = " \\\\\n" else: join = "\n" result = join.join(rows) if latex: result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" + result + "\n" + r"\end{tabular}") return result @classmethod def printsummaryitems(cls, *args, **kwargs): print(cls.summaryitemsstring(*args, **kwargs)) @classmethod def writesummaryitems(cls, filename, *args, **kwargs): with open(filename, "w") as f: f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): @classmethod def getsummaryitems(cls, folder): result = [] with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: for line in f: split = line.rstrip("\n").split("\t") kwargs = {} for thing in split[:]: if thing.startswith("format="): kwargs["format"] = thing.replace("format=", "", 1) split.remove(thing) if thing.startswith("latexname="): kwargs["latexname"] = thing.replace("latexname=", "", 1) split.remove(thing) if thing.startswith("latexformat="): kwargs["latexformat"] = thing.replace("latexformat=", "", 1) split.remove(thing) name = split[0] values = split[1:] result.append(cls.SummaryItem(name, values, **kwargs)) return result class 
ValidationWithComparison(GenericValidation): @classmethod def doComparison(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) comparison = replaceByMap(cls.comparisontemplate(), repmap) return comparison @classmethod def comparisontemplate(cls): return configTemplates.compareAlignmentsExecution @classmethod def comparealignmentspath(cls): return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." @abstractmethod def comparealignmentsname(cls): """classmethod""" class ValidationForPresentation(ValidationWithPlots): @abstractmethod def presentationsubsections(cls): """classmethod"""
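The `summaryitemsstring` method in the file above builds a fixed-width text (or LaTeX) table by computing per-column widths and rendering every row through a single format-string template. A minimal standalone sketch of that column-width technique follows; the item names and values here are hypothetical illustrations, not part of the validation framework:

```python
# Sketch of the fixed-width table technique used in summaryitemsstring.
# The names and values below are hypothetical.
names = ["mean", "sigma"]
values = [["0.123", "4.56"], ["0.0789", "1.23"]]  # one list per item, one entry per alignment

# Column widths: the name column plus one column per alignment (transpose=True layout).
columnwidths = ([max(len(n) for n in names)]
                + [max(len(vals[i]) for vals in values) for i in range(len(values[0]))])

# One format string renders every row with consistent padding.
row = "   ".join("{{:{}}}".format(w) for w in columnwidths)
for name, vals in zip(names, values):
    print(row.format(name, *vals))
```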
__init__
name:         name of the summary item, goes on top of the column
values:       value for each alignment (in order of rows)
format:       python format string (default: {:.3g}, meaning up to 3 significant digits)
latexname:    name in latex form, e.g. if name=sigma you might want latexname=\sigma (default: name)
latexformat:  format for latex (default: format)
from __future__ import print_function from __future__ import absolute_import from builtins import range from abc import ABCMeta, abstractmethod, abstractproperty import os import re import json from . import globalDictionaries from . import configTemplates from .dataset import Dataset from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring from .TkAlExceptions import AllInOneError from six import with_metaclass class ValidationMetaClass(ABCMeta): sets = ["mandatories", "optionals", "needpackages"] dicts = ["defaults"] def __new__(cls, clsname, bases, dct): for setname in cls.sets: if setname not in dct: dct[setname] = set() dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) for dictname in cls.dicts: if dictname not in dct: dct[dictname] = {} for base in bases: if not hasattr(base, dictname): continue newdict = getattr(base, dictname) for key in set(newdict) & set(dct[dictname]): if newdict[key] != dct[dictname][key]: raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) dct[dictname].update(newdict) for setname in cls.sets: #e.g. removemandatories, used in preexistingvalidation #use with caution if "remove"+setname not in dct: dct["remove"+setname] = set() dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) dct[setname] -= dct["remove"+setname] return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) class GenericValidation(with_metaclass(ValidationMetaClass,object)): defaultReferenceName = "DEFAULT" mandatories = set() defaults = { "cmssw": os.environ['CMSSW_BASE'], "parallelJobs": "1", "jobid": "", "needsproxy": "false", } needpackages = {"Alignment/OfflineValidation"} optionals = {"jobmode"} def __init__(self, valName, alignment, config): import random self.name = valName self.alignmentToValidate = alignment self.general = config.getGeneral() self.randomWorkdirPart = "%0i"%random.randint(1,10e9) self.configFiles = [] self.config = config self.jobid = "" theUpdate = config.getResultingSection(self.valType+":"+self.name, defaultDict = self.defaults, demandPars = self.mandatories) self.general.update(theUpdate) self.jobmode = self.general["jobmode"] self.NJobs = int(self.general["parallelJobs"]) self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") # limit maximum number of parallel jobs to 40 # (each output file is approximately 20MB) maximumNumberJobs = 40 if self.NJobs > maximumNumberJobs: msg = ("Maximum allowed number of parallel jobs " +str(maximumNumberJobs)+" exceeded!!!") raise AllInOneError(msg) if self.NJobs > 1 and not isinstance(self, ParallelValidation): raise AllInOneError("Parallel jobs not implemented for {}!\n" "Please set parallelJobs = 1.".format(type(self).__name__)) self.jobid = self.general["jobid"] if self.jobid: try: #make sure it's actually a valid jobid output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) if "is not found" in output: raise RuntimeError except RuntimeError: raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) self.cmssw = self.general["cmssw"] badcharacters = r"\'" for character in badcharacters: if character in self.cmssw: raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" "path name. 
If you really have it in such a ridiculously named location,\n" "try making a symbolic link somewhere with a decent name.") try: os.listdir(self.cmssw) except OSError: raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') if self.cmssw == os.environ["CMSSW_BASE"]: self.scramarch = os.environ["SCRAM_ARCH"] self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] else: command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') commandoutput = getCommandOutput2(command).split('\n') self.cmssw = commandoutput[0] self.scramarch = commandoutput[1] self.cmsswreleasebase = commandoutput[2] self.packages = {} for package in self.needpackages: for placetolook in self.cmssw, self.cmsswreleasebase: pkgpath = os.path.join(placetolook, "src", package) if os.path.exists(pkgpath): self.packages[package] = pkgpath break else: raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) self.AutoAlternates = True if config.has_option("alternateTemplates","AutoAlternates"): try: self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) except ValueError: raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals ignoreOpts = [] config.checkInput(self.valType+":"+self.name, knownSimpleOptions = knownOpts, ignoreOptions = ignoreOpts) def getRepMap(self, alignment = None): from .plottingOptions import PlottingOptions if alignment == None: alignment = self.alignmentToValidate try: result = PlottingOptions(self.config, self.valType) except KeyError: result = {} result.update(alignment.getRepMap()) result.update(self.general) result.update({ "workdir": os.path.join(self.general["workdir"], self.randomWorkdirPart), "datadir": self.general["datadir"], "logdir": self.general["logdir"], "CommandLineTemplate": ("#run configfile and post-proccess it\n" "cmsRun %(cfgFile)s\n" "%(postProcess)s "), "CMSSW_BASE": self.cmssw, "SCRAM_ARCH": self.scramarch, "CMSSW_RELEASE_BASE": self.cmsswreleasebase, "alignmentName": alignment.name, "condLoad": alignment.getConditions(), "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, }) result.update(self.packages) return result @abstractproperty def filesToCompare(self): pass def getCompareStrings( self, requestId = None, plain = False ): result = {} repMap = self.getRepMap().copy() for validationId in self.filesToCompare: repMap["file"] = self.filesToCompare[ validationId ] if repMap["file"].startswith( "/castor/" ): repMap["file"] = "rfio:%(file)s"%repMap elif repMap["file"].startswith( "/store/" ): repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if plain: result[validationId]=repMap["file"] else: result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap if requestId == None: return result else: if not "." in requestId: requestId += ".%s"%self.defaultReferenceName if not requestId.split(".")[-1] in result: msg = ("could not find %s in reference Objects!" 
%requestId.split(".")[-1]) raise AllInOneError(msg) return result[ requestId.split(".")[-1] ] def createFiles(self, fileContents, path, repMap = None, repMaps = None): """repMap: single map for all files repMaps: a dict, with the filenames as the keys""" if repMap is not None and repMaps is not None: raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") result = [] for fileName in fileContents: filePath = os.path.join(path, fileName) result.append(filePath) for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): theFile = open( filePathi, "w" ) fileContentsi = fileContents[ fileName ] if repMaps is not None: repMap = repMaps[fileName] if repMap is not None: repMap.update({"nIndex": str(i)}) fileContentsi = replaceByMap(fileContentsi, repMap) theFile.write( fileContentsi ) theFile.close() return result def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): self.configFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) if not schedule == None: schedule = [os.path.join( path, cfgName) for cfgName in schedule] for cfgName in schedule: if not cfgName in self.configFiles: msg = ("scheduled %s missing in generated configfiles: %s" %(cfgName, self.configFiles)) raise AllInOneError(msg) for cfgName in self.configFiles: if not cfgName in schedule: msg = ("generated configuration %s not scheduled: %s" %(cfgName, schedule)) raise AllInOneError(msg) self.configFiles = schedule return self.configFiles def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): self.scriptFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) for script in self.scriptFiles: for scriptwithindex in addIndex(script, self.NJobs): os.chmod(scriptwithindex,0o755) return self.scriptFiles def createCrabCfg(self, fileContents, path ): if self.NJobs > 1: msg = ("jobmode 'crab' not supported for parallel validation." " Please set parallelJobs = 1.") raise AllInOneError(msg) self.crabConfigFiles = self.createFiles(fileContents, path) return self.crabConfigFiles class GenericValidationData(GenericValidation): """ Subclass of `GenericValidation` which is the base for validations using datasets. """ needParentFiles = False mandatories = {"dataset", "maxevents"} defaults = { "runRange": "", "firstRun": "", "lastRun": "", "begin": "", "end": "", "JSON": "", "dasinstance": "prod/global", "ttrhbuilder":"WithAngleAndTemplate", "usepixelqualityflag": "True", } optionals = {"magneticfield"} def __init__(self, valName, alignment, config): """ This method adds additional items to the `self.general` dictionary which are only needed for validations using datasets. 
Arguments: - `valName`: String which identifies individual validation instances - `alignment`: `Alignment` instance to validate - `config`: `BetterConfigParser` instance which includes the configuration of the validations """ super(GenericValidationData, self).__init__(valName, alignment, config) # if maxevents is not specified, cannot calculate number of events for # each parallel job, and therefore running only a single job if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: msg = ("Maximum number of events (maxevents) not specified: " "cannot use parallel jobs.") raise AllInOneError(msg) if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: msg = ("maxevents has to be divisible by parallelJobs") raise AllInOneError(msg) tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" and self.general["firstRun"] == "" and self.general["lastRun"] == "" and self.general["begin"] == "" and self.general["end"] == "") if self.general["dataset"] not in globalDictionaries.usedDatasets: globalDictionaries.usedDatasets[self.general["dataset"]] = {} if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} Bfield = self.general.get("magneticfield", None) if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: dataset = Dataset( self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, dasinstance = self.general["dasinstance"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] self.general["magneticField"] = self.dataset.magneticField() self.general["defaultMagneticField"] = "MagneticField" if self.general["magneticField"] == "unknown": print("Could not get the magnetic field for this dataset.") print("Using the default: ", self.general["defaultMagneticField"]) self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' if not self.jobmode.split( ',' )[0] == "crab": try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], parent = self.needParentFiles ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str(e) raise AllInOneError(msg) else: if self.dataset.predefined(): msg = ("For jobmode 'crab' you cannot use predefined datasets " "(in your case: '%s')."%( self.dataset.name() )) raise AllInOneError( msg ) try: theUpdate = config.getResultingSection(self.valType+":"+self.name, demandPars = ["parallelJobs"]) except AllInOneError as e: msg = str(e)[:-1]+" when using 'jobmode: crab'." 
raise AllInOneError(msg) self.general.update(theUpdate) if self.general["begin"] or self.general["end"]: ( self.general["begin"], self.general["end"], self.general["firstRun"], self.general["lastRun"] ) = self.dataset.convertTimeToRun( firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], shortTuple = False) if self.general["begin"] == None: self.general["begin"] = "" if self.general["end"] == None: self.general["end"] = "" self.general["firstRun"] = str( self.general["firstRun"] ) self.general["lastRun"] = str( self.general["lastRun"] ) if ( not self.general["firstRun"] ) and \ ( self.general["end"] or self.general["lastRun"] ): self.general["firstRun"] = str( self.dataset.runList()[0]["run_number"]) if ( not self.general["lastRun"] ) and \ ( self.general["begin"] or self.general["firstRun"] ): self.general["lastRun"] = str( self.dataset.runList()[-1]["run_number"]) if self.general["firstRun"] and self.general["lastRun"]: if int(self.general["firstRun"]) > int(self.general["lastRun"]): msg = ( "The lower time/runrange limit ('begin'/'firstRun') " "chosen is greater than the upper time/runrange limit " "('end'/'lastRun').") raise AllInOneError( msg ) self.general["runRange"] = (self.general["firstRun"] + '-' + self.general["lastRun"]) try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], crab = True ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str( e ) raise AllInOneError( msg ) self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag") def getRepMap(self, alignment = None): result = super(GenericValidationData, self).getRepMap(alignment) outputfile = os.path.expandvars(replaceByMap( "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) , result)) resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) , result)) result.update({ "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", "resultFiles": addIndex(resultfile, self.NJobs), "finalResultFile": resultfile, "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", "outputFiles": addIndex(outputfile, self.NJobs), "finalOutputFile": outputfile, "ProcessName": self.ProcessName, "Bookkeeping": self.Bookkeeping, "LoadBasicModules": self.LoadBasicModules, "TrackSelectionRefitting": self.TrackSelectionRefitting, "ValidationConfig": self.ValidationTemplate, "FileOutputTemplate": self.FileOutputTemplate, "DefinePath": self.DefinePath, }) return result @property def cfgName(self): return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, self.alignmentToValidate.name ) @abstractproperty def ProcessName(self): pass @property def cfgTemplate(self): return configTemplates.cfgTemplate @abstractproperty def ValidationTemplate(self): pass @property def filesToCompare(self): return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} def createConfiguration(self, path ): repMap = self.getRepMap() cfgs = {self.cfgName: self.cfgTemplate} super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): scriptName = 
"%s.%s.%s.sh"%(self.scriptBaseName, self.name, self.alignmentToValidate.name ) if repMap is None and repMaps is None: repMap = self.getRepMap() repMap["CommandLine"]="" for cfg in self.configFiles: repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), "postProcess":"" } scripts = {scriptName: template} return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, repMap = repMap, repMaps = repMaps) def createCrabCfg(self, path, crabCfgBaseName): """ Method which creates a `crab.cfg` for a validation on datasets. Arguments: - `path`: Path at which the file will be stored. - `crabCfgBaseName`: String which depends on the actual type of validation calling this method. """ crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name, self.alignmentToValidate.name ) repMap = self.getRepMap() repMap["script"] = "dummy_script.sh" # repMap["crabOutputDir"] = os.path.basename( path ) repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0] self.crabWorkingDir = repMap["crabWorkingDir"] repMap["numberOfJobs"] = self.general["parallelJobs"] repMap["cfgFile"] = self.configFiles[0] repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1] if self.dataset.dataType() == "mc": repMap["McOrData"] = "events = .oO[nEvents]Oo." elif self.dataset.dataType() == "data": repMap["McOrData"] = "lumis = -1" if self.jobmode.split( ',' )[0] == "crab": print ("For jobmode 'crab' the parameter 'maxevents' will be " "ignored and all events will be processed.") else: raise AllInOneError("Unknown data type! Can't run in crab mode") crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate, repMap ) } return super(GenericValidationData, self).createCrabCfg( crabCfg, path ) @property def Bookkeeping(self): return configTemplates.Bookkeeping @property def LoadBasicModules(self): return configTemplates.LoadBasicModules @abstractproperty def TrackSelectionRefitting(self): pass @property def FileOutputTemplate(self): return configTemplates.FileOutputTemplate @abstractproperty def DefinePath(self): pass class GenericValidationData_CTSR(GenericValidationData): #common track selection and refitting defaults = { "momentumconstraint": "None", "openmasswindow": "False", "cosmicsdecomode": "True", "removetrackhitfiltercommands": "", "appendtrackhitfiltercommands": "", } def getRepMap(self, alignment=None): result = super(GenericValidationData_CTSR, self).getRepMap(alignment) from .trackSplittingValidation import TrackSplittingValidation result.update({ "ValidationSequence": self.ValidationSequence, "istracksplitting": str(isinstance(self, TrackSplittingValidation)), "cosmics0T": str(self.cosmics0T), "use_d0cut": str(self.use_d0cut), "ispvvalidation": str(self.isPVValidation) }) commands = [] for removeorappend in "remove", "append": optionname = removeorappend + "trackhitfiltercommands" if result[optionname]: for command in result[optionname].split(","): command = command.strip() commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) result["trackhitfiltercommands"] = "\n".join(commands) return result @property def use_d0cut(self): return "Cosmics" not in self.general["trackcollection"] #use it for collisions only @property def isPVValidation(self): return False # only for PV Validation sequence @property def TrackSelectionRefitting(self): return configTemplates.CommonTrackSelectionRefitting @property def DefinePath(self): return 
configTemplates.DefinePath_CommonSelectionRefitting @abstractproperty def ValidationSequence(self): pass @property def cosmics0T(self): if "Cosmics" not in self.general["trackcollection"]: return False Bfield = self.dataset.magneticFieldForRun() if Bfield < 0.5: return True if isinstance(Bfield, str): if "unknown " in Bfield: msg = Bfield.replace("unknown ","",1) elif Bfield == "unknown": msg = "Can't get the B field for %s." % self.dataset.name() else: msg = "B field = {}???".format(Bfield) raise AllInOneError(msg + "\n" "To use this dataset, specify magneticfield = [value] in your .ini config file.") return False class ParallelValidation(GenericValidation): @classmethod def initMerge(cls): return "" @abstractmethod def appendToMerge(self): pass @classmethod def doInitMerge(cls): from .plottingOptions import PlottingOptions result = cls.initMerge() result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result def doMerge(self): result = self.appendToMerge() if result[-1] != "\n": result += "\n" result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" " xrdcp -f .oO[finalOutputFile]Oo. root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" "fi\n" "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" " mergeRetCode=${tmpMergeRetCode}\n" "fi\n") result = replaceByMap(result, self.getRepMap()) return result class ValidationWithPlots(GenericValidation): @classmethod def runPlots(cls, validations): return ("cp .oO[plottingscriptpath]Oo. .\n" "root -x -b -q .oO[plottingscriptname]Oo.++") @abstractmethod def appendToPlots(self): pass @abstractmethod def plottingscriptname(cls): """override with a classmethod""" @abstractmethod def plottingscripttemplate(cls): """override with a classmethod""" @abstractmethod def plotsdirname(cls): """override with a classmethod""" @classmethod def doRunPlots(cls, validations): from .plottingOptions import PlottingOptions cls.createPlottingScript(validations) result = cls.runPlots(validations) result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result @classmethod def createPlottingScript(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) repmap["PlottingInstantiation"] = "\n".join( replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") for v in validations ) plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) with open(filename, 'w') as f: f.write(plottingscript) class ValidationWithPlotsSummaryBase(ValidationWithPlots): class SummaryItem(object): # MASKED: __init__ function (lines 640-656) def name(self, latex=False): if latex: return self.__latexname else: return self.__name def format(self, value, latex=False): if latex: fmt = self.__latexformat else: fmt = self.__format if re.match(".*[{][^}]*[fg][}].*", fmt): value = float(value) return fmt.format(value) def values(self, latex=False): result = [self.format(v, latex=latex) for v in self.__values] return result def value(self, i, latex): return self.values(latex)[i] @abstractmethod def getsummaryitems(cls, folder): """override with a classmethod that returns a list of SummaryItems based on the plots saved in folder""" __summaryitems = None __lastfolder = None @classmethod def summaryitemsstring(cls, folder=None, latex=False, transpose=True): if folder is None: folder = cls.plotsdirname() if folder.startswith( "/castor/" ): folder = "rfio:%(file)s"%repMap elif 
folder.startswith( "/store/" ): folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if cls.__summaryitems is None or cls.__lastfolder != folder: cls.__lastfolder = folder cls.__summaryitems = cls.getsummaryitems(folder) summaryitems = cls.__summaryitems if not summaryitems: raise AllInOneError("No summary items!") size = {len(_.values(latex)) for _ in summaryitems} if len(size) != 1: raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) size = size.pop() if transpose: columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) else: columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] if latex: join = " & " else: join = " " row = join.join("{{:{}}}".format(width) for width in columnwidths) if transpose: rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] else: rows = [] rows.append(row.format(*(_.name for _ in summaryitems))) for i in range(size): rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) if latex: join = " \\\\\n" else: join = "\n" result = join.join(rows) if latex: result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" + result + "\n" + r"\end{tabular}") return result @classmethod def printsummaryitems(cls, *args, **kwargs): print(cls.summaryitemsstring(*args, **kwargs)) @classmethod def writesummaryitems(cls, filename, *args, **kwargs): with open(filename, "w") as f: f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): @classmethod def getsummaryitems(cls, folder): result = [] with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: for line in f: split = line.rstrip("\n").split("\t") kwargs = {} for thing in split[:]: if thing.startswith("format="): kwargs["format"] = thing.replace("format=", "", 1) split.remove(thing) if thing.startswith("latexname="): kwargs["latexname"] = thing.replace("latexname=", "", 1) split.remove(thing) if thing.startswith("latexformat="): kwargs["latexformat"] = thing.replace("latexformat=", "", 1) split.remove(thing) name = split[0] values = split[1:] result.append(cls.SummaryItem(name, values, **kwargs)) return result class ValidationWithComparison(GenericValidation): @classmethod def doComparison(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) comparison = replaceByMap(cls.comparisontemplate(), repmap) return comparison @classmethod def comparisontemplate(cls): return configTemplates.compareAlignmentsExecution @classmethod def comparealignmentspath(cls): return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." @abstractmethod def comparealignmentsname(cls): """classmethod""" class ValidationForPresentation(ValidationWithPlots): @abstractmethod def presentationsubsections(cls): """classmethod"""
def __init__(self, name, values, format=None, latexname=None, latexformat=None):
    """
    name:         name of the summary item, goes on top of the column
    values:       value for each alignment (in order of rows)
    format:       python format string (default: {:.3g}, meaning up to 3 significant digits)
    latexname:    name in latex form, e.g. if name=sigma you might want latexname=\sigma (default: name)
    latexformat:  format for latex (default: format)
    """
    if format is None:
        format = "{:.3g}"
    if latexname is None:
        latexname = name
    if latexformat is None:
        latexformat = format

    self.__name = name
    self.__values = values
    self.__format = format
    self.__latexname = latexname
    self.__latexformat = latexformat
640
656
from __future__ import print_function from __future__ import absolute_import from builtins import range from abc import ABCMeta, abstractmethod, abstractproperty import os import re import json from . import globalDictionaries from . import configTemplates from .dataset import Dataset from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring from .TkAlExceptions import AllInOneError from six import with_metaclass class ValidationMetaClass(ABCMeta): sets = ["mandatories", "optionals", "needpackages"] dicts = ["defaults"] def __new__(cls, clsname, bases, dct): for setname in cls.sets: if setname not in dct: dct[setname] = set() dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) for dictname in cls.dicts: if dictname not in dct: dct[dictname] = {} for base in bases: if not hasattr(base, dictname): continue newdict = getattr(base, dictname) for key in set(newdict) & set(dct[dictname]): if newdict[key] != dct[dictname][key]: raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) dct[dictname].update(newdict) for setname in cls.sets: #e.g. removemandatories, used in preexistingvalidation #use with caution if "remove"+setname not in dct: dct["remove"+setname] = set() dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) dct[setname] -= dct["remove"+setname] return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) class GenericValidation(with_metaclass(ValidationMetaClass,object)): defaultReferenceName = "DEFAULT" mandatories = set() defaults = { "cmssw": os.environ['CMSSW_BASE'], "parallelJobs": "1", "jobid": "", "needsproxy": "false", } needpackages = {"Alignment/OfflineValidation"} optionals = {"jobmode"} def __init__(self, valName, alignment, config): import random self.name = valName self.alignmentToValidate = alignment self.general = config.getGeneral() self.randomWorkdirPart = "%0i"%random.randint(1,10e9) self.configFiles = [] self.config = config self.jobid = "" theUpdate = config.getResultingSection(self.valType+":"+self.name, defaultDict = self.defaults, demandPars = self.mandatories) self.general.update(theUpdate) self.jobmode = self.general["jobmode"] self.NJobs = int(self.general["parallelJobs"]) self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") # limit maximum number of parallel jobs to 40 # (each output file is approximately 20MB) maximumNumberJobs = 40 if self.NJobs > maximumNumberJobs: msg = ("Maximum allowed number of parallel jobs " +str(maximumNumberJobs)+" exceeded!!!") raise AllInOneError(msg) if self.NJobs > 1 and not isinstance(self, ParallelValidation): raise AllInOneError("Parallel jobs not implemented for {}!\n" "Please set parallelJobs = 1.".format(type(self).__name__)) self.jobid = self.general["jobid"] if self.jobid: try: #make sure it's actually a valid jobid output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) if "is not found" in output: raise RuntimeError except RuntimeError: raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) self.cmssw = self.general["cmssw"] badcharacters = r"\'" for character in badcharacters: if character in self.cmssw: raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" "path name. 
If you really have it in such a ridiculously named location,\n" "try making a symbolic link somewhere with a decent name.") try: os.listdir(self.cmssw) except OSError: raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') if self.cmssw == os.environ["CMSSW_BASE"]: self.scramarch = os.environ["SCRAM_ARCH"] self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] else: command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') commandoutput = getCommandOutput2(command).split('\n') self.cmssw = commandoutput[0] self.scramarch = commandoutput[1] self.cmsswreleasebase = commandoutput[2] self.packages = {} for package in self.needpackages: for placetolook in self.cmssw, self.cmsswreleasebase: pkgpath = os.path.join(placetolook, "src", package) if os.path.exists(pkgpath): self.packages[package] = pkgpath break else: raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) self.AutoAlternates = True if config.has_option("alternateTemplates","AutoAlternates"): try: self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) except ValueError: raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals ignoreOpts = [] config.checkInput(self.valType+":"+self.name, knownSimpleOptions = knownOpts, ignoreOptions = ignoreOpts) def getRepMap(self, alignment = None): from .plottingOptions import PlottingOptions if alignment == None: alignment = self.alignmentToValidate try: result = PlottingOptions(self.config, self.valType) except KeyError: result = {} result.update(alignment.getRepMap()) result.update(self.general) result.update({ "workdir": os.path.join(self.general["workdir"], self.randomWorkdirPart), "datadir": self.general["datadir"], "logdir": self.general["logdir"], "CommandLineTemplate": ("#run configfile and post-proccess it\n" "cmsRun %(cfgFile)s\n" "%(postProcess)s "), "CMSSW_BASE": self.cmssw, "SCRAM_ARCH": self.scramarch, "CMSSW_RELEASE_BASE": self.cmsswreleasebase, "alignmentName": alignment.name, "condLoad": alignment.getConditions(), "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, }) result.update(self.packages) return result @abstractproperty def filesToCompare(self): pass def getCompareStrings( self, requestId = None, plain = False ): result = {} repMap = self.getRepMap().copy() for validationId in self.filesToCompare: repMap["file"] = self.filesToCompare[ validationId ] if repMap["file"].startswith( "/castor/" ): repMap["file"] = "rfio:%(file)s"%repMap elif repMap["file"].startswith( "/store/" ): repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if plain: result[validationId]=repMap["file"] else: result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap if requestId == None: return result else: if not "." in requestId: requestId += ".%s"%self.defaultReferenceName if not requestId.split(".")[-1] in result: msg = ("could not find %s in reference Objects!" 
%requestId.split(".")[-1]) raise AllInOneError(msg) return result[ requestId.split(".")[-1] ] def createFiles(self, fileContents, path, repMap = None, repMaps = None): """repMap: single map for all files repMaps: a dict, with the filenames as the keys""" if repMap is not None and repMaps is not None: raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") result = [] for fileName in fileContents: filePath = os.path.join(path, fileName) result.append(filePath) for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): theFile = open( filePathi, "w" ) fileContentsi = fileContents[ fileName ] if repMaps is not None: repMap = repMaps[fileName] if repMap is not None: repMap.update({"nIndex": str(i)}) fileContentsi = replaceByMap(fileContentsi, repMap) theFile.write( fileContentsi ) theFile.close() return result def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): self.configFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) if not schedule == None: schedule = [os.path.join( path, cfgName) for cfgName in schedule] for cfgName in schedule: if not cfgName in self.configFiles: msg = ("scheduled %s missing in generated configfiles: %s" %(cfgName, self.configFiles)) raise AllInOneError(msg) for cfgName in self.configFiles: if not cfgName in schedule: msg = ("generated configuration %s not scheduled: %s" %(cfgName, schedule)) raise AllInOneError(msg) self.configFiles = schedule return self.configFiles def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): self.scriptFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) for script in self.scriptFiles: for scriptwithindex in addIndex(script, self.NJobs): os.chmod(scriptwithindex,0o755) return self.scriptFiles def createCrabCfg(self, fileContents, path ): if self.NJobs > 1: msg = ("jobmode 'crab' not supported for parallel validation." " Please set parallelJobs = 1.") raise AllInOneError(msg) self.crabConfigFiles = self.createFiles(fileContents, path) return self.crabConfigFiles class GenericValidationData(GenericValidation): """ Subclass of `GenericValidation` which is the base for validations using datasets. """ needParentFiles = False mandatories = {"dataset", "maxevents"} defaults = { "runRange": "", "firstRun": "", "lastRun": "", "begin": "", "end": "", "JSON": "", "dasinstance": "prod/global", "ttrhbuilder":"WithAngleAndTemplate", "usepixelqualityflag": "True", } optionals = {"magneticfield"} def __init__(self, valName, alignment, config): """ This method adds additional items to the `self.general` dictionary which are only needed for validations using datasets. 
Arguments: - `valName`: String which identifies individual validation instances - `alignment`: `Alignment` instance to validate - `config`: `BetterConfigParser` instance which includes the configuration of the validations """ super(GenericValidationData, self).__init__(valName, alignment, config) # if maxevents is not specified, cannot calculate number of events for # each parallel job, and therefore running only a single job if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: msg = ("Maximum number of events (maxevents) not specified: " "cannot use parallel jobs.") raise AllInOneError(msg) if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: msg = ("maxevents has to be divisible by parallelJobs") raise AllInOneError(msg) tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" and self.general["firstRun"] == "" and self.general["lastRun"] == "" and self.general["begin"] == "" and self.general["end"] == "") if self.general["dataset"] not in globalDictionaries.usedDatasets: globalDictionaries.usedDatasets[self.general["dataset"]] = {} if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} Bfield = self.general.get("magneticfield", None) if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: dataset = Dataset( self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, dasinstance = self.general["dasinstance"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] self.general["magneticField"] = self.dataset.magneticField() self.general["defaultMagneticField"] = "MagneticField" if self.general["magneticField"] == "unknown": print("Could not get the magnetic field for this dataset.") print("Using the default: ", self.general["defaultMagneticField"]) self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' if not self.jobmode.split( ',' )[0] == "crab": try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], parent = self.needParentFiles ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str(e) raise AllInOneError(msg) else: if self.dataset.predefined(): msg = ("For jobmode 'crab' you cannot use predefined datasets " "(in your case: '%s')."%( self.dataset.name() )) raise AllInOneError( msg ) try: theUpdate = config.getResultingSection(self.valType+":"+self.name, demandPars = ["parallelJobs"]) except AllInOneError as e: msg = str(e)[:-1]+" when using 'jobmode: crab'." 
raise AllInOneError(msg) self.general.update(theUpdate) if self.general["begin"] or self.general["end"]: ( self.general["begin"], self.general["end"], self.general["firstRun"], self.general["lastRun"] ) = self.dataset.convertTimeToRun( firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], shortTuple = False) if self.general["begin"] == None: self.general["begin"] = "" if self.general["end"] == None: self.general["end"] = "" self.general["firstRun"] = str( self.general["firstRun"] ) self.general["lastRun"] = str( self.general["lastRun"] ) if ( not self.general["firstRun"] ) and \ ( self.general["end"] or self.general["lastRun"] ): self.general["firstRun"] = str( self.dataset.runList()[0]["run_number"]) if ( not self.general["lastRun"] ) and \ ( self.general["begin"] or self.general["firstRun"] ): self.general["lastRun"] = str( self.dataset.runList()[-1]["run_number"]) if self.general["firstRun"] and self.general["lastRun"]: if int(self.general["firstRun"]) > int(self.general["lastRun"]): msg = ( "The lower time/runrange limit ('begin'/'firstRun') " "chosen is greater than the upper time/runrange limit " "('end'/'lastRun').") raise AllInOneError( msg ) self.general["runRange"] = (self.general["firstRun"] + '-' + self.general["lastRun"]) try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], crab = True ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str( e ) raise AllInOneError( msg ) self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag") def getRepMap(self, alignment = None): result = super(GenericValidationData, self).getRepMap(alignment) outputfile = os.path.expandvars(replaceByMap( "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) , result)) resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) , result)) result.update({ "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", "resultFiles": addIndex(resultfile, self.NJobs), "finalResultFile": resultfile, "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", "outputFiles": addIndex(outputfile, self.NJobs), "finalOutputFile": outputfile, "ProcessName": self.ProcessName, "Bookkeeping": self.Bookkeeping, "LoadBasicModules": self.LoadBasicModules, "TrackSelectionRefitting": self.TrackSelectionRefitting, "ValidationConfig": self.ValidationTemplate, "FileOutputTemplate": self.FileOutputTemplate, "DefinePath": self.DefinePath, }) return result @property def cfgName(self): return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, self.alignmentToValidate.name ) @abstractproperty def ProcessName(self): pass @property def cfgTemplate(self): return configTemplates.cfgTemplate @abstractproperty def ValidationTemplate(self): pass @property def filesToCompare(self): return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} def createConfiguration(self, path ): repMap = self.getRepMap() cfgs = {self.cfgName: self.cfgTemplate} super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): scriptName = 
"%s.%s.%s.sh"%(self.scriptBaseName, self.name, self.alignmentToValidate.name ) if repMap is None and repMaps is None: repMap = self.getRepMap() repMap["CommandLine"]="" for cfg in self.configFiles: repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), "postProcess":"" } scripts = {scriptName: template} return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, repMap = repMap, repMaps = repMaps) def createCrabCfg(self, path, crabCfgBaseName): """ Method which creates a `crab.cfg` for a validation on datasets. Arguments: - `path`: Path at which the file will be stored. - `crabCfgBaseName`: String which depends on the actual type of validation calling this method. """ crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name, self.alignmentToValidate.name ) repMap = self.getRepMap() repMap["script"] = "dummy_script.sh" # repMap["crabOutputDir"] = os.path.basename( path ) repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0] self.crabWorkingDir = repMap["crabWorkingDir"] repMap["numberOfJobs"] = self.general["parallelJobs"] repMap["cfgFile"] = self.configFiles[0] repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1] if self.dataset.dataType() == "mc": repMap["McOrData"] = "events = .oO[nEvents]Oo." elif self.dataset.dataType() == "data": repMap["McOrData"] = "lumis = -1" if self.jobmode.split( ',' )[0] == "crab": print ("For jobmode 'crab' the parameter 'maxevents' will be " "ignored and all events will be processed.") else: raise AllInOneError("Unknown data type! Can't run in crab mode") crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate, repMap ) } return super(GenericValidationData, self).createCrabCfg( crabCfg, path ) @property def Bookkeeping(self): return configTemplates.Bookkeeping @property def LoadBasicModules(self): return configTemplates.LoadBasicModules @abstractproperty def TrackSelectionRefitting(self): pass @property def FileOutputTemplate(self): return configTemplates.FileOutputTemplate @abstractproperty def DefinePath(self): pass class GenericValidationData_CTSR(GenericValidationData): #common track selection and refitting defaults = { "momentumconstraint": "None", "openmasswindow": "False", "cosmicsdecomode": "True", "removetrackhitfiltercommands": "", "appendtrackhitfiltercommands": "", } def getRepMap(self, alignment=None): result = super(GenericValidationData_CTSR, self).getRepMap(alignment) from .trackSplittingValidation import TrackSplittingValidation result.update({ "ValidationSequence": self.ValidationSequence, "istracksplitting": str(isinstance(self, TrackSplittingValidation)), "cosmics0T": str(self.cosmics0T), "use_d0cut": str(self.use_d0cut), "ispvvalidation": str(self.isPVValidation) }) commands = [] for removeorappend in "remove", "append": optionname = removeorappend + "trackhitfiltercommands" if result[optionname]: for command in result[optionname].split(","): command = command.strip() commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) result["trackhitfiltercommands"] = "\n".join(commands) return result @property def use_d0cut(self): return "Cosmics" not in self.general["trackcollection"] #use it for collisions only @property def isPVValidation(self): return False # only for PV Validation sequence @property def TrackSelectionRefitting(self): return configTemplates.CommonTrackSelectionRefitting @property def DefinePath(self): return 
configTemplates.DefinePath_CommonSelectionRefitting @abstractproperty def ValidationSequence(self): pass @property def cosmics0T(self): if "Cosmics" not in self.general["trackcollection"]: return False Bfield = self.dataset.magneticFieldForRun() if Bfield < 0.5: return True if isinstance(Bfield, str): if "unknown " in Bfield: msg = Bfield.replace("unknown ","",1) elif Bfield == "unknown": msg = "Can't get the B field for %s." % self.dataset.name() else: msg = "B field = {}???".format(Bfield) raise AllInOneError(msg + "\n" "To use this dataset, specify magneticfield = [value] in your .ini config file.") return False class ParallelValidation(GenericValidation): @classmethod def initMerge(cls): return "" @abstractmethod def appendToMerge(self): pass @classmethod def doInitMerge(cls): from .plottingOptions import PlottingOptions result = cls.initMerge() result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result def doMerge(self): result = self.appendToMerge() if result[-1] != "\n": result += "\n" result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" " xrdcp -f .oO[finalOutputFile]Oo. root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" "fi\n" "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" " mergeRetCode=${tmpMergeRetCode}\n" "fi\n") result = replaceByMap(result, self.getRepMap()) return result class ValidationWithPlots(GenericValidation): @classmethod def runPlots(cls, validations): return ("cp .oO[plottingscriptpath]Oo. .\n" "root -x -b -q .oO[plottingscriptname]Oo.++") @abstractmethod def appendToPlots(self): pass @abstractmethod def plottingscriptname(cls): """override with a classmethod""" @abstractmethod def plottingscripttemplate(cls): """override with a classmethod""" @abstractmethod def plotsdirname(cls): """override with a classmethod""" @classmethod def doRunPlots(cls, validations): from .plottingOptions import PlottingOptions cls.createPlottingScript(validations) result = cls.runPlots(validations) result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result @classmethod def createPlottingScript(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) repmap["PlottingInstantiation"] = "\n".join( replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") for v in validations ) plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) with open(filename, 'w') as f: f.write(plottingscript) class ValidationWithPlotsSummaryBase(ValidationWithPlots): class SummaryItem(object): def __init__(self, name, values, format=None, latexname=None, latexformat=None): """ name: name of the summary item, goes on top of the column values: value for each alignment (in order of rows) format: python format string (default: {:.3g}, meaning up to 3 significant digits) latexname: name in latex form, e.g. 
if name=sigma you might want latexname=\sigma (default: name) latexformat: format for latex (default: format) """ if format is None: format = "{:.3g}" if latexname is None: latexname = name if latexformat is None: latexformat = format self.__name = name self.__values = values self.__format = format self.__latexname = latexname self.__latexformat = latexformat def name(self, latex=False): if latex: return self.__latexname else: return self.__name def format(self, value, latex=False): if latex: fmt = self.__latexformat else: fmt = self.__format if re.match(".*[{][^}]*[fg][}].*", fmt): value = float(value) return fmt.format(value) def values(self, latex=False): result = [self.format(v, latex=latex) for v in self.__values] return result def value(self, i, latex): return self.values(latex)[i] @abstractmethod def getsummaryitems(cls, folder): """override with a classmethod that returns a list of SummaryItems based on the plots saved in folder""" __summaryitems = None __lastfolder = None @classmethod def summaryitemsstring(cls, folder=None, latex=False, transpose=True): if folder is None: folder = cls.plotsdirname() if folder.startswith( "/castor/" ): folder = "rfio:%(file)s"%repMap elif folder.startswith( "/store/" ): folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if cls.__summaryitems is None or cls.__lastfolder != folder: cls.__lastfolder = folder cls.__summaryitems = cls.getsummaryitems(folder) summaryitems = cls.__summaryitems if not summaryitems: raise AllInOneError("No summary items!") size = {len(_.values(latex)) for _ in summaryitems} if len(size) != 1: raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) size = size.pop() if transpose: columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) else: columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] if latex: join = " & " else: join = " " row = join.join("{{:{}}}".format(width) for width in columnwidths) if transpose: rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] else: rows = [] rows.append(row.format(*(_.name for _ in summaryitems))) for i in range(size): rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) if latex: join = " \\\\\n" else: join = "\n" result = join.join(rows) if latex: result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" + result + "\n" + r"\end{tabular}") return result @classmethod def printsummaryitems(cls, *args, **kwargs): print(cls.summaryitemsstring(*args, **kwargs)) @classmethod def writesummaryitems(cls, filename, *args, **kwargs): with open(filename, "w") as f: f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): @classmethod def getsummaryitems(cls, folder): result = [] with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: for line in f: split = line.rstrip("\n").split("\t") kwargs = {} for thing in split[:]: if thing.startswith("format="): kwargs["format"] = thing.replace("format=", "", 1) split.remove(thing) if thing.startswith("latexname="): kwargs["latexname"] = thing.replace("latexname=", "", 1) split.remove(thing) if thing.startswith("latexformat="): kwargs["latexformat"] = thing.replace("latexformat=", "", 1) split.remove(thing) name = split[0] values = split[1:] result.append(cls.SummaryItem(name, values, **kwargs)) return result class 
ValidationWithComparison(GenericValidation): @classmethod def doComparison(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) comparison = replaceByMap(cls.comparisontemplate(), repmap) return comparison @classmethod def comparisontemplate(cls): return configTemplates.compareAlignmentsExecution @classmethod def comparealignmentspath(cls): return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." @abstractmethod def comparealignmentsname(cls): """classmethod""" class ValidationForPresentation(ValidationWithPlots): @abstractmethod def presentationsubsections(cls): """classmethod"""
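As a usage illustration of the `SummaryItem` class defined above: a minimal sketch with hypothetical values, assuming the class is in scope (in the original it is nested inside `ValidationWithPlotsSummaryBase`):

```python
# Hypothetical usage of SummaryItem; values and formats are illustrative.
item = SummaryItem("sigma", [0.1234, 0.5678],
                   latexname=r"\sigma", latexformat="${:.2f}$")

print(item.name())            # sigma
print(item.values())          # ['0.123', '0.568']  (default {:.3g} format)
print(item.name(latex=True))  # \sigma
print(item.value(1, True))    # $0.57$  (latex format applied to the second value)
```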
main
Main entry point.

Keyword arguments:
    searchpath (str), default: "."
        Search path for .cfg files.
    display (str), default: "all"
        List of metadata to display in search results.
    keywords (str), default: None
        Comma delimited list of keywords for filtering search results.
    features (str), default: None
        Comma delimited list of features for filtering search results.
    authors (str), default: None
        Comma delimited list of authors for filtering search results.
    version (str), default: None
        PyLith version for filtering search results.
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# Application for searching PyLith .cfg files.

import sys
import argparse
import pathlib
import textwrap
import os

from pylith.utils.converters import string_to_list
from pylith.utils.SimulationMetadata import fromFile


class ConfigSearchApp():
    """Application for searching PyLith .cfg files.
    """

    def __init__(self):
        """Constructor.
        """
        self.filters = {}

    # MASKED: main function (lines 36-70)

    def _set_filters(self, options):
        """Set filters for display from command line option.

        Args:
            options (argparse.Namespace)
                Command line options.
        """
        if options.keywords:
            self.filters["keywords"] = string_to_list(options.keywords)
        if options.features:
            self.filters["features"] = string_to_list(options.features)
        if options.authors:
            self.filters["authors"] = string_to_list(options.authors)
        if options.version:
            self.filters["version"] = options.version

    def _apply_filters(self, metadata):
        """Apply filters to metadata.

        Args:
            metadata (pylith.utils.SimulationMetadata)
                Simulation metadata.
        Returns: (bool)
            True if metadata meets filter requirements, False otherwise.
        """
        if "keywords" in self.filters:
            if not metadata.keywords:
                return False
            if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]):
                return False

        if "features" in self.filters:
            if not metadata.features:
                return False
            if not all(feature in metadata.features for feature in self.filters["features"]):
                return False

        if "authors" in self.filters:
            if not metadata.authors:
                return False
            if not all(author in metadata.authors for author in self.filters["authors"]):
                return False

        if "version" in self.filters:
            if not metadata.pylith_version:
                return False
            for verMeta in metadata.pylith_version:
                if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)):
                    return False

        return True

    def _apply_filters_incompatible(self, metadata):
        """Apply filters to metadata to find incompatible parameter files.

        Args:
            metadata (pylith.utils.SimulationMetadata)
                Simulation metadata.
        Returns: (bool)
            True if metadata is incompatible with filter requirements, False otherwise.
        """
        if "keywords" in self.filters:
            if not metadata.keywords:
                return True
        if "features" in self.filters:
            if not metadata.features:
                return True
        if "authors" in self.filters:
            if not metadata.authors:
                return True
        if "version" in self.filters:
            if not metadata.pylith_version:
                return True
            for verMeta in metadata.pylith_version:
                if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)):
                    return True
        return False

    def _display_metadata(self, filename, metadata, options):
        """Print metadata to stdout.

        Args:
            filename (str)
                Name of simulation .cfg file.
            metadata (pylith.utils.SimulationMetadata)
                Simulation metadata.
            options (list of str)
                List of metadata to display.
        """
        INDENT = " "*4

        show_all = "all" in options
        options = string_to_list(options)

        line0 = f"{filename}"
        if "version" in options or show_all:
            if metadata.version:
                line0 += f" v{metadata.version}"
            else:
                line0 += " missing 'version'"
        if "pylith_version" in options or show_all:
            if metadata.pylith_version:
                line0 += "; requires PyLith " + " and ".join(metadata.pylith_version)
            else:
                line0 += "; missing 'pylith_version'"

        lines = []
        if "description" in options or show_all:
            if metadata.description:
                lines += [metadata.description]
            else:
                lines += ["missing 'description'"]
        if "authors" in options or show_all:
            if metadata.authors:
                lines += ["Authors: " + ", ".join(metadata.authors)]
            else:
                lines += ["missing 'authors'"]
        if "keywords" in options or show_all:
            if metadata.keywords:
                lines += ["Keywords: " + ", ".join(metadata.keywords)]
            else:
                lines += ["missing 'keywords'"]
        if "features" in options or show_all:
            if metadata.features:
                features = textwrap.fill(", ".join(metadata.features), width=120)
                lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n")
            else:
                lines += ["missing 'features'"]
        if "arguments" in options or show_all:
            if metadata.arguments:
                lines += ["pylith " + " ".join(metadata.arguments)]
            else:
                lines += ["missing 'arguments'"]

        print(line0)
        if len(lines):
            print(textwrap.indent("\n".join(lines), INDENT))

    def _parse_command_line(self):
        """Parse command line arguments.

        Returns (argparse.Namespace)
            Command line arguments.
        """
        DESCRIPTION = (
            "Application for searching PyLith .cfg parameter files."
        )

        parser = argparse.ArgumentParser(description=DESCRIPTION,
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument("--path", action="store", dest="searchpath", default=".",
                            help="Search path for .cfg files.")
        parser.add_argument("--display", action="store", dest="display", default="all",
                            help="List of metadata to display in search results.")
        parser.add_argument("--verbose", action="store_true", dest="verbose",
                            help="Report missing metadata.")
        parser.add_argument("--keywords", action="store", dest="keywords",
                            help="Comma delimited list of keywords for filtering search results.")
        parser.add_argument("--features", action="store", dest="features",
                            help="Comma delimited list of features for filtering search results.")
        parser.add_argument("--authors", action="store", dest="authors",
                            help="Comma delimited list of authors for filtering search results.")
        parser.add_argument("--version", action="store", dest="version",
                            help="PyLith version for filtering search results.")
        parser.add_argument("--incompatible", action="store_true", dest="incompatible",
                            help="Filter search results to show incompatible parameter files.")
        args = parser.parse_args()

        return args


# End of file
def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.")
36
70
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." 
) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
_apply_filters
Apply filters to metadata.

Args:
    metadata (pylith.utils.SimulationMetadata)
        Simulation metadata.

Returns: (bool)
    True if metadata meets filter requirements, False otherwise.
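A quick illustration of the matching rule (not part of the record): each filter is conjunctive, so every requested keyword, feature, or author must appear in the file's metadata for it to pass. A minimal sketch of the `all()` idiom the method relies on, with hypothetical values:

```python
# Conjunctive matching, as in _apply_filters: every requested keyword
# must appear in the metadata keywords for the file to pass.
requested = ["fault", "3d"]                    # hypothetical --keywords values
metadata_keywords = ["fault", "3d", "static"]  # hypothetical file metadata
print(all(kw in metadata_keywords for kw in requested))  # True
print(all(kw in ["fault"] for kw in requested))          # False: "3d" missing
```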
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version # MASKED: _apply_filters function (lines 88-119) def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. 
options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." ) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True
88
119
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." 
) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
_apply_filters_incompatible
Apply filters to metadata to find incompatible parameter files.

Args:
    metadata (pylith.utils.SimulationMetadata)
        Simulation metadata.

Returns: (bool)
    True if metadata is incompatible with filter requirements, False otherwise.
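The version filter deserves a note: both `_apply_filters` and `_apply_filters_incompatible` build a Python expression by string concatenation and hand it to `eval()`. A sketch of what actually gets evaluated, with hypothetical values:

```python
ver = "3.0"        # hypothetical --version value
verMeta = ">=2.2"  # hypothetical entry from metadata.pylith_version
expr = "{ver} {verMeta}".format(ver=ver, verMeta=verMeta)
print(expr)        # 3.0 >=2.2
print(eval(expr))  # True -- the two sides are compared as floats
# Caveats: a three-component version such as "3.0.1" is not a valid
# Python literal (SyntaxError), and eval() on unchecked input is
# generally unsafe; a packaging-style version comparison would be
# more robust.
```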
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True # MASKED: _apply_filters_incompatible function (lines 121-146) def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." 
) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
def _apply_filters_incompatible(self, metadata):
    """Apply filters to metadata to find incompatible parameter files.

    Args:
        metadata (pylith.utils.SimulationMetadata)
            Simulation metadata.

    Returns: (bool)
        True if metadata is incompatible with filter requirements, False otherwise.
    """
    if "keywords" in self.filters:
        if not metadata.keywords:
            return True
    if "features" in self.filters:
        # Use attribute access, matching the keywords check above and
        # _apply_filters; the original tested '"features" in metadata',
        # which assumes SimulationMetadata supports membership tests.
        if not metadata.features:
            return True
    if "authors" in self.filters:
        if not metadata.authors:
            return True
    if "version" in self.filters:
        if not metadata.pylith_version:
            return True
        for verMeta in metadata.pylith_version:
            if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)):
                return True
    return False
121
146
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." 
) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
_display_metadata
Print metadata to stdout.

Args:
    filename (str)
        Name of simulation .cfg file.
    metadata (pylith.utils.SimulationMetadata)
        Simulation metadata.
    options (list of str)
        List of metadata to display.
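To make the layout concrete, here is what one search hit might look like with `--display=all`; the file name and metadata values below are invented for illustration only:

```
step01_slip.cfg v1.0.0; requires PyLith >=3.0 and <4.0
    Coseismic slip on a strike-slip fault.
    Authors: Jane Doe
    Keywords: fault, prescribed slip
    Features:
        Static simulation, field split preconditioner
    pylith step01_slip.cfg
```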
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False # MASKED: _display_metadata function (lines 148-204) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." ) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT))
148
204
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." 
) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
_parse_command_line
Parse command line arguments.

Returns (argparse.Namespace)
    Command line arguments.
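A small sketch of exercising the parser directly, assuming the class is importable (hypothetical module path). Because `parse_args()` reads `sys.argv`, the demonstration patches it before calling the method:

```python
import sys
from pylith.apps.ConfigSearchApp import ConfigSearchApp  # hypothetical path

app = ConfigSearchApp()
# argparse consumes sys.argv[1:], so element 0 is just a program name.
sys.argv = ["cfgsearch", "--path=examples", "--keywords=fault,3d", "--verbose"]
args = app._parse_command_line()
print(args.searchpath, args.keywords, args.verbose)  # examples fault,3d True
```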
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) # MASKED: _parse_command_line function (lines 206-238) # End of file
def _parse_command_line(self):
    """Parse command line arguments.

    Returns (argparse.Namespace)
        Command line arguments.
    """
    DESCRIPTION = (
        "Application for searching PyLith .cfg parameter files."
    )

    parser = argparse.ArgumentParser(description=DESCRIPTION,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--path", action="store", dest="searchpath", default=".",
                        help="Search path for .cfg files.")
    parser.add_argument("--display", action="store", dest="display", default="all",
                        help="List of metadata to display in search results.")
    parser.add_argument("--verbose", action="store_true", dest="verbose",
                        help="Report missing metadata.")
    parser.add_argument("--keywords", action="store", dest="keywords",
                        help="Comma delimited list of keywords for filtering search results.")
    parser.add_argument("--features", action="store", dest="features",
                        help="Comma delimited list of features for filtering search results.")
    parser.add_argument("--authors", action="store", dest="authors",
                        help="Comma delimited list of authors for filtering search results.")
    parser.add_argument("--version", action="store", dest="version",
                        help="PyLith version for filtering search results.")
    parser.add_argument("--incompatible", action="store_true", dest="incompatible",
                        help="Filter search results to show incompatible parameter files.")

    args = parser.parse_args()
    return args
206
238
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." 
) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
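`main()` accepts keyword arguments as an alternative to the command line, but it reads `args.display`, `args.verbose`, and `args.incompatible` unconditionally, so a programmatic caller should supply every key the parser would otherwise default. A hedged usage sketch (the import path is an assumption):

```python
# Sketch of driving ConfigSearchApp programmatically; the module path is an
# assumption. Every keyword is supplied because argparse.Namespace(**kwargs)
# only carries the keys it is given.
from pylith.apps.ConfigSearchApp import ConfigSearchApp  # assumed location

app = ConfigSearchApp()
app.main(
    searchpath="examples",   # directory tree to scan for .cfg files
    display="all",           # metadata fields to print
    keywords="fault",        # keep only files whose metadata has this keyword
    features=None,
    authors=None,
    version=None,
    verbose=True,            # report files with missing metadata
    incompatible=False,      # show matches rather than mismatches
)
```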
list_user_usage
A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread.
# coding: utf-8 """ Purity//FB REST Client Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/). OpenAPI spec version: 1.6 Contact: info@purestorage.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class UsageUsersApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client # MASKED: list_user_usage function (lines 43-73) def list_user_usage_with_http_info(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'uids'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_user_usage" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'sort' in params: query_params.append(('sort', params['sort'])) if 'start' in params: query_params.append(('start', params['start'])) if 'token' in params: query_params.append(('token', params['token'])) if 'file_system_names' in params: query_params.append(('file_system_names', params['file_system_names'])) collection_formats['file_system_names'] = 'csv' if 'uids' in params: query_params.append(('uids', params['uids'])) collection_formats['uids'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.6/usage/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='QuotasUserResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
def list_user_usage(self, **kwargs):
    """
    A list of usage user entries
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.list_user_usage(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A list of names.
    :param str filter: The filter to be used for query.
    :param int limit: limit, should be >= 0
    :param str sort: The way to order the results.
    :param int start: start
    :param str token: token
    :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
    :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.
    :return: QuotasUserResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.list_user_usage_with_http_info(**kwargs)
    else:
        (data) = self.list_user_usage_with_http_info(**kwargs)
        return data
43
73
# coding: utf-8 """ Purity//FB REST Client Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/). OpenAPI spec version: 1.6 Contact: info@purestorage.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class UsageUsersApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def list_user_usage(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_user_usage_with_http_info(**kwargs) else: (data) = self.list_user_usage_with_http_info(**kwargs) return data def list_user_usage_with_http_info(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. 
This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. """ all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'uids'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_user_usage" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'sort' in params: query_params.append(('sort', params['sort'])) if 'start' in params: query_params.append(('start', params['start'])) if 'token' in params: query_params.append(('token', params['token'])) if 'file_system_names' in params: query_params.append(('file_system_names', params['file_system_names'])) collection_formats['file_system_names'] = 'csv' if 'uids' in params: query_params.append(('uids', params['uids'])) collection_formats['uids'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.6/usage/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='QuotasUserResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
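The docstring's doctest shows the callback form; the sketch below adds the synchronous form, which returns the deserialized `QuotasUserResponse` directly. The client construction is an assumption about the usual purity_fb entry points:

```python
from pprint import pprint
from purity_fb import PurityFb  # assumed package entry point

# Assumed setup: the PurityFb client exposes a .usage_users attribute that
# is an instance of UsageUsersApi.
fb = PurityFb("fb.example.com")   # placeholder hostname
fb.login("T-00000000")            # placeholder API token

# Synchronous call: returns a QuotasUserResponse.
response = fb.usage_users.list_user_usage(file_system_names=["fs1"], limit=10)
pprint(response)

# Asynchronous call: the callback receives the response on another thread.
thread = fb.usage_users.list_user_usage(callback=pprint, limit=10)
```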
list_user_usage_with_http_info
A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread.
# coding: utf-8 """ Purity//FB REST Client Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/). OpenAPI spec version: 1.6 Contact: info@purestorage.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class UsageUsersApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def list_user_usage(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_user_usage_with_http_info(**kwargs) else: (data) = self.list_user_usage_with_http_info(**kwargs) return data # MASKED: list_user_usage_with_http_info function (lines 75-169)
def list_user_usage_with_http_info(self, **kwargs):
    """
    A list of usage user entries
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.list_user_usage_with_http_info(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A list of names.
    :param str filter: The filter to be used for query.
    :param int limit: limit, should be >= 0
    :param str sort: The way to order the results.
    :param int start: start
    :param str token: token
    :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
    :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter.
    :return: QuotasUserResponse
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'uids']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_user_usage" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    if 'filter' in params:
        query_params.append(('filter', params['filter']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'sort' in params:
        query_params.append(('sort', params['sort']))
    if 'start' in params:
        query_params.append(('start', params['start']))
    if 'token' in params:
        query_params.append(('token', params['token']))
    if 'file_system_names' in params:
        query_params.append(('file_system_names', params['file_system_names']))
        collection_formats['file_system_names'] = 'csv'
    if 'uids' in params:
        query_params.append(('uids', params['uids']))
        collection_formats['uids'] = 'csv'

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # Authentication setting
    auth_settings = ['AuthTokenHeader']

    return self.api_client.call_api('/1.6/usage/users', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='QuotasUserResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
75
169
# coding: utf-8 """ Purity//FB REST Client Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/). OpenAPI spec version: 1.6 Contact: info@purestorage.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class UsageUsersApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def list_user_usage(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_user_usage_with_http_info(**kwargs) else: (data) = self.list_user_usage_with_http_info(**kwargs) return data def list_user_usage_with_http_info(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. 
This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. """ all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'uids'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_user_usage" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'sort' in params: query_params.append(('sort', params['sort'])) if 'start' in params: query_params.append(('start', params['start'])) if 'token' in params: query_params.append(('token', params['token'])) if 'file_system_names' in params: query_params.append(('file_system_names', params['file_system_names'])) collection_formats['file_system_names'] = 'csv' if 'uids' in params: query_params.append(('uids', params['uids'])) collection_formats['uids'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.6/usage/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='QuotasUserResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
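When no callback is given and `_return_http_data_only` is not set, swagger-codegen clients conventionally return a `(data, status_code, headers)` tuple from the `*_with_http_info` variant; the sketch below relies on that convention and assumes a configured `api` instance:

```python
# Assumes `api` is a configured UsageUsersApi instance.
# Swagger-codegen convention: without a callback or _return_http_data_only,
# the *_with_http_info variant returns (data, status_code, headers).
data, status, headers = api.list_user_usage_with_http_info(
    uids=[1000, 1001],   # serialized as a csv query parameter
    limit=5,
)
print(status)            # e.g. 200
print(data)              # the deserialized QuotasUserResponse
```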
fetch
Fetch data. This method fetches all data of the dataset/view. Note that this method returns column-major data (i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ..., c[3]])`, :obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or :obj:`[a[0], ..., a[3]]`). Returns: If :attr:`mode` is :class:`tuple`, this method returns a tuple of lists/arrays. If :attr:`mode` is :class:`dict`, this method returns a dict of lists/arrays.
import numpy import torch import pytorch_pfn_extras as ppe from torch.utils.data import Dataset class TabularDataset(Dataset): """An abstract class that represents tabular dataset. This class represents a tabular dataset. In a tabular dataset, all examples have the same number of elements. For example, all examples of the dataset below have three elements (:obj:`a[i]`, :obj:`b[i]`, and :obj:`c[i]`). .. csv-table:: :header: , a, b, c 0, :obj:`a[0]`, :obj:`b[0]`, :obj:`c[0]` 1, :obj:`a[1]`, :obj:`b[1]`, :obj:`c[1]` 2, :obj:`a[2]`, :obj:`b[2]`, :obj:`c[2]` 3, :obj:`a[3]`, :obj:`b[3]`, :obj:`c[3]` Since an example can be represented by both tuple and dict ( :obj:`(a[i], b[i], c[i])` and :obj:`{'a': a[i], 'b': b[i], 'c': c[i]}`), this class uses :attr:`mode` to indicate which representation will be used. If there is only one column, an example also can be represented by a value (:obj:`a[i]`). In this case, :attr:`mode` is :obj:`None`. An inheritance should implement :meth:`__len__`, :attr:`keys`, :attr:`mode` and :meth:`get_examples`. >>> import numpy as np >>> >>> from pytorch_pfn_extras import dataset >>> >>> class MyDataset(dataset.TabularDataset): ... ... def __len__(self): ... return 4 ... ... @property ... def keys(self): ... return ('a', 'b', 'c') ... ... @property ... def mode(self): ... return tuple ... ... def get_examples(self, indices, key_indices): ... data = np.arange(12).reshape((4, 3)) ... if indices is not None: ... data = data[indices] ... if key_indices is not None: ... data = data[:, list(key_indices)] ... return tuple(data.transpose()) ... >>> dataset = MyDataset() >>> len(dataset) 4 >>> dataset.keys ('a', 'b', 'c') >>> dataset.astuple()[0] (0, 1, 2) >>> sorted(dataset.asdict()[0].items()) [('a', 0), ('b', 1), ('c', 2)] >>> >>> view = dataset.slice[[3, 2], ('c', 0)] >>> len(view) 2 >>> view.keys ('c', 'a') >>> view.astuple()[1] (8, 6) >>> sorted(view.asdict()[1].items()) [('a', 6), ('c', 8)] """ def __len__(self): raise NotImplementedError @property def keys(self): """Names of columns. A tuple of strings that indicate the names of columns. """ raise NotImplementedError @property def mode(self): """Mode of representation. This indicates the type of value returned by :meth:`fetch` and :meth:`__getitem__`. :class:`tuple`, :class:`dict`, and :obj:`None` are supported. """ raise NotImplementedError def get_examples(self, indices, key_indices): """Return a part of data. Args: indices (list of ints or slice): Indices of requested rows. If this argument is :obj:`None`, it indicates all rows. key_indices (tuple of ints): Indices of requested columns. If this argument is :obj:`None`, it indicates all columns. Returns: tuple of lists/arrays """ raise NotImplementedError @property def slice(self): """Get a slice of dataset. Args: indices (list/array of ints/bools or slice): Requested rows. keys (tuple of ints/strs or int or str): Requested columns. Returns: A view of specified range. """ return ppe.dataset.tabular._slice._SliceHelper(self) # MASKED: fetch function (lines 128-149) def convert(self, data): """Convert fetched data. This method takes data fetched by :meth:`fetch` and pre-process them before passing them to models. The default behaviour is converting each column into an ndarray. This behaviour can be overridden by :meth:`with_converter`. If the dataset is constructed by :meth:`concat` or :meth:`join`, the converter of the first dataset is used. Args: data (tuple or dict): Data from :meth:`fetch`. Returns: A tuple or dict. Each value is an ndarray. 
""" if isinstance(data, tuple): return tuple(_as_array(d) for d in data) elif isinstance(data, dict): return {k: _as_array(v) for k, v in data.items()} else: return _as_array(data) def astuple(self): """Return a view with tuple mode. Returns: A view whose :attr:`mode` is :class:`tuple`. """ return ppe.dataset.tabular._asmode._Astuple(self) def asdict(self): """Return a view with dict mode. Returns: A view whose :attr:`mode` is :class:`dict`. """ return ppe.dataset.tabular._asmode._Asdict(self) def concat(self, *datasets): """Stack datasets along rows. Args: datasets (iterable of :class:`TabularDataset`): Datasets to be concatenated. All datasets must have the same :attr:`keys`. Returns: A concatenated dataset. """ return ppe.dataset.tabular._concat._Concat( self, *datasets) def join(self, *datasets): """Stack datasets along columns. Args: datasets (iterable of :class:`TabularDataset`): Datasets to be concatenated. All datasets must have the same length Returns: A joined dataset. """ return ppe.dataset.tabular._join._Join(self, *datasets) def transform(self, keys, transform): """Apply a transform to each example. The transformations are a list where each element is a tuple that holds the transformation signature and a callable that is the transformation itself. The transformation signature is a tuple of 2 elements with the first one being the keys of the dataset that are taken as inputs. And the last one the outputs it produces for the transformation `keys` argument. When multiple transformations are specified, the outputs must be disjoint or `ValueError` will be risen. Args: keys (tuple of strs): The keys of transformed examples. transform (list of tuples): A list where each element specifies a transformation with a tuple with the transformation signature and a callable that takes an example and returns transformed example. :attr:`mode` of transformed dataset is determined by the transformed examples. Returns: A transfromed dataset. """ return ppe.dataset.tabular._transform._Transform( self, keys, transform) def transform_batch(self, keys, transform_batch): """Apply a transform to examples. The transformations are a list where each element is a tuple that holds the transformation signature and a callable that is the transformation itself. The transformation signature is a tuple of 2 elements with the first one being the keys of the dataset that are taken as inputs. And the last one the outputs it produces for the transformation `keys` argument. When multiple transformations are specified, the outputs must be disjoint or `ValueError` will be risen. Args: keys (tuple of strs): The keys of transformed examples. transform_batch (list of tuples): A list where each element specifies a transformation with a tuple with the transformation signature and a callable that takes a batch of examples and returns a batch of transformed examples. :attr:`mode` of transformed dataset is determined by the transformed examples. Returns: A transfromed dataset. """ return ppe.dataset.tabular._transform._TransformBatch( self, keys, transform_batch) def with_converter(self, converter): """Override the behaviour of :meth:`convert`. This method overrides :meth:`convert`. Args: converter (callable): A new converter. Returns: A dataset with the new converter. 
""" return ppe.dataset.tabular._with_converter._WithConverter( self, converter) def get_example(self, i): example = self.get_examples([i], None) example = tuple(col[0] for col in example) if self.mode is tuple: return example elif self.mode is dict: return dict(zip(self.keys, example)) elif self.mode is None: return example[0] def __iter__(self): return (self.get_example(i) for i in range(len(self))) def __getitem__(self, index): """Returns an example or a sequence of examples. It implements the standard Python indexing and one-dimensional integer array indexing. It uses the :meth:`get_example` method by default, but it may be overridden by the implementation to, for example, improve the slicing performance. Args: index (int, slice, list or numpy.ndarray): An index of an example or indexes of examples. Returns: If index is int, returns an example created by `get_example`. If index is either slice or one-dimensional list or numpy.ndarray, returns a list of examples created by `get_example`. """ if isinstance(index, slice): current, stop, step = index.indices(len(self)) return [self.get_example(i) for i in range(current, stop, step)] elif isinstance(index, list) or isinstance(index, numpy.ndarray): return [self.get_example(i) for i in index] else: return self.get_example(index) def _as_array(data): if isinstance(data, (numpy.ndarray, torch.Tensor)): return data else: return numpy.array(data)
def fetch(self):
    """Fetch data.

    This method fetches all data of the dataset/view.
    Note that this method returns column-major data
    (i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ..., c[3]])`,
    :obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or
    :obj:`[a[0], ..., a[3]]`).

    Returns:
        If :attr:`mode` is :class:`tuple`,
        this method returns a tuple of lists/arrays.
        If :attr:`mode` is :class:`dict`,
        this method returns a dict of lists/arrays.
    """
    examples = self.get_examples(None, None)
    if self.mode is tuple:
        return examples
    elif self.mode is dict:
        return dict(zip(self.keys, examples))
    elif self.mode is None:
        return examples[0]
128
149
import numpy import torch import pytorch_pfn_extras as ppe from torch.utils.data import Dataset class TabularDataset(Dataset): """An abstract class that represents tabular dataset. This class represents a tabular dataset. In a tabular dataset, all examples have the same number of elements. For example, all examples of the dataset below have three elements (:obj:`a[i]`, :obj:`b[i]`, and :obj:`c[i]`). .. csv-table:: :header: , a, b, c 0, :obj:`a[0]`, :obj:`b[0]`, :obj:`c[0]` 1, :obj:`a[1]`, :obj:`b[1]`, :obj:`c[1]` 2, :obj:`a[2]`, :obj:`b[2]`, :obj:`c[2]` 3, :obj:`a[3]`, :obj:`b[3]`, :obj:`c[3]` Since an example can be represented by both tuple and dict ( :obj:`(a[i], b[i], c[i])` and :obj:`{'a': a[i], 'b': b[i], 'c': c[i]}`), this class uses :attr:`mode` to indicate which representation will be used. If there is only one column, an example also can be represented by a value (:obj:`a[i]`). In this case, :attr:`mode` is :obj:`None`. An inheritance should implement :meth:`__len__`, :attr:`keys`, :attr:`mode` and :meth:`get_examples`. >>> import numpy as np >>> >>> from pytorch_pfn_extras import dataset >>> >>> class MyDataset(dataset.TabularDataset): ... ... def __len__(self): ... return 4 ... ... @property ... def keys(self): ... return ('a', 'b', 'c') ... ... @property ... def mode(self): ... return tuple ... ... def get_examples(self, indices, key_indices): ... data = np.arange(12).reshape((4, 3)) ... if indices is not None: ... data = data[indices] ... if key_indices is not None: ... data = data[:, list(key_indices)] ... return tuple(data.transpose()) ... >>> dataset = MyDataset() >>> len(dataset) 4 >>> dataset.keys ('a', 'b', 'c') >>> dataset.astuple()[0] (0, 1, 2) >>> sorted(dataset.asdict()[0].items()) [('a', 0), ('b', 1), ('c', 2)] >>> >>> view = dataset.slice[[3, 2], ('c', 0)] >>> len(view) 2 >>> view.keys ('c', 'a') >>> view.astuple()[1] (8, 6) >>> sorted(view.asdict()[1].items()) [('a', 6), ('c', 8)] """ def __len__(self): raise NotImplementedError @property def keys(self): """Names of columns. A tuple of strings that indicate the names of columns. """ raise NotImplementedError @property def mode(self): """Mode of representation. This indicates the type of value returned by :meth:`fetch` and :meth:`__getitem__`. :class:`tuple`, :class:`dict`, and :obj:`None` are supported. """ raise NotImplementedError def get_examples(self, indices, key_indices): """Return a part of data. Args: indices (list of ints or slice): Indices of requested rows. If this argument is :obj:`None`, it indicates all rows. key_indices (tuple of ints): Indices of requested columns. If this argument is :obj:`None`, it indicates all columns. Returns: tuple of lists/arrays """ raise NotImplementedError @property def slice(self): """Get a slice of dataset. Args: indices (list/array of ints/bools or slice): Requested rows. keys (tuple of ints/strs or int or str): Requested columns. Returns: A view of specified range. """ return ppe.dataset.tabular._slice._SliceHelper(self) def fetch(self): """Fetch data. This method fetches all data of the dataset/view. Note that this method returns a column-major data (i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ... c[3]])`, :obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or :obj:`[a[0], ..., a[3]]`). Returns: If :attr:`mode` is :class:`tuple`, this method returns a tuple of lists/arrays. If :attr:`mode` is :class:`dict`, this method returns a dict of lists/arrays. 
""" examples = self.get_examples(None, None) if self.mode is tuple: return examples elif self.mode is dict: return dict(zip(self.keys, examples)) elif self.mode is None: return examples[0] def convert(self, data): """Convert fetched data. This method takes data fetched by :meth:`fetch` and pre-process them before passing them to models. The default behaviour is converting each column into an ndarray. This behaviour can be overridden by :meth:`with_converter`. If the dataset is constructed by :meth:`concat` or :meth:`join`, the converter of the first dataset is used. Args: data (tuple or dict): Data from :meth:`fetch`. Returns: A tuple or dict. Each value is an ndarray. """ if isinstance(data, tuple): return tuple(_as_array(d) for d in data) elif isinstance(data, dict): return {k: _as_array(v) for k, v in data.items()} else: return _as_array(data) def astuple(self): """Return a view with tuple mode. Returns: A view whose :attr:`mode` is :class:`tuple`. """ return ppe.dataset.tabular._asmode._Astuple(self) def asdict(self): """Return a view with dict mode. Returns: A view whose :attr:`mode` is :class:`dict`. """ return ppe.dataset.tabular._asmode._Asdict(self) def concat(self, *datasets): """Stack datasets along rows. Args: datasets (iterable of :class:`TabularDataset`): Datasets to be concatenated. All datasets must have the same :attr:`keys`. Returns: A concatenated dataset. """ return ppe.dataset.tabular._concat._Concat( self, *datasets) def join(self, *datasets): """Stack datasets along columns. Args: datasets (iterable of :class:`TabularDataset`): Datasets to be concatenated. All datasets must have the same length Returns: A joined dataset. """ return ppe.dataset.tabular._join._Join(self, *datasets) def transform(self, keys, transform): """Apply a transform to each example. The transformations are a list where each element is a tuple that holds the transformation signature and a callable that is the transformation itself. The transformation signature is a tuple of 2 elements with the first one being the keys of the dataset that are taken as inputs. And the last one the outputs it produces for the transformation `keys` argument. When multiple transformations are specified, the outputs must be disjoint or `ValueError` will be risen. Args: keys (tuple of strs): The keys of transformed examples. transform (list of tuples): A list where each element specifies a transformation with a tuple with the transformation signature and a callable that takes an example and returns transformed example. :attr:`mode` of transformed dataset is determined by the transformed examples. Returns: A transfromed dataset. """ return ppe.dataset.tabular._transform._Transform( self, keys, transform) def transform_batch(self, keys, transform_batch): """Apply a transform to examples. The transformations are a list where each element is a tuple that holds the transformation signature and a callable that is the transformation itself. The transformation signature is a tuple of 2 elements with the first one being the keys of the dataset that are taken as inputs. And the last one the outputs it produces for the transformation `keys` argument. When multiple transformations are specified, the outputs must be disjoint or `ValueError` will be risen. Args: keys (tuple of strs): The keys of transformed examples. 
transform_batch (list of tuples): A list where each element specifies a transformation with a tuple with the transformation signature and a callable that takes a batch of examples and returns a batch of transformed examples. :attr:`mode` of transformed dataset is determined by the transformed examples. Returns: A transfromed dataset. """ return ppe.dataset.tabular._transform._TransformBatch( self, keys, transform_batch) def with_converter(self, converter): """Override the behaviour of :meth:`convert`. This method overrides :meth:`convert`. Args: converter (callable): A new converter. Returns: A dataset with the new converter. """ return ppe.dataset.tabular._with_converter._WithConverter( self, converter) def get_example(self, i): example = self.get_examples([i], None) example = tuple(col[0] for col in example) if self.mode is tuple: return example elif self.mode is dict: return dict(zip(self.keys, example)) elif self.mode is None: return example[0] def __iter__(self): return (self.get_example(i) for i in range(len(self))) def __getitem__(self, index): """Returns an example or a sequence of examples. It implements the standard Python indexing and one-dimensional integer array indexing. It uses the :meth:`get_example` method by default, but it may be overridden by the implementation to, for example, improve the slicing performance. Args: index (int, slice, list or numpy.ndarray): An index of an example or indexes of examples. Returns: If index is int, returns an example created by `get_example`. If index is either slice or one-dimensional list or numpy.ndarray, returns a list of examples created by `get_example`. """ if isinstance(index, slice): current, stop, step = index.indices(len(self)) return [self.get_example(i) for i in range(current, stop, step)] elif isinstance(index, list) or isinstance(index, numpy.ndarray): return [self.get_example(i) for i in index] else: return self.get_example(index) def _as_array(data): if isinstance(data, (numpy.ndarray, torch.Tensor)): return data else: return numpy.array(data)
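Reusing the `MyDataset` example class from the `TabularDataset` docstring above, `fetch()` returns whole columns at once, while indexing goes through `get_example()` and returns rows:

```python
# MyDataset is the three-column example from the TabularDataset docstring
# (keys ('a', 'b', 'c'), mode tuple, rows from np.arange(12).reshape(4, 3)).
dataset = MyDataset()

dataset.fetch()
# -> (array([0, 3, 6, 9]), array([ 1,  4,  7, 10]), array([ 2,  5,  8, 11]))

dataset[0]
# -> (0, 1, 2)   row access via get_example()

dataset.asdict().fetch()
# -> {'a': array([0, 3, 6, 9]), 'b': array([ 1,  4,  7, 10]),
#     'c': array([ 2,  5,  8, 11])}
```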
run
Run a script in a separate thread and start a server for the app. This starts a blocking ioloop. Parameters ---------- script_path : str command_line : str args : [str] flag_options : Dict[str, Any]
# Copyright 2018-2021 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import signal import sys from typing import Any, Dict import click import tornado.ioloop from streamlit.git_util import GitRepo, MIN_GIT_VERSION from streamlit import version from streamlit import config from streamlit import net_util from streamlit import url_util from streamlit import env_util from streamlit import secrets from streamlit import util from streamlit.config import CONFIG_FILENAMES from streamlit.logger import get_logger from streamlit.report import Report from streamlit.secrets import SECRETS_FILE_LOC from streamlit.server.server import Server, server_address_is_unix_socket from streamlit.watcher.file_watcher import watch_file from streamlit.watcher.file_watcher import report_watchdog_availability LOGGER = get_logger(__name__) # Wait for 1 second before opening a browser. This gives old tabs a chance to # reconnect. # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS. BROWSER_WAIT_TIMEOUT_SEC = 1 NEW_VERSION_TEXT = """ %(new_version)s See what's new at https://discuss.streamlit.io/c/announcements Enter the following command to upgrade: %(prompt)s %(command)s """ % { "new_version": click.style( "A new version of Streamlit is available.", fg="blue", bold=True ), "prompt": click.style("$", fg="blue"), "command": click.style("pip install streamlit --upgrade", bold=True), } def _set_up_signal_handler(): LOGGER.debug("Setting up signal handler") def signal_handler(signal_number, stack_frame): # The server will shut down its threads and stop the ioloop Server.get_current().stop() signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) if sys.platform == "win32": signal.signal(signal.SIGBREAK, signal_handler) else: signal.signal(signal.SIGQUIT, signal_handler) def _fix_sys_path(script_path): """Add the script's folder to the sys path. Python normally does this automatically, but since we exec the script ourselves we need to do it instead. """ sys.path.insert(0, os.path.dirname(script_path)) def _fix_matplotlib_crash(): """Set Matplotlib backend to avoid a crash. The default Matplotlib backend crashes Python on OSX when run on a thread that's not the main thread, so here we set a safer backend as a fix. Users can always disable this behavior by setting the config runner.fixMatplotlib = false. This fix is OS-independent. We didn't see a good reason to make this Mac-only. Consistency within Streamlit seemed more important. """ if config.get_option("runner.fixMatplotlib"): try: # TODO: a better option may be to set # os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards # the top of __init__.py, before importing anything that imports # pandas (which imports matplotlib). Alternately, we could set # this environment variable in a new entrypoint defined in # setup.py. Both of these introduce additional trickiness: they # need to run without consulting streamlit.config.get_option, # because this would import streamlit, and therefore matplotlib. 
import matplotlib matplotlib.use("Agg") except ImportError: pass def _fix_tornado_crash(): """Set default asyncio policy to be compatible with Tornado 6. Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows. So here we pick the older SelectorEventLoopPolicy when the OS is Windows if the known-incompatible default policy is in use. This has to happen as early as possible to make it a low priority and overrideable See: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if env_util.IS_WINDOWS and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( # type: ignore[attr-defined] WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # Not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with # Tornado 6 fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def _fix_sys_argv(script_path, args): """sys.argv needs to exclude streamlit arguments and parameters and be set to what a user's script may expect. """ import sys sys.argv = [script_path] + list(args) def _on_server_start(server): _maybe_print_old_git_warning(server.script_path) _print_url(server.is_running_hello) report_watchdog_availability() _print_new_version_message() # Load secrets.toml if it exists. If the file doesn't exist, this # function will return without raising an exception. We catch any parse # errors and display them here. try: secrets.load_if_toml_exists() except BaseException as e: LOGGER.error(f"Failed to load {SECRETS_FILE_LOC}", exc_info=e) def maybe_open_browser(): if config.get_option("server.headless"): # Don't open browser when in headless mode. return if server.browser_is_connected: # Don't auto-open browser if there's already a browser connected. # This can happen if there's an old tab repeatedly trying to # connect, and it happens to success before we launch the browser. return if config.is_manually_set("browser.serverAddress"): addr = config.get_option("browser.serverAddress") elif config.is_manually_set("server.address"): if server_address_is_unix_socket(): # Don't open browser when server address is an unix socket return addr = config.get_option("server.address") else: addr = "localhost" util.open_browser(Report.get_url(addr)) # Schedule the browser to open using the IO Loop on the main thread, but # only if no other browser connects within 1s. ioloop = tornado.ioloop.IOLoop.current() ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser) def _fix_pydeck_mapbox_api_warning(): """Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception""" os.environ["MAPBOX_API_KEY"] = config.get_option("mapbox.token") def _print_new_version_message(): if version.should_show_new_version_notice(): click.secho(NEW_VERSION_TEXT) def _print_url(is_running_hello): if is_running_hello: title_message = "Welcome to Streamlit. Check out our demo in your browser." else: title_message = "You can now view your Streamlit app in your browser." 
named_urls = [] if config.is_manually_set("browser.serverAddress"): named_urls = [ ("URL", Report.get_url(config.get_option("browser.serverAddress"))) ] elif ( config.is_manually_set("server.address") and not server_address_is_unix_socket() ): named_urls = [ ("URL", Report.get_url(config.get_option("server.address"))), ] elif config.get_option("server.headless"): internal_ip = net_util.get_internal_ip() if internal_ip: named_urls.append(("Network URL", Report.get_url(internal_ip))) external_ip = net_util.get_external_ip() if external_ip: named_urls.append(("External URL", Report.get_url(external_ip))) else: named_urls = [ ("Local URL", Report.get_url("localhost")), ] internal_ip = net_util.get_internal_ip() if internal_ip: named_urls.append(("Network URL", Report.get_url(internal_ip))) click.secho("") click.secho(" %s" % title_message, fg="blue", bold=True) click.secho("") for url_name, url in named_urls: url_util.print_url(url_name, url) click.secho("") if is_running_hello: click.secho(" Ready to create your own Python apps super quickly?") click.secho(" Head over to ", nl=False) click.secho("https://docs.streamlit.io", bold=True) click.secho("") click.secho(" May you create awesome apps!") click.secho("") click.secho("") def _maybe_print_old_git_warning(script_path: str) -> None: """If our script is running in a Git repo, and we're running a very old Git version, print a warning that Git integration will be unavailable. """ repo = GitRepo(script_path) if ( not repo.is_valid() and repo.git_version is not None and repo.git_version < MIN_GIT_VERSION ): git_version_string = ".".join(str(val) for val in repo.git_version) min_version_string = ".".join(str(val) for val in MIN_GIT_VERSION) click.secho("") click.secho(" Git integration is disabled.", fg="yellow", bold=True) click.secho("") click.secho( f" Streamlit requires Git {min_version_string} or later, " f"but you have {git_version_string}.", fg="yellow", ) click.secho( " Git is used by Streamlit Sharing (https://streamlit.io/sharing).", fg="yellow", ) click.secho(" To enable this feature, please update Git.", fg="yellow") def load_config_options(flag_options: Dict[str, Any]): """Load config options from config.toml files, then overlay the ones set by flag_options. The "streamlit run" command supports passing Streamlit's config options as flags. This function reads through the config options set via flag, massages them, and passes them to get_config_options() so that they overwrite config option defaults and those loaded from config.toml files. Parameters ---------- flag_options : Dict[str, Any] A dict of config options where the keys are the CLI flag version of the config option names. """ options_from_flags = { name.replace("_", "."): val for name, val in flag_options.items() if val is not None } # Force a reparse of config files (if they exist). The result is cached # for future calls. config.get_config_options(force_reparse=True, options_from_flags=options_from_flags) def _install_config_watchers(flag_options: Dict[str, Any]): def on_config_changed(_path): load_config_options(flag_options) for filename in CONFIG_FILENAMES: if os.path.exists(filename): watch_file(filename, on_config_changed) # MASKED: run function (lines 330-365)
def run(script_path, command_line, args, flag_options):
    """Run a script in a separate thread and start a server for the app.

    This starts a blocking ioloop.

    Parameters
    ----------
    script_path : str
    command_line : str
    args : [str]
    flag_options : Dict[str, Any]

    """
    _fix_sys_path(script_path)
    _fix_matplotlib_crash()
    _fix_tornado_crash()
    _fix_sys_argv(script_path, args)
    _fix_pydeck_mapbox_api_warning()
    _install_config_watchers(flag_options)

    # Install a signal handler that will shut down the ioloop
    # and close all our threads.
    _set_up_signal_handler()

    ioloop = tornado.ioloop.IOLoop.current()

    # Create and start the server.
    server = Server(ioloop, script_path, command_line)
    server.start(_on_server_start)

    # (Must come after start(), because start() spawns a new thread and may
    # call sys.exit(), which doesn't kill other threads.)
    server.add_preheated_report_session()

    # Start the ioloop. This function will not return until the
    # server is shut down.
    ioloop.start()
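A minimal sketch of driving this entry point directly, assuming a local `app.py` exists; in practice the `streamlit run` CLI calls `run` for you, and the flag key and port below are illustrative values, not required settings:

```python
# Hypothetical direct invocation of bootstrap.run -- a sketch, not the
# supported entry point (the "streamlit run" CLI normally does this).
from streamlit import bootstrap

flag_options = {"server_port": 8502}  # CLI-flag-style key; illustrative value

bootstrap.load_config_options(flag_options)
bootstrap.run("app.py", "streamlit run app.py", args=[], flag_options=flag_options)
# Blocks here until the server's ioloop is stopped (e.g. by SIGINT).
```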
330
365
# Copyright 2018-2021 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import signal import sys from typing import Any, Dict import click import tornado.ioloop from streamlit.git_util import GitRepo, MIN_GIT_VERSION from streamlit import version from streamlit import config from streamlit import net_util from streamlit import url_util from streamlit import env_util from streamlit import secrets from streamlit import util from streamlit.config import CONFIG_FILENAMES from streamlit.logger import get_logger from streamlit.report import Report from streamlit.secrets import SECRETS_FILE_LOC from streamlit.server.server import Server, server_address_is_unix_socket from streamlit.watcher.file_watcher import watch_file from streamlit.watcher.file_watcher import report_watchdog_availability LOGGER = get_logger(__name__) # Wait for 1 second before opening a browser. This gives old tabs a chance to # reconnect. # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS. BROWSER_WAIT_TIMEOUT_SEC = 1 NEW_VERSION_TEXT = """ %(new_version)s See what's new at https://discuss.streamlit.io/c/announcements Enter the following command to upgrade: %(prompt)s %(command)s """ % { "new_version": click.style( "A new version of Streamlit is available.", fg="blue", bold=True ), "prompt": click.style("$", fg="blue"), "command": click.style("pip install streamlit --upgrade", bold=True), } def _set_up_signal_handler(): LOGGER.debug("Setting up signal handler") def signal_handler(signal_number, stack_frame): # The server will shut down its threads and stop the ioloop Server.get_current().stop() signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) if sys.platform == "win32": signal.signal(signal.SIGBREAK, signal_handler) else: signal.signal(signal.SIGQUIT, signal_handler) def _fix_sys_path(script_path): """Add the script's folder to the sys path. Python normally does this automatically, but since we exec the script ourselves we need to do it instead. """ sys.path.insert(0, os.path.dirname(script_path)) def _fix_matplotlib_crash(): """Set Matplotlib backend to avoid a crash. The default Matplotlib backend crashes Python on OSX when run on a thread that's not the main thread, so here we set a safer backend as a fix. Users can always disable this behavior by setting the config runner.fixMatplotlib = false. This fix is OS-independent. We didn't see a good reason to make this Mac-only. Consistency within Streamlit seemed more important. """ if config.get_option("runner.fixMatplotlib"): try: # TODO: a better option may be to set # os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards # the top of __init__.py, before importing anything that imports # pandas (which imports matplotlib). Alternately, we could set # this environment variable in a new entrypoint defined in # setup.py. Both of these introduce additional trickiness: they # need to run without consulting streamlit.config.get_option, # because this would import streamlit, and therefore matplotlib. 
import matplotlib matplotlib.use("Agg") except ImportError: pass def _fix_tornado_crash(): """Set default asyncio policy to be compatible with Tornado 6. Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows. So here we pick the older SelectorEventLoopPolicy when the OS is Windows if the known-incompatible default policy is in use. This has to happen as early as possible to make it a low priority and overrideable See: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if env_util.IS_WINDOWS and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( # type: ignore[attr-defined] WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # Not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with # Tornado 6 fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def _fix_sys_argv(script_path, args): """sys.argv needs to exclude streamlit arguments and parameters and be set to what a user's script may expect. """ import sys sys.argv = [script_path] + list(args) def _on_server_start(server): _maybe_print_old_git_warning(server.script_path) _print_url(server.is_running_hello) report_watchdog_availability() _print_new_version_message() # Load secrets.toml if it exists. If the file doesn't exist, this # function will return without raising an exception. We catch any parse # errors and display them here. try: secrets.load_if_toml_exists() except BaseException as e: LOGGER.error(f"Failed to load {SECRETS_FILE_LOC}", exc_info=e) def maybe_open_browser(): if config.get_option("server.headless"): # Don't open browser when in headless mode. return if server.browser_is_connected: # Don't auto-open browser if there's already a browser connected. # This can happen if there's an old tab repeatedly trying to # connect, and it happens to success before we launch the browser. return if config.is_manually_set("browser.serverAddress"): addr = config.get_option("browser.serverAddress") elif config.is_manually_set("server.address"): if server_address_is_unix_socket(): # Don't open browser when server address is an unix socket return addr = config.get_option("server.address") else: addr = "localhost" util.open_browser(Report.get_url(addr)) # Schedule the browser to open using the IO Loop on the main thread, but # only if no other browser connects within 1s. ioloop = tornado.ioloop.IOLoop.current() ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser) def _fix_pydeck_mapbox_api_warning(): """Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception""" os.environ["MAPBOX_API_KEY"] = config.get_option("mapbox.token") def _print_new_version_message(): if version.should_show_new_version_notice(): click.secho(NEW_VERSION_TEXT) def _print_url(is_running_hello): if is_running_hello: title_message = "Welcome to Streamlit. Check out our demo in your browser." else: title_message = "You can now view your Streamlit app in your browser." 
named_urls = [] if config.is_manually_set("browser.serverAddress"): named_urls = [ ("URL", Report.get_url(config.get_option("browser.serverAddress"))) ] elif ( config.is_manually_set("server.address") and not server_address_is_unix_socket() ): named_urls = [ ("URL", Report.get_url(config.get_option("server.address"))), ] elif config.get_option("server.headless"): internal_ip = net_util.get_internal_ip() if internal_ip: named_urls.append(("Network URL", Report.get_url(internal_ip))) external_ip = net_util.get_external_ip() if external_ip: named_urls.append(("External URL", Report.get_url(external_ip))) else: named_urls = [ ("Local URL", Report.get_url("localhost")), ] internal_ip = net_util.get_internal_ip() if internal_ip: named_urls.append(("Network URL", Report.get_url(internal_ip))) click.secho("") click.secho(" %s" % title_message, fg="blue", bold=True) click.secho("") for url_name, url in named_urls: url_util.print_url(url_name, url) click.secho("") if is_running_hello: click.secho(" Ready to create your own Python apps super quickly?") click.secho(" Head over to ", nl=False) click.secho("https://docs.streamlit.io", bold=True) click.secho("") click.secho(" May you create awesome apps!") click.secho("") click.secho("") def _maybe_print_old_git_warning(script_path: str) -> None: """If our script is running in a Git repo, and we're running a very old Git version, print a warning that Git integration will be unavailable. """ repo = GitRepo(script_path) if ( not repo.is_valid() and repo.git_version is not None and repo.git_version < MIN_GIT_VERSION ): git_version_string = ".".join(str(val) for val in repo.git_version) min_version_string = ".".join(str(val) for val in MIN_GIT_VERSION) click.secho("") click.secho(" Git integration is disabled.", fg="yellow", bold=True) click.secho("") click.secho( f" Streamlit requires Git {min_version_string} or later, " f"but you have {git_version_string}.", fg="yellow", ) click.secho( " Git is used by Streamlit Sharing (https://streamlit.io/sharing).", fg="yellow", ) click.secho(" To enable this feature, please update Git.", fg="yellow") def load_config_options(flag_options: Dict[str, Any]): """Load config options from config.toml files, then overlay the ones set by flag_options. The "streamlit run" command supports passing Streamlit's config options as flags. This function reads through the config options set via flag, massages them, and passes them to get_config_options() so that they overwrite config option defaults and those loaded from config.toml files. Parameters ---------- flag_options : Dict[str, Any] A dict of config options where the keys are the CLI flag version of the config option names. """ options_from_flags = { name.replace("_", "."): val for name, val in flag_options.items() if val is not None } # Force a reparse of config files (if they exist). The result is cached # for future calls. config.get_config_options(force_reparse=True, options_from_flags=options_from_flags) def _install_config_watchers(flag_options: Dict[str, Any]): def on_config_changed(_path): load_config_options(flag_options) for filename in CONFIG_FILENAMES: if os.path.exists(filename): watch_file(filename, on_config_changed) def run(script_path, command_line, args, flag_options): """Run a script in a separate thread and start a server for the app. This starts a blocking ioloop. 
Parameters ---------- script_path : str command_line : str args : [str] flag_options : Dict[str, Any] """ _fix_sys_path(script_path) _fix_matplotlib_crash() _fix_tornado_crash() _fix_sys_argv(script_path, args) _fix_pydeck_mapbox_api_warning() _install_config_watchers(flag_options) # Install a signal handler that will shut down the ioloop # and close all our threads _set_up_signal_handler() ioloop = tornado.ioloop.IOLoop.current() # Create and start the server. server = Server(ioloop, script_path, command_line) server.start(_on_server_start) # (Must come after start(), because this starts a new thread and start() # may call sys.exit() which doesn't kill other threads. server.add_preheated_report_session() # Start the ioloop. This function will not return until the # server is shut down. ioloop.start()
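The shutdown mechanics above reduce to a common Tornado pattern: register signal handlers that stop the IOLoop so the blocking `ioloop.start()` call returns. A standalone sketch of that pattern, with illustrative names that are not part of Streamlit's API (the real handler stops the `Server`, which in turn stops the loop):

```python
# Sketch: signal handlers stop a Tornado IOLoop so ioloop.start() returns.
import signal

import tornado.ioloop


def serve_until_signalled():
    ioloop = tornado.ioloop.IOLoop.current()

    def handle_signal(signum, frame):
        # add_callback_from_signal is the safe way to touch the loop
        # from inside a signal handler.
        ioloop.add_callback_from_signal(ioloop.stop)

    signal.signal(signal.SIGINT, handle_signal)
    signal.signal(signal.SIGTERM, handle_signal)
    ioloop.start()  # blocks until ioloop.stop() is called
```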
distance
>>> city1 = make_city('city1', 0, 1)
>>> city2 = make_city('city2', 0, 2)
>>> distance(city1, city2)
1.0
>>> city3 = make_city('city3', 6.5, 12)
>>> city4 = make_city('city4', 2.5, 15)
>>> distance(city3, city4)
5.0
LAB_SOURCE_FILE = "lab05.py" """ Lab 05: Trees and Proj2 Prep """ def couple(lst1, lst2): """Return a list that contains lists with i-th elements of two sequences coupled together. >>> lst1 = [1, 2, 3] >>> lst2 = [4, 5, 6] >>> couple(lst1, lst2) [[1, 4], [2, 5], [3, 6]] >>> lst3 = ['c', 6] >>> lst4 = ['s', '1'] >>> couple(lst3, lst4) [['c', 's'], [6, '1']] """ assert len(lst1) == len(lst2) a = [] for i in range(len(lst1)): a.append([lst1[i], lst2[i]]) return a from math import sqrt # MASKED: distance function (lines 24-38) def closer_city(lat, lon, city1, city2): """ Returns the name of either city1 or city2, whichever is closest to coordinate (lat, lon). >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' """ tmp = make_city('tmp', lat, lon) dis1 = distance(city1, tmp) dis2 = distance(city2, tmp) if dis1 > dis2: return get_name(city2) else: return get_name(city1) def check_abstraction(): """ There's nothing for you to do for this function, it's just here for the extra doctest >>> change_abstraction(True) >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' >>> change_abstraction(False) """ # Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it! def make_city(name, lat, lon): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' >>> get_lat(city) 0 >>> get_lon(city) 1 """ if change_abstraction.changed: return {"name" : name, "lat" : lat, "lon" : lon} else: return [name, lat, lon] def get_name(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' """ if change_abstraction.changed: return city["name"] else: return city[0] def get_lat(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lat(city) 0 """ if change_abstraction.changed: return city["lat"] else: return city[1] def get_lon(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lon(city) 1 """ if change_abstraction.changed: return city["lon"] else: return city[2] def change_abstraction(change): change_abstraction.changed = change change_abstraction.changed = False def nut_finder(t): """Returns True if t contains a node with the value 'nut' and False otherwise. >>> scrat = tree('nut') >>> nut_finder(scrat) True >>> sproul = tree('roots', [tree('branch1', [tree('leaf'), tree('nut')]), tree('branch2')]) >>> nut_finder(sproul) True >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> nut_finder(numbers) False >>> t = tree(1, [tree('nut',[tree('not nut')])]) >>> nut_finder(t) True """ if label(t) == 'nut': return True for node in branches(t): if nut_finder(node): return True return False def sprout_leaves(t, values): """Sprout new leaves containing the data in values at each leaf in the original tree t and return the resulting tree. 
>>> t1 = tree(1, [tree(2), tree(3)]) >>> print_tree(t1) 1 2 3 >>> new1 = sprout_leaves(t1, [4, 5]) >>> print_tree(new1) 1 2 4 5 3 4 5 >>> t2 = tree(1, [tree(2, [tree(3)])]) >>> print_tree(t2) 1 2 3 >>> new2 = sprout_leaves(t2, [6, 1, 2]) >>> print_tree(new2) 1 2 3 6 1 2 """ if is_leaf(t): return tree(label(t),[tree(v) for v in values]) return tree(label(t),[sprout_leaves(b, values) for b in branches(t)]) # Tree ADT def tree(label, branches=[]): """Construct a tree with the given label value and a list of branches.""" for branch in branches: assert is_tree(branch), 'branches must be trees' return [label] + list(branches) def label(tree): """Return the label value of a tree.""" return tree[0] def branches(tree): """Return the list of branches of the given tree.""" return tree[1:] def is_tree(tree): """Returns True if the given tree is a tree, and False otherwise.""" if type(tree) != list or len(tree) < 1: return False for branch in branches(tree): if not is_tree(branch): return False return True def is_leaf(tree): """Returns True if the given tree's list of branches is empty, and False otherwise. """ return not branches(tree) def print_tree(t, indent=0): """Print a representation of this tree in which each node is indented by two spaces times its depth from the root. >>> print_tree(tree(1)) 1 >>> print_tree(tree(1, [tree(2)])) 1 2 >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> print_tree(numbers) 1 2 3 4 5 6 7 """ print(' ' * indent + str(label(t))) for b in branches(t): print_tree(b, indent + 1) def copy_tree(t): """Returns a copy of t. Only for testing purposes. >>> t = tree(5) >>> copy = copy_tree(t) >>> t = tree(6) >>> print_tree(copy) 5 """ return tree(label(t), [copy_tree(b) for b in branches(t)]) def add_chars(w1, w2): """ Return a string containing the characters you need to add to w1 to get w2. You may assume that w1 is a subsequence of w2. >>> add_chars("owl", "howl") 'h' >>> add_chars("want", "wanton") 'on' >>> add_chars("rat", "radiate") 'diae' >>> add_chars("a", "prepare") 'prepre' >>> add_chars("resin", "recursion") 'curo' >>> add_chars("fin", "effusion") 'efuso' >>> add_chars("coy", "cacophony") 'acphon' >>> from construct_check import check >>> # ban iteration and sets >>> check(LAB_SOURCE_FILE, 'add_chars', ... ['For', 'While', 'Set', 'SetComp']) # Must use recursion True """ "*** YOUR CODE HERE ***" def add_trees(t1, t2): """ >>> numbers = tree(1, ... [tree(2, ... [tree(3), ... tree(4)]), ... tree(5, ... [tree(6, ... [tree(7)]), ... tree(8)])]) >>> print_tree(add_trees(numbers, numbers)) 2 4 6 8 10 12 14 16 >>> print_tree(add_trees(tree(2), tree(3, [tree(4), tree(5)]))) 5 4 5 >>> print_tree(add_trees(tree(2, [tree(3)]), tree(2, [tree(3), tree(4)]))) 4 6 4 >>> print_tree(add_trees(tree(2, [tree(3, [tree(4), tree(5)])]), \ tree(2, [tree(3, [tree(4)]), tree(5)]))) 4 6 8 5 5 """ "*** YOUR CODE HERE ***" # Shakespeare and Dictionaries def build_successors_table(tokens): """Return a dictionary: keys are words; values are lists of successors. >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.'] >>> table = build_successors_table(text) >>> sorted(table) [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to'] >>> table['to'] ['investigate', 'eat'] >>> table['pie'] ['.'] >>> table['.'] ['We'] """ table = {} prev = '.' 
for word in tokens: if prev not in table: "*** YOUR CODE HERE ***" "*** YOUR CODE HERE ***" prev = word return table def construct_sent(word, table): """Prints a random sentence starting with word, sampling from table. >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']} >>> construct_sent('Wow', table) 'Wow!' >>> construct_sent('Sentences', table) 'Sentences are cool.' """ import random result = '' while word not in ['.', '!', '?']: "*** YOUR CODE HERE ***" return result.strip() + word def shakespeare_tokens(path='shakespeare.txt', url='http://composingprograms.com/shakespeare.txt'): """Return the words of Shakespeare's plays as a list.""" import os from urllib.request import urlopen if os.path.exists(path): return open('shakespeare.txt', encoding='ascii').read().split() else: shakespeare = urlopen(url) return shakespeare.read().decode(encoding='ascii').split() # Uncomment the following two lines # tokens = shakespeare_tokens() # table = build_successors_table(tokens) def random_sent(): import random return construct_sent(random.choice(table['.']), table)
def distance(city1, city2):
    """
    >>> city1 = make_city('city1', 0, 1)
    >>> city2 = make_city('city2', 0, 2)
    >>> distance(city1, city2)
    1.0
    >>> city3 = make_city('city3', 6.5, 12)
    >>> city4 = make_city('city4', 2.5, 15)
    >>> distance(city3, city4)
    5.0
    """
    x1, y1 = get_lat(city1), get_lon(city1)
    x2, y2 = get_lat(city2), get_lon(city2)
    return sqrt((x1 - x2)**2 + (y1 - y2)**2)
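Because `distance` only touches cities through the `get_lat`/`get_lon` selectors, it keeps working when `change_abstraction` swaps the underlying representation from a list to a dict. A small demo, assuming the lab's definitions above are in scope:

```python
# Demo: distance respects the abstraction barrier, so flipping the
# representation (list -> dict) does not change its behavior.
a = make_city('a', 0, 1)
b = make_city('b', 0, 2)
print(distance(a, b))      # 1.0, list-backed cities

change_abstraction(True)   # switch make_city/get_* to the dict representation
a = make_city('a', 0, 1)
b = make_city('b', 0, 2)
print(distance(a, b))      # still 1.0
change_abstraction(False)
```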
24
38
LAB_SOURCE_FILE = "lab05.py" """ Lab 05: Trees and Proj2 Prep """ def couple(lst1, lst2): """Return a list that contains lists with i-th elements of two sequences coupled together. >>> lst1 = [1, 2, 3] >>> lst2 = [4, 5, 6] >>> couple(lst1, lst2) [[1, 4], [2, 5], [3, 6]] >>> lst3 = ['c', 6] >>> lst4 = ['s', '1'] >>> couple(lst3, lst4) [['c', 's'], [6, '1']] """ assert len(lst1) == len(lst2) a = [] for i in range(len(lst1)): a.append([lst1[i], lst2[i]]) return a from math import sqrt def distance(city1, city2): """ >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 """ x1, y1 = get_lat(city1), get_lon(city1) x2, y2 = get_lat(city2), get_lon(city2) return sqrt((x1 - x2)**2 + (y1 - y2)**2) def closer_city(lat, lon, city1, city2): """ Returns the name of either city1 or city2, whichever is closest to coordinate (lat, lon). >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' """ tmp = make_city('tmp', lat, lon) dis1 = distance(city1, tmp) dis2 = distance(city2, tmp) if dis1 > dis2: return get_name(city2) else: return get_name(city1) def check_abstraction(): """ There's nothing for you to do for this function, it's just here for the extra doctest >>> change_abstraction(True) >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' >>> change_abstraction(False) """ # Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it! def make_city(name, lat, lon): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' >>> get_lat(city) 0 >>> get_lon(city) 1 """ if change_abstraction.changed: return {"name" : name, "lat" : lat, "lon" : lon} else: return [name, lat, lon] def get_name(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' """ if change_abstraction.changed: return city["name"] else: return city[0] def get_lat(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lat(city) 0 """ if change_abstraction.changed: return city["lat"] else: return city[1] def get_lon(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lon(city) 1 """ if change_abstraction.changed: return city["lon"] else: return city[2] def change_abstraction(change): change_abstraction.changed = change change_abstraction.changed = False def nut_finder(t): """Returns True if t contains a node with the value 'nut' and False otherwise. 
>>> scrat = tree('nut') >>> nut_finder(scrat) True >>> sproul = tree('roots', [tree('branch1', [tree('leaf'), tree('nut')]), tree('branch2')]) >>> nut_finder(sproul) True >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> nut_finder(numbers) False >>> t = tree(1, [tree('nut',[tree('not nut')])]) >>> nut_finder(t) True """ if label(t) == 'nut': return True for node in branches(t): if nut_finder(node): return True return False def sprout_leaves(t, values): """Sprout new leaves containing the data in values at each leaf in the original tree t and return the resulting tree. >>> t1 = tree(1, [tree(2), tree(3)]) >>> print_tree(t1) 1 2 3 >>> new1 = sprout_leaves(t1, [4, 5]) >>> print_tree(new1) 1 2 4 5 3 4 5 >>> t2 = tree(1, [tree(2, [tree(3)])]) >>> print_tree(t2) 1 2 3 >>> new2 = sprout_leaves(t2, [6, 1, 2]) >>> print_tree(new2) 1 2 3 6 1 2 """ if is_leaf(t): return tree(label(t),[tree(v) for v in values]) return tree(label(t),[sprout_leaves(b, values) for b in branches(t)]) # Tree ADT def tree(label, branches=[]): """Construct a tree with the given label value and a list of branches.""" for branch in branches: assert is_tree(branch), 'branches must be trees' return [label] + list(branches) def label(tree): """Return the label value of a tree.""" return tree[0] def branches(tree): """Return the list of branches of the given tree.""" return tree[1:] def is_tree(tree): """Returns True if the given tree is a tree, and False otherwise.""" if type(tree) != list or len(tree) < 1: return False for branch in branches(tree): if not is_tree(branch): return False return True def is_leaf(tree): """Returns True if the given tree's list of branches is empty, and False otherwise. """ return not branches(tree) def print_tree(t, indent=0): """Print a representation of this tree in which each node is indented by two spaces times its depth from the root. >>> print_tree(tree(1)) 1 >>> print_tree(tree(1, [tree(2)])) 1 2 >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> print_tree(numbers) 1 2 3 4 5 6 7 """ print(' ' * indent + str(label(t))) for b in branches(t): print_tree(b, indent + 1) def copy_tree(t): """Returns a copy of t. Only for testing purposes. >>> t = tree(5) >>> copy = copy_tree(t) >>> t = tree(6) >>> print_tree(copy) 5 """ return tree(label(t), [copy_tree(b) for b in branches(t)]) def add_chars(w1, w2): """ Return a string containing the characters you need to add to w1 to get w2. You may assume that w1 is a subsequence of w2. >>> add_chars("owl", "howl") 'h' >>> add_chars("want", "wanton") 'on' >>> add_chars("rat", "radiate") 'diae' >>> add_chars("a", "prepare") 'prepre' >>> add_chars("resin", "recursion") 'curo' >>> add_chars("fin", "effusion") 'efuso' >>> add_chars("coy", "cacophony") 'acphon' >>> from construct_check import check >>> # ban iteration and sets >>> check(LAB_SOURCE_FILE, 'add_chars', ... ['For', 'While', 'Set', 'SetComp']) # Must use recursion True """ "*** YOUR CODE HERE ***" def add_trees(t1, t2): """ >>> numbers = tree(1, ... [tree(2, ... [tree(3), ... tree(4)]), ... tree(5, ... [tree(6, ... [tree(7)]), ... 
tree(8)])]) >>> print_tree(add_trees(numbers, numbers)) 2 4 6 8 10 12 14 16 >>> print_tree(add_trees(tree(2), tree(3, [tree(4), tree(5)]))) 5 4 5 >>> print_tree(add_trees(tree(2, [tree(3)]), tree(2, [tree(3), tree(4)]))) 4 6 4 >>> print_tree(add_trees(tree(2, [tree(3, [tree(4), tree(5)])]), \ tree(2, [tree(3, [tree(4)]), tree(5)]))) 4 6 8 5 5 """ "*** YOUR CODE HERE ***" # Shakespeare and Dictionaries def build_successors_table(tokens): """Return a dictionary: keys are words; values are lists of successors. >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.'] >>> table = build_successors_table(text) >>> sorted(table) [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to'] >>> table['to'] ['investigate', 'eat'] >>> table['pie'] ['.'] >>> table['.'] ['We'] """ table = {} prev = '.' for word in tokens: if prev not in table: "*** YOUR CODE HERE ***" "*** YOUR CODE HERE ***" prev = word return table def construct_sent(word, table): """Prints a random sentence starting with word, sampling from table. >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']} >>> construct_sent('Wow', table) 'Wow!' >>> construct_sent('Sentences', table) 'Sentences are cool.' """ import random result = '' while word not in ['.', '!', '?']: "*** YOUR CODE HERE ***" return result.strip() + word def shakespeare_tokens(path='shakespeare.txt', url='http://composingprograms.com/shakespeare.txt'): """Return the words of Shakespeare's plays as a list.""" import os from urllib.request import urlopen if os.path.exists(path): return open('shakespeare.txt', encoding='ascii').read().split() else: shakespeare = urlopen(url) return shakespeare.read().decode(encoding='ascii').split() # Uncomment the following two lines # tokens = shakespeare_tokens() # table = build_successors_table(tokens) def random_sent(): import random return construct_sent(random.choice(table['.']), table)
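The lab file above leaves `add_chars` as a blank. One recursive sketch that satisfies its doctests and the no-iteration restriction (a possible answer, not the official solution):

```python
def add_chars(w1, w2):
    # Assumes w1 is a subsequence of w2, as the doctests promise.
    if not w1:
        return w2                           # everything left in w2 is extra
    if w1[0] == w2[0]:
        return add_chars(w1[1:], w2[1:])    # shared character: add nothing
    return w2[0] + add_chars(w1, w2[1:])    # w2[0] must be added to w1
```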
closer_city
Returns the name of either city1 or city2, whichever is closest
to coordinate (lat, lon).

>>> berkeley = make_city('Berkeley', 37.87, 112.26)
>>> stanford = make_city('Stanford', 34.05, 118.25)
>>> closer_city(38.33, 121.44, berkeley, stanford)
'Stanford'
>>> bucharest = make_city('Bucharest', 44.43, 26.10)
>>> vienna = make_city('Vienna', 48.20, 16.37)
>>> closer_city(41.29, 174.78, bucharest, vienna)
'Bucharest'
LAB_SOURCE_FILE = "lab05.py" """ Lab 05: Trees and Proj2 Prep """ def couple(lst1, lst2): """Return a list that contains lists with i-th elements of two sequences coupled together. >>> lst1 = [1, 2, 3] >>> lst2 = [4, 5, 6] >>> couple(lst1, lst2) [[1, 4], [2, 5], [3, 6]] >>> lst3 = ['c', 6] >>> lst4 = ['s', '1'] >>> couple(lst3, lst4) [['c', 's'], [6, '1']] """ assert len(lst1) == len(lst2) a = [] for i in range(len(lst1)): a.append([lst1[i], lst2[i]]) return a from math import sqrt def distance(city1, city2): """ >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 """ x1, y1 = get_lat(city1), get_lon(city1) x2, y2 = get_lat(city2), get_lon(city2) return sqrt((x1 - x2)**2 + (y1 - y2)**2) # MASKED: closer_city function (lines 40-61) def check_abstraction(): """ There's nothing for you to do for this function, it's just here for the extra doctest >>> change_abstraction(True) >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' >>> change_abstraction(False) """ # Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it! def make_city(name, lat, lon): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' >>> get_lat(city) 0 >>> get_lon(city) 1 """ if change_abstraction.changed: return {"name" : name, "lat" : lat, "lon" : lon} else: return [name, lat, lon] def get_name(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' """ if change_abstraction.changed: return city["name"] else: return city[0] def get_lat(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lat(city) 0 """ if change_abstraction.changed: return city["lat"] else: return city[1] def get_lon(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lon(city) 1 """ if change_abstraction.changed: return city["lon"] else: return city[2] def change_abstraction(change): change_abstraction.changed = change change_abstraction.changed = False def nut_finder(t): """Returns True if t contains a node with the value 'nut' and False otherwise. >>> scrat = tree('nut') >>> nut_finder(scrat) True >>> sproul = tree('roots', [tree('branch1', [tree('leaf'), tree('nut')]), tree('branch2')]) >>> nut_finder(sproul) True >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> nut_finder(numbers) False >>> t = tree(1, [tree('nut',[tree('not nut')])]) >>> nut_finder(t) True """ if label(t) == 'nut': return True for node in branches(t): if nut_finder(node): return True return False def sprout_leaves(t, values): """Sprout new leaves containing the data in values at each leaf in the original tree t and return the resulting tree. 
>>> t1 = tree(1, [tree(2), tree(3)]) >>> print_tree(t1) 1 2 3 >>> new1 = sprout_leaves(t1, [4, 5]) >>> print_tree(new1) 1 2 4 5 3 4 5 >>> t2 = tree(1, [tree(2, [tree(3)])]) >>> print_tree(t2) 1 2 3 >>> new2 = sprout_leaves(t2, [6, 1, 2]) >>> print_tree(new2) 1 2 3 6 1 2 """ if is_leaf(t): return tree(label(t),[tree(v) for v in values]) return tree(label(t),[sprout_leaves(b, values) for b in branches(t)]) # Tree ADT def tree(label, branches=[]): """Construct a tree with the given label value and a list of branches.""" for branch in branches: assert is_tree(branch), 'branches must be trees' return [label] + list(branches) def label(tree): """Return the label value of a tree.""" return tree[0] def branches(tree): """Return the list of branches of the given tree.""" return tree[1:] def is_tree(tree): """Returns True if the given tree is a tree, and False otherwise.""" if type(tree) != list or len(tree) < 1: return False for branch in branches(tree): if not is_tree(branch): return False return True def is_leaf(tree): """Returns True if the given tree's list of branches is empty, and False otherwise. """ return not branches(tree) def print_tree(t, indent=0): """Print a representation of this tree in which each node is indented by two spaces times its depth from the root. >>> print_tree(tree(1)) 1 >>> print_tree(tree(1, [tree(2)])) 1 2 >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> print_tree(numbers) 1 2 3 4 5 6 7 """ print(' ' * indent + str(label(t))) for b in branches(t): print_tree(b, indent + 1) def copy_tree(t): """Returns a copy of t. Only for testing purposes. >>> t = tree(5) >>> copy = copy_tree(t) >>> t = tree(6) >>> print_tree(copy) 5 """ return tree(label(t), [copy_tree(b) for b in branches(t)]) def add_chars(w1, w2): """ Return a string containing the characters you need to add to w1 to get w2. You may assume that w1 is a subsequence of w2. >>> add_chars("owl", "howl") 'h' >>> add_chars("want", "wanton") 'on' >>> add_chars("rat", "radiate") 'diae' >>> add_chars("a", "prepare") 'prepre' >>> add_chars("resin", "recursion") 'curo' >>> add_chars("fin", "effusion") 'efuso' >>> add_chars("coy", "cacophony") 'acphon' >>> from construct_check import check >>> # ban iteration and sets >>> check(LAB_SOURCE_FILE, 'add_chars', ... ['For', 'While', 'Set', 'SetComp']) # Must use recursion True """ "*** YOUR CODE HERE ***" def add_trees(t1, t2): """ >>> numbers = tree(1, ... [tree(2, ... [tree(3), ... tree(4)]), ... tree(5, ... [tree(6, ... [tree(7)]), ... tree(8)])]) >>> print_tree(add_trees(numbers, numbers)) 2 4 6 8 10 12 14 16 >>> print_tree(add_trees(tree(2), tree(3, [tree(4), tree(5)]))) 5 4 5 >>> print_tree(add_trees(tree(2, [tree(3)]), tree(2, [tree(3), tree(4)]))) 4 6 4 >>> print_tree(add_trees(tree(2, [tree(3, [tree(4), tree(5)])]), \ tree(2, [tree(3, [tree(4)]), tree(5)]))) 4 6 8 5 5 """ "*** YOUR CODE HERE ***" # Shakespeare and Dictionaries def build_successors_table(tokens): """Return a dictionary: keys are words; values are lists of successors. >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.'] >>> table = build_successors_table(text) >>> sorted(table) [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to'] >>> table['to'] ['investigate', 'eat'] >>> table['pie'] ['.'] >>> table['.'] ['We'] """ table = {} prev = '.' 
for word in tokens: if prev not in table: "*** YOUR CODE HERE ***" "*** YOUR CODE HERE ***" prev = word return table def construct_sent(word, table): """Prints a random sentence starting with word, sampling from table. >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']} >>> construct_sent('Wow', table) 'Wow!' >>> construct_sent('Sentences', table) 'Sentences are cool.' """ import random result = '' while word not in ['.', '!', '?']: "*** YOUR CODE HERE ***" return result.strip() + word def shakespeare_tokens(path='shakespeare.txt', url='http://composingprograms.com/shakespeare.txt'): """Return the words of Shakespeare's plays as a list.""" import os from urllib.request import urlopen if os.path.exists(path): return open('shakespeare.txt', encoding='ascii').read().split() else: shakespeare = urlopen(url) return shakespeare.read().decode(encoding='ascii').split() # Uncomment the following two lines # tokens = shakespeare_tokens() # table = build_successors_table(tokens) def random_sent(): import random return construct_sent(random.choice(table['.']), table)
def closer_city(lat, lon, city1, city2):
    """
    Returns the name of either city1 or city2, whichever is closest
    to coordinate (lat, lon).

    >>> berkeley = make_city('Berkeley', 37.87, 112.26)
    >>> stanford = make_city('Stanford', 34.05, 118.25)
    >>> closer_city(38.33, 121.44, berkeley, stanford)
    'Stanford'
    >>> bucharest = make_city('Bucharest', 44.43, 26.10)
    >>> vienna = make_city('Vienna', 48.20, 16.37)
    >>> closer_city(41.29, 174.78, bucharest, vienna)
    'Bucharest'
    """
    tmp = make_city('tmp', lat, lon)
    dis1 = distance(city1, tmp)
    dis2 = distance(city2, tmp)
    if dis1 > dis2:
        return get_name(city2)
    else:
        return get_name(city1)
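Since `closer_city` just compares two distances, the same idea extends to any number of candidates with `min`. A sketch built on the ADT's own selectors (`closest_city` is an illustrative helper, not part of the lab):

```python
def closest_city(lat, lon, cities):
    # Generalization of closer_city to a list of cities.
    target = make_city('target', lat, lon)
    return get_name(min(cities, key=lambda city: distance(city, target)))

# closest_city(38.33, 121.44, [berkeley, stanford]) would return 'Stanford'.
```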
40
61
LAB_SOURCE_FILE = "lab05.py" """ Lab 05: Trees and Proj2 Prep """ def couple(lst1, lst2): """Return a list that contains lists with i-th elements of two sequences coupled together. >>> lst1 = [1, 2, 3] >>> lst2 = [4, 5, 6] >>> couple(lst1, lst2) [[1, 4], [2, 5], [3, 6]] >>> lst3 = ['c', 6] >>> lst4 = ['s', '1'] >>> couple(lst3, lst4) [['c', 's'], [6, '1']] """ assert len(lst1) == len(lst2) a = [] for i in range(len(lst1)): a.append([lst1[i], lst2[i]]) return a from math import sqrt def distance(city1, city2): """ >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 """ x1, y1 = get_lat(city1), get_lon(city1) x2, y2 = get_lat(city2), get_lon(city2) return sqrt((x1 - x2)**2 + (y1 - y2)**2) def closer_city(lat, lon, city1, city2): """ Returns the name of either city1 or city2, whichever is closest to coordinate (lat, lon). >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' """ tmp = make_city('tmp', lat, lon) dis1 = distance(city1, tmp) dis2 = distance(city2, tmp) if dis1 > dis2: return get_name(city2) else: return get_name(city1) def check_abstraction(): """ There's nothing for you to do for this function, it's just here for the extra doctest >>> change_abstraction(True) >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' >>> change_abstraction(False) """ # Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it! def make_city(name, lat, lon): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' >>> get_lat(city) 0 >>> get_lon(city) 1 """ if change_abstraction.changed: return {"name" : name, "lat" : lat, "lon" : lon} else: return [name, lat, lon] def get_name(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' """ if change_abstraction.changed: return city["name"] else: return city[0] def get_lat(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lat(city) 0 """ if change_abstraction.changed: return city["lat"] else: return city[1] def get_lon(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lon(city) 1 """ if change_abstraction.changed: return city["lon"] else: return city[2] def change_abstraction(change): change_abstraction.changed = change change_abstraction.changed = False def nut_finder(t): """Returns True if t contains a node with the value 'nut' and False otherwise. 
>>> scrat = tree('nut') >>> nut_finder(scrat) True >>> sproul = tree('roots', [tree('branch1', [tree('leaf'), tree('nut')]), tree('branch2')]) >>> nut_finder(sproul) True >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> nut_finder(numbers) False >>> t = tree(1, [tree('nut',[tree('not nut')])]) >>> nut_finder(t) True """ if label(t) == 'nut': return True for node in branches(t): if nut_finder(node): return True return False def sprout_leaves(t, values): """Sprout new leaves containing the data in values at each leaf in the original tree t and return the resulting tree. >>> t1 = tree(1, [tree(2), tree(3)]) >>> print_tree(t1) 1 2 3 >>> new1 = sprout_leaves(t1, [4, 5]) >>> print_tree(new1) 1 2 4 5 3 4 5 >>> t2 = tree(1, [tree(2, [tree(3)])]) >>> print_tree(t2) 1 2 3 >>> new2 = sprout_leaves(t2, [6, 1, 2]) >>> print_tree(new2) 1 2 3 6 1 2 """ if is_leaf(t): return tree(label(t),[tree(v) for v in values]) return tree(label(t),[sprout_leaves(b, values) for b in branches(t)]) # Tree ADT def tree(label, branches=[]): """Construct a tree with the given label value and a list of branches.""" for branch in branches: assert is_tree(branch), 'branches must be trees' return [label] + list(branches) def label(tree): """Return the label value of a tree.""" return tree[0] def branches(tree): """Return the list of branches of the given tree.""" return tree[1:] def is_tree(tree): """Returns True if the given tree is a tree, and False otherwise.""" if type(tree) != list or len(tree) < 1: return False for branch in branches(tree): if not is_tree(branch): return False return True def is_leaf(tree): """Returns True if the given tree's list of branches is empty, and False otherwise. """ return not branches(tree) def print_tree(t, indent=0): """Print a representation of this tree in which each node is indented by two spaces times its depth from the root. >>> print_tree(tree(1)) 1 >>> print_tree(tree(1, [tree(2)])) 1 2 >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> print_tree(numbers) 1 2 3 4 5 6 7 """ print(' ' * indent + str(label(t))) for b in branches(t): print_tree(b, indent + 1) def copy_tree(t): """Returns a copy of t. Only for testing purposes. >>> t = tree(5) >>> copy = copy_tree(t) >>> t = tree(6) >>> print_tree(copy) 5 """ return tree(label(t), [copy_tree(b) for b in branches(t)]) def add_chars(w1, w2): """ Return a string containing the characters you need to add to w1 to get w2. You may assume that w1 is a subsequence of w2. >>> add_chars("owl", "howl") 'h' >>> add_chars("want", "wanton") 'on' >>> add_chars("rat", "radiate") 'diae' >>> add_chars("a", "prepare") 'prepre' >>> add_chars("resin", "recursion") 'curo' >>> add_chars("fin", "effusion") 'efuso' >>> add_chars("coy", "cacophony") 'acphon' >>> from construct_check import check >>> # ban iteration and sets >>> check(LAB_SOURCE_FILE, 'add_chars', ... ['For', 'While', 'Set', 'SetComp']) # Must use recursion True """ "*** YOUR CODE HERE ***" def add_trees(t1, t2): """ >>> numbers = tree(1, ... [tree(2, ... [tree(3), ... tree(4)]), ... tree(5, ... [tree(6, ... [tree(7)]), ... 
tree(8)])]) >>> print_tree(add_trees(numbers, numbers)) 2 4 6 8 10 12 14 16 >>> print_tree(add_trees(tree(2), tree(3, [tree(4), tree(5)]))) 5 4 5 >>> print_tree(add_trees(tree(2, [tree(3)]), tree(2, [tree(3), tree(4)]))) 4 6 4 >>> print_tree(add_trees(tree(2, [tree(3, [tree(4), tree(5)])]), \ tree(2, [tree(3, [tree(4)]), tree(5)]))) 4 6 8 5 5 """ "*** YOUR CODE HERE ***" # Shakespeare and Dictionaries def build_successors_table(tokens): """Return a dictionary: keys are words; values are lists of successors. >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.'] >>> table = build_successors_table(text) >>> sorted(table) [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to'] >>> table['to'] ['investigate', 'eat'] >>> table['pie'] ['.'] >>> table['.'] ['We'] """ table = {} prev = '.' for word in tokens: if prev not in table: "*** YOUR CODE HERE ***" "*** YOUR CODE HERE ***" prev = word return table def construct_sent(word, table): """Prints a random sentence starting with word, sampling from table. >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']} >>> construct_sent('Wow', table) 'Wow!' >>> construct_sent('Sentences', table) 'Sentences are cool.' """ import random result = '' while word not in ['.', '!', '?']: "*** YOUR CODE HERE ***" return result.strip() + word def shakespeare_tokens(path='shakespeare.txt', url='http://composingprograms.com/shakespeare.txt'): """Return the words of Shakespeare's plays as a list.""" import os from urllib.request import urlopen if os.path.exists(path): return open('shakespeare.txt', encoding='ascii').read().split() else: shakespeare = urlopen(url) return shakespeare.read().decode(encoding='ascii').split() # Uncomment the following two lines # tokens = shakespeare_tokens() # table = build_successors_table(tokens) def random_sent(): import random return construct_sent(random.choice(table['.']), table)
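`add_trees` is also left blank in the lab file. One recursive sketch that matches its doctests pads the shorter branch list with `None` so leftover subtrees pass through unchanged (a possible answer, not the official one):

```python
from itertools import zip_longest


def add_trees(t1, t2):
    # None marks a missing subtree; the other side passes through unchanged.
    if t1 is None:
        return t2
    if t2 is None:
        return t1
    return tree(label(t1) + label(t2),
                [add_trees(b1, b2)
                 for b1, b2 in zip_longest(branches(t1), branches(t2))])
```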
build_successors_table
Return a dictionary: keys are words; values are lists of successors.

>>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.']
>>> table = build_successors_table(text)
>>> sorted(table)
[',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to']
>>> table['to']
['investigate', 'eat']
>>> table['pie']
['.']
>>> table['.']
['We']
LAB_SOURCE_FILE = "lab05.py" """ Lab 05: Trees and Proj2 Prep """ def couple(lst1, lst2): """Return a list that contains lists with i-th elements of two sequences coupled together. >>> lst1 = [1, 2, 3] >>> lst2 = [4, 5, 6] >>> couple(lst1, lst2) [[1, 4], [2, 5], [3, 6]] >>> lst3 = ['c', 6] >>> lst4 = ['s', '1'] >>> couple(lst3, lst4) [['c', 's'], [6, '1']] """ assert len(lst1) == len(lst2) a = [] for i in range(len(lst1)): a.append([lst1[i], lst2[i]]) return a from math import sqrt def distance(city1, city2): """ >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 """ x1, y1 = get_lat(city1), get_lon(city1) x2, y2 = get_lat(city2), get_lon(city2) return sqrt((x1 - x2)**2 + (y1 - y2)**2) def closer_city(lat, lon, city1, city2): """ Returns the name of either city1 or city2, whichever is closest to coordinate (lat, lon). >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' """ tmp = make_city('tmp', lat, lon) dis1 = distance(city1, tmp) dis2 = distance(city2, tmp) if dis1 > dis2: return get_name(city2) else: return get_name(city1) def check_abstraction(): """ There's nothing for you to do for this function, it's just here for the extra doctest >>> change_abstraction(True) >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' >>> change_abstraction(False) """ # Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it! def make_city(name, lat, lon): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' >>> get_lat(city) 0 >>> get_lon(city) 1 """ if change_abstraction.changed: return {"name" : name, "lat" : lat, "lon" : lon} else: return [name, lat, lon] def get_name(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' """ if change_abstraction.changed: return city["name"] else: return city[0] def get_lat(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lat(city) 0 """ if change_abstraction.changed: return city["lat"] else: return city[1] def get_lon(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lon(city) 1 """ if change_abstraction.changed: return city["lon"] else: return city[2] def change_abstraction(change): change_abstraction.changed = change change_abstraction.changed = False def nut_finder(t): """Returns True if t contains a node with the value 'nut' and False otherwise. 
>>> scrat = tree('nut') >>> nut_finder(scrat) True >>> sproul = tree('roots', [tree('branch1', [tree('leaf'), tree('nut')]), tree('branch2')]) >>> nut_finder(sproul) True >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> nut_finder(numbers) False >>> t = tree(1, [tree('nut',[tree('not nut')])]) >>> nut_finder(t) True """ if label(t) == 'nut': return True for node in branches(t): if nut_finder(node): return True return False def sprout_leaves(t, values): """Sprout new leaves containing the data in values at each leaf in the original tree t and return the resulting tree. >>> t1 = tree(1, [tree(2), tree(3)]) >>> print_tree(t1) 1 2 3 >>> new1 = sprout_leaves(t1, [4, 5]) >>> print_tree(new1) 1 2 4 5 3 4 5 >>> t2 = tree(1, [tree(2, [tree(3)])]) >>> print_tree(t2) 1 2 3 >>> new2 = sprout_leaves(t2, [6, 1, 2]) >>> print_tree(new2) 1 2 3 6 1 2 """ if is_leaf(t): return tree(label(t),[tree(v) for v in values]) return tree(label(t),[sprout_leaves(b, values) for b in branches(t)]) # Tree ADT def tree(label, branches=[]): """Construct a tree with the given label value and a list of branches.""" for branch in branches: assert is_tree(branch), 'branches must be trees' return [label] + list(branches) def label(tree): """Return the label value of a tree.""" return tree[0] def branches(tree): """Return the list of branches of the given tree.""" return tree[1:] def is_tree(tree): """Returns True if the given tree is a tree, and False otherwise.""" if type(tree) != list or len(tree) < 1: return False for branch in branches(tree): if not is_tree(branch): return False return True def is_leaf(tree): """Returns True if the given tree's list of branches is empty, and False otherwise. """ return not branches(tree) def print_tree(t, indent=0): """Print a representation of this tree in which each node is indented by two spaces times its depth from the root. >>> print_tree(tree(1)) 1 >>> print_tree(tree(1, [tree(2)])) 1 2 >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> print_tree(numbers) 1 2 3 4 5 6 7 """ print(' ' * indent + str(label(t))) for b in branches(t): print_tree(b, indent + 1) def copy_tree(t): """Returns a copy of t. Only for testing purposes. >>> t = tree(5) >>> copy = copy_tree(t) >>> t = tree(6) >>> print_tree(copy) 5 """ return tree(label(t), [copy_tree(b) for b in branches(t)]) def add_chars(w1, w2): """ Return a string containing the characters you need to add to w1 to get w2. You may assume that w1 is a subsequence of w2. >>> add_chars("owl", "howl") 'h' >>> add_chars("want", "wanton") 'on' >>> add_chars("rat", "radiate") 'diae' >>> add_chars("a", "prepare") 'prepre' >>> add_chars("resin", "recursion") 'curo' >>> add_chars("fin", "effusion") 'efuso' >>> add_chars("coy", "cacophony") 'acphon' >>> from construct_check import check >>> # ban iteration and sets >>> check(LAB_SOURCE_FILE, 'add_chars', ... ['For', 'While', 'Set', 'SetComp']) # Must use recursion True """ "*** YOUR CODE HERE ***" def add_trees(t1, t2): """ >>> numbers = tree(1, ... [tree(2, ... [tree(3), ... tree(4)]), ... tree(5, ... [tree(6, ... [tree(7)]), ... 
tree(8)])]) >>> print_tree(add_trees(numbers, numbers)) 2 4 6 8 10 12 14 16 >>> print_tree(add_trees(tree(2), tree(3, [tree(4), tree(5)]))) 5 4 5 >>> print_tree(add_trees(tree(2, [tree(3)]), tree(2, [tree(3), tree(4)]))) 4 6 4 >>> print_tree(add_trees(tree(2, [tree(3, [tree(4), tree(5)])]), \ tree(2, [tree(3, [tree(4)]), tree(5)]))) 4 6 8 5 5 """ "*** YOUR CODE HERE ***" # Shakespeare and Dictionaries # MASKED: build_successors_table function (lines 335-356) def construct_sent(word, table): """Prints a random sentence starting with word, sampling from table. >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']} >>> construct_sent('Wow', table) 'Wow!' >>> construct_sent('Sentences', table) 'Sentences are cool.' """ import random result = '' while word not in ['.', '!', '?']: "*** YOUR CODE HERE ***" return result.strip() + word def shakespeare_tokens(path='shakespeare.txt', url='http://composingprograms.com/shakespeare.txt'): """Return the words of Shakespeare's plays as a list.""" import os from urllib.request import urlopen if os.path.exists(path): return open('shakespeare.txt', encoding='ascii').read().split() else: shakespeare = urlopen(url) return shakespeare.read().decode(encoding='ascii').split() # Uncomment the following two lines # tokens = shakespeare_tokens() # table = build_successors_table(tokens) def random_sent(): import random return construct_sent(random.choice(table['.']), table)
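The construct_sent blank in the masked code above is fully pinned down by its doctests; here is a minimal sketch of a completion consistent with them (an assumption, not the lab's official solution):

import random

def construct_sent(word, table):
    """Sketch: walk the successors table until a sentence-ending token."""
    result = ''
    while word not in ['.', '!', '?']:
        result += word + ' '               # accumulate the current word
        word = random.choice(table[word])  # sample one of its successors
    return result.strip() + word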
def build_successors_table(tokens):
    """Return a dictionary: keys are words; values are lists of successors.

    >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.']
    >>> table = build_successors_table(text)
    >>> sorted(table)
    [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to']
    >>> table['to']
    ['investigate', 'eat']
    >>> table['pie']
    ['.']
    >>> table['.']
    ['We']
    """
    table = {}
    prev = '.'
    for word in tokens:
        if prev not in table:
            table[prev] = []          # first occurrence of prev: start its successor list
        table[prev].append(word)      # record word as a successor of prev
        prev = word
    return table
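For context, a short end-to-end run combining the table builder above with the construct_sent sketch (token list taken from the doctest; the printed sentence varies because successors are sampled at random):

tokens = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad',
          'guys', 'and', 'to', 'eat', 'pie', '.']
table = build_successors_table(tokens)
print(construct_sent('We', table))  # e.g. "We came to eat pie."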
335
356
LAB_SOURCE_FILE = "lab05.py" """ Lab 05: Trees and Proj2 Prep """ def couple(lst1, lst2): """Return a list that contains lists with i-th elements of two sequences coupled together. >>> lst1 = [1, 2, 3] >>> lst2 = [4, 5, 6] >>> couple(lst1, lst2) [[1, 4], [2, 5], [3, 6]] >>> lst3 = ['c', 6] >>> lst4 = ['s', '1'] >>> couple(lst3, lst4) [['c', 's'], [6, '1']] """ assert len(lst1) == len(lst2) a = [] for i in range(len(lst1)): a.append([lst1[i], lst2[i]]) return a from math import sqrt def distance(city1, city2): """ >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 """ x1, y1 = get_lat(city1), get_lon(city1) x2, y2 = get_lat(city2), get_lon(city2) return sqrt((x1 - x2)**2 + (y1 - y2)**2) def closer_city(lat, lon, city1, city2): """ Returns the name of either city1 or city2, whichever is closest to coordinate (lat, lon). >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' """ tmp = make_city('tmp', lat, lon) dis1 = distance(city1, tmp) dis2 = distance(city2, tmp) if dis1 > dis2: return get_name(city2) else: return get_name(city1) def check_abstraction(): """ There's nothing for you to do for this function, it's just here for the extra doctest >>> change_abstraction(True) >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' >>> change_abstraction(False) """ # Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it! def make_city(name, lat, lon): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' >>> get_lat(city) 0 >>> get_lon(city) 1 """ if change_abstraction.changed: return {"name" : name, "lat" : lat, "lon" : lon} else: return [name, lat, lon] def get_name(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' """ if change_abstraction.changed: return city["name"] else: return city[0] def get_lat(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lat(city) 0 """ if change_abstraction.changed: return city["lat"] else: return city[1] def get_lon(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lon(city) 1 """ if change_abstraction.changed: return city["lon"] else: return city[2] def change_abstraction(change): change_abstraction.changed = change change_abstraction.changed = False def nut_finder(t): """Returns True if t contains a node with the value 'nut' and False otherwise. 
>>> scrat = tree('nut') >>> nut_finder(scrat) True >>> sproul = tree('roots', [tree('branch1', [tree('leaf'), tree('nut')]), tree('branch2')]) >>> nut_finder(sproul) True >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> nut_finder(numbers) False >>> t = tree(1, [tree('nut',[tree('not nut')])]) >>> nut_finder(t) True """ if label(t) == 'nut': return True for node in branches(t): if nut_finder(node): return True return False def sprout_leaves(t, values): """Sprout new leaves containing the data in values at each leaf in the original tree t and return the resulting tree. >>> t1 = tree(1, [tree(2), tree(3)]) >>> print_tree(t1) 1 2 3 >>> new1 = sprout_leaves(t1, [4, 5]) >>> print_tree(new1) 1 2 4 5 3 4 5 >>> t2 = tree(1, [tree(2, [tree(3)])]) >>> print_tree(t2) 1 2 3 >>> new2 = sprout_leaves(t2, [6, 1, 2]) >>> print_tree(new2) 1 2 3 6 1 2 """ if is_leaf(t): return tree(label(t),[tree(v) for v in values]) return tree(label(t),[sprout_leaves(b, values) for b in branches(t)]) # Tree ADT def tree(label, branches=[]): """Construct a tree with the given label value and a list of branches.""" for branch in branches: assert is_tree(branch), 'branches must be trees' return [label] + list(branches) def label(tree): """Return the label value of a tree.""" return tree[0] def branches(tree): """Return the list of branches of the given tree.""" return tree[1:] def is_tree(tree): """Returns True if the given tree is a tree, and False otherwise.""" if type(tree) != list or len(tree) < 1: return False for branch in branches(tree): if not is_tree(branch): return False return True def is_leaf(tree): """Returns True if the given tree's list of branches is empty, and False otherwise. """ return not branches(tree) def print_tree(t, indent=0): """Print a representation of this tree in which each node is indented by two spaces times its depth from the root. >>> print_tree(tree(1)) 1 >>> print_tree(tree(1, [tree(2)])) 1 2 >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> print_tree(numbers) 1 2 3 4 5 6 7 """ print(' ' * indent + str(label(t))) for b in branches(t): print_tree(b, indent + 1) def copy_tree(t): """Returns a copy of t. Only for testing purposes. >>> t = tree(5) >>> copy = copy_tree(t) >>> t = tree(6) >>> print_tree(copy) 5 """ return tree(label(t), [copy_tree(b) for b in branches(t)]) def add_chars(w1, w2): """ Return a string containing the characters you need to add to w1 to get w2. You may assume that w1 is a subsequence of w2. >>> add_chars("owl", "howl") 'h' >>> add_chars("want", "wanton") 'on' >>> add_chars("rat", "radiate") 'diae' >>> add_chars("a", "prepare") 'prepre' >>> add_chars("resin", "recursion") 'curo' >>> add_chars("fin", "effusion") 'efuso' >>> add_chars("coy", "cacophony") 'acphon' >>> from construct_check import check >>> # ban iteration and sets >>> check(LAB_SOURCE_FILE, 'add_chars', ... ['For', 'While', 'Set', 'SetComp']) # Must use recursion True """ "*** YOUR CODE HERE ***" def add_trees(t1, t2): """ >>> numbers = tree(1, ... [tree(2, ... [tree(3), ... tree(4)]), ... tree(5, ... [tree(6, ... [tree(7)]), ... 
tree(8)])]) >>> print_tree(add_trees(numbers, numbers)) 2 4 6 8 10 12 14 16 >>> print_tree(add_trees(tree(2), tree(3, [tree(4), tree(5)]))) 5 4 5 >>> print_tree(add_trees(tree(2, [tree(3)]), tree(2, [tree(3), tree(4)]))) 4 6 4 >>> print_tree(add_trees(tree(2, [tree(3, [tree(4), tree(5)])]), \ tree(2, [tree(3, [tree(4)]), tree(5)]))) 4 6 8 5 5 """ "*** YOUR CODE HERE ***" # Shakespeare and Dictionaries def build_successors_table(tokens): """Return a dictionary: keys are words; values are lists of successors. >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.'] >>> table = build_successors_table(text) >>> sorted(table) [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to'] >>> table['to'] ['investigate', 'eat'] >>> table['pie'] ['.'] >>> table['.'] ['We'] """ table = {} prev = '.' for word in tokens: if prev not in table: "*** YOUR CODE HERE ***" "*** YOUR CODE HERE ***" prev = word return table def construct_sent(word, table): """Prints a random sentence starting with word, sampling from table. >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']} >>> construct_sent('Wow', table) 'Wow!' >>> construct_sent('Sentences', table) 'Sentences are cool.' """ import random result = '' while word not in ['.', '!', '?']: "*** YOUR CODE HERE ***" return result.strip() + word def shakespeare_tokens(path='shakespeare.txt', url='http://composingprograms.com/shakespeare.txt'): """Return the words of Shakespeare's plays as a list.""" import os from urllib.request import urlopen if os.path.exists(path): return open('shakespeare.txt', encoding='ascii').read().split() else: shakespeare = urlopen(url) return shakespeare.read().decode(encoding='ascii').split() # Uncomment the following two lines # tokens = shakespeare_tokens() # table = build_successors_table(tokens) def random_sent(): import random return construct_sent(random.choice(table['.']), table)
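Of the blanks left in this row, add_chars is the most constrained: its doctests plus the recursion-only check pin down the behavior. A minimal recursive sketch consistent with those doctests (an assumption, not the published solution):

def add_chars(w1, w2):
    """Sketch: characters to add to w1 (a subsequence of w2) to obtain w2."""
    if not w1:                 # nothing left to match: every remaining char of w2 is added
        return w2
    if w1[0] == w2[0]:         # leading characters align: consume both
        return add_chars(w1[1:], w2[1:])
    return w2[0] + add_chars(w1, w2[1:])  # w2's leading char is one we must add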
__init__
Init.

Args:
  name: the entity's name.
  cloud_device_id: Device id from IoT Core or any other IoT application.
  type_name: DBO entity type stored in an EntityType instance.
  fields: List of standard field names.
  is_reporting: if an entity maps 1:1 to a reporting device, it is a reporting entity.
  guid: [Optional] Universally unique identifier (UUID4) for an entity.
  metadata: Contextual metadata about an entity.
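To make the constructor contract concrete, a hypothetical instantiation of the Entity class defined below (all values, including ahu_type and supply_air_temp, are invented for illustration):

# Hypothetical usage; ahu_type (an EntityType) and supply_air_temp
# (an EntityField) are assumed to exist elsewhere.
entity = Entity(
    name='AHU-1',
    cloud_device_id='2541901344105165',
    type_name=ahu_type,
    fields=[supply_air_temp],
    is_reporting=True,
    metadata={'device_id': 'DEV:2809009'},
)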
# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for concrete model entities.""" from typing import Dict, Optional, List from model.entity_field import EntityField from model.entity_type import EntityType class Entity(object): """Class to represent entities within the concrete model. Attributes: name: Human readable name of an entity. guid: UUID4 value for an entity. cloud_device_id: IoT application device id. type_name: Instance of EntityType class. fields: Mapping of standard field names to EntityField instances. is_reporting: if an entity maps 1:1 to a reporting device, it is a reporting entity. metadata: Contextual metadata coming from a physical device. i.e. { location: '/Sif-Solo/Site 1 - Sif/Charleston Road North/B13 - 1875 Charleston/Roof', control_programs: ['Trane AC-1', '1875 Charleston'], device_id: 'DEV:2809009' } """ # MASKED: __init__ function (lines 42-69) @classmethod def FromDict(cls, entity_dict: Dict[str, object]): """class method to create an instance of Entity from mapping of entity attributes to values. Args: entity_dict: dictionary mapping field attributes to values from a loadsheet or building configuration. Returns: An instance of Entity class. """ @property def fields(self) -> Dict[str, EntityField]: """Returns a mapping of standard field names to EntityField instances associated with self.""" return self._fields @fields.setter def fields(self, new_fields: Dict[str, EntityField]) -> None: """Validates that each value of new_fields is an instance of EntityField class and sets. Arguments: new_fields: A mapping of standard field names to EntityField instances. """ @property def guid(self) -> str: """Returns the GUID associated with self.""" return self._guid @guid.setter def guid(self, guid: Optional[str] = None) -> None: """If guid argument is none, generate a new guid for set or just set if none. Args: guid: [Optional] A UUID string. """
  def __init__(self,
               name: str,
               cloud_device_id: Optional[str],
               type_name: EntityType,
               fields: List[EntityField],
               is_reporting: bool,
               guid: Optional[str] = None,
               metadata: Optional[Dict[str, str]] = None):
    """Init.

    Args:
      name: the entity's name.
      cloud_device_id: Device id from IoT Core or any other IoT application.
      type_name: DBO entity type stored in an EntityType instance.
      fields: List of standard field names.
      is_reporting: if an entity maps 1:1 to a reporting device, it is a
        reporting entity.
      guid: [Optional] Universally unique identifier (UUID4) for an entity.
      metadata: Contextual metadata about an entity.
    """
    self.name = name
    self._guid = guid
    self.cloud_device_id = cloud_device_id
    self.type_name = type_name
    self._fields = fields
    self.is_reporting = is_reporting
    self.metadata = metadata
42
69
# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for concrete model entities.""" from typing import Dict, Optional, List from model.entity_field import EntityField from model.entity_type import EntityType class Entity(object): """Class to represent entities within the concrete model. Attributes: name: Human readable name of an entity. guid: UUID4 value for an entity. cloud_device_id: IoT application device id. type_name: Instance of EntityType class. fields: Mapping of standard field names to EntityField instances. is_reporting: if an entity maps 1:1 to a reporting device, it is a reporting entity. metadata: Contextual metadata coming from a physical device. i.e. { location: '/Sif-Solo/Site 1 - Sif/Charleston Road North/B13 - 1875 Charleston/Roof', control_programs: ['Trane AC-1', '1875 Charleston'], device_id: 'DEV:2809009' } """ def __init__(self, name: str, cloud_device_id: Optional[str], type_name: EntityType, fields: List[EntityField], is_reporting: bool, guid: Optional[str] = None, metadata: Optional[Dict[str, str]] = None): """Init. Args: name: the entity's name. cloud_device_id: Device id iot core or any iot application. type_name: DBO entity type stored in EntityType instance. fields: List of standard field names. is_reporting: if an entity maps 1:1 to a reporting device, it is a reporting entity. guid: [Optional] Universally Unique identification code for an entity. metadata: Contextual metadata about an entity. """ self.name = name self._guid = guid self.cloud_device_id = cloud_device_id self.type_name = type_name self._fields = fields self.is_reporting = is_reporting self.metadata = metadata @classmethod def FromDict(cls, entity_dict: Dict[str, object]): """class method to create an instance of Entity from mapping of entity attributes to values. Args: entity_dict: dictionary mapping field attributes to values from a loadsheet or building configuration. Returns: An instance of Entity class. """ @property def fields(self) -> Dict[str, EntityField]: """Returns a mapping of standard field names to EntityField instances associated with self.""" return self._fields @fields.setter def fields(self, new_fields: Dict[str, EntityField]) -> None: """Validates that each value of new_fields is an instance of EntityField class and sets. Arguments: new_fields: A mapping of standard field names to EntityField instances. """ @property def guid(self) -> str: """Returns the GUID associated with self.""" return self._guid @guid.setter def guid(self, guid: Optional[str] = None) -> None: """If guid argument is none, generate a new guid for set or just set if none. Args: guid: [Optional] A UUID string. """
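The guid setter's body is left empty in the file above; here is a minimal self-contained sketch of the behavior its docstring describes, using uuid.uuid4 (an assumption about the intended implementation, not the verified original):

import uuid
from typing import Optional

class GuidExample:
    """Sketch of the guid property contract described in Entity."""

    def __init__(self, guid: Optional[str] = None):
        self.guid = guid  # routed through the setter below

    @property
    def guid(self) -> str:
        return self._guid

    @guid.setter
    def guid(self, guid: Optional[str] = None) -> None:
        # Generate a fresh UUID4 when no guid is provided; otherwise keep it.
        self._guid = guid if guid is not None else str(uuid.uuid4())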
build_simple_model
Build a simple model for testing.

Returns:
    DNN, [ (input layer name, input placeholder, input data) ], target data
'''
This file contains test cases for tflearn
'''

import tensorflow as tf
import zqtflearn

import unittest


class TestInputs(unittest.TestCase):
    '''
    This class contains test cases for several input types
    '''

    INPUT_DATA_1 = [[1], [2], [3], [4], [5]]
    INPUT_DATA_2 = [[6], [7], [8], [9], [10]]
    TARGET = [[14], [18], [22], [26], [30]]  # (input1 + input2) * 2

    def test_list_inputs(self):
        """Test feeding the inputs as a list."""
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit([inpData for _, _, inpData in inputs], target, batch_size=1)

    def test_dict_inputs(self):
        """Test feeding the inputs as a dict keyed by layer name."""
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit({name: inpData for name, _, inpData in inputs}, target, batch_size=1)

    def test_dict_withtensor_inputs(self):
        """Test feeding the inputs as a dict keyed by placeholder."""
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit({placeholder: inpData for _, placeholder, inpData in inputs}, target, batch_size=1)

    # MASKED: build_simple_model function (lines 38-55)


if __name__ == "__main__":
    unittest.main()
    def build_simple_model(self):
        """Build a simple model for testing.

        Returns:
            DNN, [ (input layer name, input placeholder, input data) ], target data
        """
        inputPlaceholder1 = tf.placeholder(tf.float32, (1, 1), name="input1")
        inputPlaceholder2 = tf.placeholder(tf.float32, (1, 1), name="input2")
        input1 = zqtflearn.input_data(placeholder=inputPlaceholder1)
        input2 = zqtflearn.input_data(placeholder=inputPlaceholder2)
        network = zqtflearn.merge([input1, input2], "sum")
        network = zqtflearn.reshape(network, (1, 1))
        network = zqtflearn.fully_connected(network, 1)
        network = zqtflearn.regression(network)
        return (
            zqtflearn.DNN(network),
            [("input1:0", inputPlaceholder1, self.INPUT_DATA_1),
             ("input2:0", inputPlaceholder2, self.INPUT_DATA_2)],
            self.TARGET,
        )
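The constants in this test encode target = (input1 + input2) * 2, which the "sum" merge plus a single fully-connected unit can fit; a quick stand-alone check of that relationship:

# Sanity check of the arithmetic behind TARGET above.
pairs = zip([1, 2, 3, 4, 5], [6, 7, 8, 9, 10])
assert [[(a + b) * 2] for a, b in pairs] == [[14], [18], [22], [26], [30]]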
38
55
'''
This file contains test cases for tflearn
'''

import tensorflow as tf
import zqtflearn

import unittest


class TestInputs(unittest.TestCase):
    '''
    This class contains test cases for several input types
    '''

    INPUT_DATA_1 = [[1], [2], [3], [4], [5]]
    INPUT_DATA_2 = [[6], [7], [8], [9], [10]]
    TARGET = [[14], [18], [22], [26], [30]]  # (input1 + input2) * 2

    def test_list_inputs(self):
        """Test feeding the inputs as a list."""
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit([inpData for _, _, inpData in inputs], target, batch_size=1)

    def test_dict_inputs(self):
        """Test feeding the inputs as a dict keyed by layer name."""
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit({name: inpData for name, _, inpData in inputs}, target, batch_size=1)

    def test_dict_withtensor_inputs(self):
        """Test feeding the inputs as a dict keyed by placeholder."""
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit({placeholder: inpData for _, placeholder, inpData in inputs}, target, batch_size=1)

    def build_simple_model(self):
        """Build a simple model for testing.

        Returns:
            DNN, [ (input layer name, input placeholder, input data) ], target data
        """
        inputPlaceholder1 = tf.placeholder(tf.float32, (1, 1), name="input1")
        inputPlaceholder2 = tf.placeholder(tf.float32, (1, 1), name="input2")
        input1 = zqtflearn.input_data(placeholder=inputPlaceholder1)
        input2 = zqtflearn.input_data(placeholder=inputPlaceholder2)
        network = zqtflearn.merge([input1, input2], "sum")
        network = zqtflearn.reshape(network, (1, 1))
        network = zqtflearn.fully_connected(network, 1)
        network = zqtflearn.regression(network)
        return (
            zqtflearn.DNN(network),
            [("input1:0", inputPlaceholder1, self.INPUT_DATA_1),
             ("input2:0", inputPlaceholder2, self.INPUT_DATA_2)],
            self.TARGET,
        )


if __name__ == "__main__":
    unittest.main()
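Besides the unittest.main() entry point, an individual case can be run with the standard loader/runner, for example:

import unittest

# Assumes TestInputs is importable from this module.
suite = unittest.TestLoader().loadTestsFromTestCase(TestInputs)
unittest.TextTestRunner(verbosity=2).run(suite)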
from_b58check
Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) # MASKED: from_b58check function (lines 348-363) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). 
Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. 
        The byte stream must be of the SEC variety (http://www.secg.org/):
        beginning with a single byte telling what key representation
        follows. A full, uncompressed key is represented by: 0x04
        followed by 64 bytes containing the x and y components of the
        point. For compressed keys with an even y component, 0x02 is
        followed by 32 bytes containing the x component. For compressed
        keys with an odd y component, 0x03 is followed by 32 bytes
        containing the x component.

        Args:
            key_bytes (bytes or str): A byte stream that conforms to the above.

        Returns:
            PublicKey: A PublicKey object.
        """
        b = get_bytes(key_bytes)
        key_bytes_len = len(b)

        key_type = b[0]
        if key_type == 0x04:
            # Uncompressed
            if key_bytes_len != 65:
                raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.")

            x = int.from_bytes(b[1:33], 'big')
            y = int.from_bytes(b[33:65], 'big')
        elif key_type == 0x02 or key_type == 0x03:
            if key_bytes_len != 33:
                raise ValueError("key_bytes must be exactly 33 bytes long when compressed.")

            x = int.from_bytes(b[1:33], 'big')
            ys = bitcoin_curve.y_from_x(x)

            # Pick the one that corresponds to key_type
            last_bit = key_type - 0x2
            for y in ys:
                if y & 0x1 == last_bit:
                    break
        else:
            return None

        return PublicKey(x, y)

    @staticmethod
    def from_hex(h):
        """ Generates a public key object from a hex-encoded string.

        See from_bytes() for requirements of the hex string.

        Args:
            h (str): A hex-encoded string.

        Returns:
            PublicKey: A PublicKey object.
        """
        return PublicKey.from_bytes(h)

    @staticmethod
    def from_signature(message, signature):
        """ Attempts to create PublicKey object by deriving it from the
        message and signature.

        Args:
            message (bytes): The message to be verified.
            signature (Signature): The signature for message. The recovery_id
                must not be None!

        Returns:
            PublicKey: A PublicKey object derived from the signature, if it
                exists. None otherwise.
        """
        if signature.recovery_id is None:
            raise ValueError("The signature must have a recovery_id.")

        msg = get_bytes(message)
        pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id)

        for k, recid in pub_keys:
            if signature.recovery_id is not None and recid == signature.recovery_id:
                return PublicKey(k.x, k.y)

        return None

    @staticmethod
    def verify_bitcoin(message, signature, address):
        """ Verifies a message signed using PrivateKey.sign_bitcoin()
        or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.)

        Args:
            message(bytes): The message that the signature corresponds to.
            signature (bytes or str): A Base64 encoded signature
            address (str): Base58Check encoded address.

        Returns:
            bool: True if the signature verified properly, False otherwise.
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. 
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
@staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big'))
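The implementation above is the inverse of PrivateKey.to_b58check(). A minimal round-trip sketch, assuming the module's PrivateKey class is in scope (the integer 12345 is only an illustrative key):

```python
k = PrivateKey.from_int(12345)
encoded = k.to_b58check()                   # 0x80 mainnet version byte + 32 key bytes, Base58Check encoded
decoded = PrivateKey.from_b58check(encoded)
assert int(decoded) == int(k)               # the key survives the round trip
```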
348
363
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. 
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
raw_sign
Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature.
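As implemented, raw_sign() returns a (point, recovery_id) tuple, and sign() simply wraps the same values in a Signature object. A sketch of that relationship, using names defined in this module:

```python
priv = PrivateKey.from_random()
pt, rec_id = priv.raw_sign(b"hello")  # raw signature point: r = pt.x, s = pt.y
sig = Signature(pt.x, pt.y, rec_id)   # the same wrapping that sign() performs
assert priv.public_key.verify(b"hello", sig)
```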
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key # MASKED: raw_sign function (lines 392-428) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. 
""" b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. """ magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. 
""" return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 
33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. """ return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). 
""" # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. 
Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. """ raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). 
""" return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. """ if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. 
Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. """ MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. 
""" return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
def raw_sign(self, message, do_hash=True):
    """ Signs message using this private key.

    Args:
        message (bytes): The message to be signed. If a string is
            provided it is assumed the encoding is 'ascii' and converted
            to bytes. If this is not the case, it is up to the caller to
            convert the string to bytes appropriately and pass in the bytes.
        do_hash (bool): True if the message should be hashed prior
            to signing, False if not. This should always be left as
            True except in special situations which require doing
            the hash outside (e.g. handling Bitcoin bugs).

    Returns:
        tuple(Point, int): a raw point (r = pt.x, s = pt.y) which is
            the signature, and the recovery id.
    """
    if isinstance(message, str):
        msg = bytes(message, 'ascii')
    elif isinstance(message, bytes):
        msg = message
    else:
        raise TypeError("message must be either str or bytes!")

    sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash)

    # Take care of large s:
    # Bitcoin deals with large s by subtracting
    # s from the curve order. See:
    # https://bitcointalk.org/index.php?topic=285142.30;wap2
    if sig_pt.y >= (bitcoin_curve.n // 2):
        sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y)
        rec_id ^= 0x1

    return (sig_pt, rec_id)
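The low-s normalization keeps signatures canonical: since (r, s) and (r, n - s) are both valid ECDSA signatures for the same message, Bitcoin nodes reject the high-s form to limit signature malleability. Negating s also flips the parity of the recovered point, which is why the recovery id is toggled with `rec_id ^= 0x1`. Continuing the sketch above, the normalized s always lands in the lower half of the group order:

```python
# Continuing the earlier sketch; bitcoin_curve is the module-level
# secp256k1 instance defined in this file.
sig_pt, rec_id = priv.raw_sign(b"hello")
assert sig_pt.y < bitcoin_curve.n // 2
```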
392
428
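The full file below also defines the BIP-32 HD classes (`HDKey`, `HDPrivateKey`, `HDPublicKey`), so a short derivation sketch may help. This is a minimal sketch under the same hypothetical import path as above; the derivation path is illustrative, not prescribed by the code, and `master_key_from_entropy` requires the `mnemonic` package to be installed:

```python
from pywallet.keys import HDPrivateKey, HDKey  # hypothetical path, as above

master, mnemonic = HDPrivateKey.master_key_from_entropy()
acct = HDKey.from_path(master, "m/44'/60'/0'/0")[-1]   # illustrative path
child = HDPrivateKey.from_parent(acct, 0)
print(child.public_key.address())   # keccak-based '0x...' address
```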
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. 
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
sign_bitcoin
Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities.

Note:
    0x18 + b"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing.

Args:
    message (bytes or str): Message to be signed.
    compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used.

Returns:
    bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed.
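The magic byte described in this docstring can be unpacked with a few lines; this sketch simply restates the convention that `PublicKey.verify_bitcoin` in the file below relies on:

```python
import base64

def decode_magic(b64_sig):
    """Split a bitcoind-style signed message into its parts."""
    raw = base64.b64decode(b64_sig)
    magic = raw[0]
    recovery_id = (magic - 27) & 0x3         # which candidate public key
    compressed = ((magic - 27) & 0x4) != 0   # compressed-pubkey flag
    return recovery_id, compressed, raw[1:]  # r || s (64 bytes) follows
```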
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) # MASKED: sign_bitcoin function (lines 456-492) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. 
""" b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. """ magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. 
""" return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 
33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. """ return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). 
""" # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. 
Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. """ raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). 
""" return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. """ if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. 
Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. """ MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. 
""" return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig))
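A hedged round trip for the implementation above, paired with `PublicKey.verify_bitcoin()`. One caveat grounded in this fork's source: `address()` here returns an Ethereum-style keccak hex string, while `verify_bitcoin()` expects a legacy Base58Check address, so the example rebuilds that address by hand (again assuming the hypothetical module name `keys`):

```python
import base58

# Sketch only: `keys` is a hypothetical name for the module defined in this file.
from keys import PrivateKey, PublicKey

priv = PrivateKey.from_random()
sig_b64 = priv.sign_bitcoin(b"hello", compressed=True)  # 65 magic+sig bytes, Base64

# verify_bitcoin() b58-decodes the address and compares HASH160 values,
# so build the legacy form that this fork's address() no longer returns:
legacy_address = base58.b58encode_check(
    bytes([PublicKey.MAINNET_VERSION]) + priv.public_key.hash160(compressed=True))
assert PublicKey.verify_bitcoin(b"hello", sig_b64, legacy_address)
```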
456
492
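The two integers above are this row's `start_line` and `end_line` fields, locating the masked `sign_bitcoin` body inside `file_content`. A hypothetical sanity check a consumer of this dataset might run, assuming (as the surrounding rows suggest) that the indices are 1-indexed and inclusive:

```python
def masked_span(file_content: str, start_line: int, end_line: int) -> str:
    # Assumed convention: start_line/end_line are 1-indexed and inclusive.
    return "\n".join(file_content.splitlines()[start_line - 1:end_line])

# e.g. masked_span(file_content, 456, 492) should reproduce the
# sign_bitcoin implementation shown above (modulo trailing whitespace).
```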
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. 
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
from_der
Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature.
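Since `from_der()` validates the full ASN.1 layout (sequence marker, per-integer length and padding checks, and the `1 <= r, s < n` range test), the simplest demonstration is a round trip against `Signature.to_der()`. A minimal sketch, again assuming the hypothetical module name `keys`; note that `from_der()` reconstructs only `r` and `s`, not the recovery id:

```python
# Sketch only: `keys` is a hypothetical name for the module defined in this file.
from keys import PrivateKey, Signature

sig = PrivateKey.from_random().sign(b"der test")

der = sig.to_der()
decoded = Signature.from_der(der)
assert (decoded.r, decoded.s) == (sig.r, sig.s)

# from_der() also accepts a hex string, via get_bytes():
assert Signature.from_der(der.hex()).s == sig.s
```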
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ # MASKED: from_der function (lines 811-884) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. """ return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. 
parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. """ raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). 
""" return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. """ if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. 
Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. """ MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. 
""" return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
@staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s)
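To make the byte layout that `from_der()` walks concrete, here is a self-contained round trip using only the standard library. `der_encode` is a hypothetical stand-in for `Signature.to_der()`, and the r/s values are toy numbers chosen to exercise the 0x00 padding rule, not a real signature.

```python
# Round trip through the DER layout parsed by from_der():
#   0x30 <len> 0x02 <len r> <r> 0x02 <len s> <s>
def der_encode(r, s):
    def to_asn1_int(x):
        b = x.to_bytes(max(1, (x.bit_length() + 7) // 8), 'big')
        if b[0] & 0x80:
            b = b'\x00' + b   # avoid interpretation as a negative integer
        return b
    rb, sb = to_asn1_int(r), to_asn1_int(s)
    body = bytes([0x02, len(rb)]) + rb + bytes([0x02, len(sb)]) + sb
    return bytes([0x30, len(body)]) + body

der = der_encode(0x80, 0x7f)   # r needs a pad byte, s does not
assert der.hex() == '30070202008002017f'

# Parse it back the way from_der() walks the buffer.
assert der[0] == 0x30 and der[1] == len(der) - 2
rlen = der[3]
r = int.from_bytes(der[4:4 + rlen], 'big')
assert der[4 + rlen] == 0x02
slen = der[5 + rlen]
s = int.from_bytes(der[6 + rlen:6 + rlen + slen], 'big')
assert (r, s) == (0x80, 0x7f)
```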
811
884
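A short, self-contained sketch of the derivation-path convention implemented by `HDKey.from_path()` and `parse_path()` above: a trailing apostrophe marks a hardened child, whose index has bit 0x80000000 set. `path_to_indices` is a hypothetical helper for illustration, not part of the module.

```python
# BIP-32 path convention: "44'" means hardened index 44 | 0x80000000.
def path_to_indices(path):
    indices = []
    for part in path.rstrip('/').split('/'):
        if part == 'm':
            continue                      # master-node marker
        elif part.endswith("'"):          # hardened child
            indices.append(int(part[:-1]) | 0x80000000)
        else:
            indices.append(int(part))
    return indices

assert path_to_indices("m/44'/0'/0'/0/1") == [
    0x80000000 + 44, 0x80000000, 0x80000000, 0, 1]
```

Hardened children can only be derived through the private-key path, which is why `HDPublicKey.from_parent()` raises a ValueError for indices with that bit set.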
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. 
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
to_der
Encodes this signature using DER

Returns:
    bytes: The DER encoding of (self.r, self.s).
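For orientation (worked arithmetic, not part of the source row): with r and s each canonicalized to 32 bytes the encoding is `0x30 0x44 0x02 0x20 <r> 0x02 0x20 <s>`, 70 bytes in total (0x44 = 68 = length minus the two header bytes). When a component's top bit is set, `_canonicalize` prepends a 0x00 pad, so each component can grow to 33 bytes and the whole signature to the 72-byte maximum that `from_der` accepts.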
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv # MASKED: to_der function (lines 971-983) def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. 
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der
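A quick round-trip check of this implementation against `Signature.from_der` from the same module (a sketch, not from the source):

```python
# Sketch: DER-encode a freshly produced signature and parse it back.
priv = PrivateKey.from_random()
sig = priv.sign(b"hello")

der = sig.to_der()
# Layout: 0x30 <len> 0x02 <len r> <r> 0x02 <len s> <s>
assert der[0] == 0x30 and der[1] == len(der) - 2

sig2 = Signature.from_der(der)
assert (sig2.r, sig2.s) == (sig.r, sig.s)
```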
971
983
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object built from the first 32 bytes of b. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object. """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed.
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self._key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object.
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
_set_widget_id
Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element. user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element.
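Since the implementation of `_set_widget_id` is masked in the file below, here is a minimal sketch of what a function matching this docstring could look like. It is inferred from how `_get_widget_ui_value` calls it (it must set `el.id` on the widget proto) and from the presence of `_build_duplicate_widget_message` and `DuplicateWidgetID` in the file; the `ctx.widget_ids_this_run.add()` registry call is an assumption, not a confirmed Streamlit API.

```python
# Hedged sketch only -- inferred from the docstring and surrounding code,
# not the actual masked implementation.
def _set_widget_id(widget_type, element, user_key=None):
    """Set the widget id."""
    # Build a stable ID: the user-provided key if given, else a hash
    # of the serialized element proto.
    element_hash = hash(element.SerializeToString())
    if user_key is not None:
        widget_id = "%s-%s" % (user_key, element_hash)
    else:
        widget_id = "%s" % element_hash

    ctx = get_report_ctx()
    if ctx is not None:
        # Assumption: the report context tracks widget IDs seen this run,
        # so a second registration of the same ID is a user error.
        added = ctx.widget_ids_this_run.add(widget_id)
        if not added:
            raise DuplicateWidgetID(
                _build_duplicate_widget_message(widget_type, user_key)
            )

    # Every widget proto is assumed to carry an `id` field.
    el = getattr(element, widget_type)
    el.id = widget_id
```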
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from the function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig.
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) # MASKED: _set_widget_id function (lines 156-184) def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 
2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. 
if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. It is hard to write secure HTML, so by using this argument you may be compromising your users' security. 
For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display an error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display a warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underlying DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used.
height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.line_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8 height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("line", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def area_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display an area chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.area_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("area", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def bar_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a bar chart.
This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(50, 3), ... columns=["a", "b", "c"]) ... >>> st.bar_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("bar", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def vega_lite_chart( self, element, data=None, spec=None, width=0, use_container_width=False, **kwargs, ): """Display a chart using the Vega-Lite library. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Either the data to be plotted or a Vega-Lite spec containing the data (which more closely follows the Vega-Lite API). spec : dict or None The Vega-Lite spec for the chart. If the spec was already passed in the previous argument, this must be set to None. See https://vega.github.io/vega-lite/docs/ for more info. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Vega-Lite spec. Please refer to the Vega-Lite documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Vega-Lite's native `width` value. **kwargs : any Same as spec, but as keywords. Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) >>> >>> st.vega_lite_chart(df, { ... 'mark': {'type': 'circle', 'tooltip': True}, ... 'encoding': { ... 'x': {'field': 'a', 'type': 'quantitative'}, ... 'y': {'field': 'b', 'type': 'quantitative'}, ... 'size': {'field': 'c', 'type': 'quantitative'}, ... 'color': {'field': 'c', 'type': 'quantitative'}, ... }, ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Vega-Lite usage without Streamlit can be found at https://vega.github.io/vega-lite/examples/. Most of those can be easily translated to the syntax shown above. """ import streamlit.elements.vega_lite as vega_lite if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html" ) vega_lite.marshall( element.vega_lite_chart, data, spec, use_container_width=use_container_width, **kwargs, ) @_with_element def altair_chart(self, element, altair_chart, width=0, use_container_width=False): """Display a chart using the Altair library. Parameters ---------- altair_chart : altair.vegalite.v2.api.Chart The Altair chart object to display. width : number Deprecated. If != 0 (default), will show an alert.
From now on you should set the width directly in the Altair spec. Please refer to the Altair documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Altair's native `width` value. Example ------- >>> import pandas as pd >>> import numpy as np >>> import altair as alt >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) ... >>> c = alt.Chart(df).mark_circle().encode( ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c']) >>> >>> st.altair_chart(c, use_container_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Altair charts can be found at https://altair-viz.github.io/gallery/. """ import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphlib graph object or dot string to display. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphlib graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz_chart` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it.
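# A minimal usage sketch for reference (hypothetical figure; assumes the
# plotly package is installed, and any plotly.graph_objs Figure works):
#
#     import plotly.graph_objs as go
#     fig = go.Figure(data=go.Bar(y=[2, 3, 1]))
#     st.plotly_chart(fig, use_container_width=True)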
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib supports several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of multiselect options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the multiselect.
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices based on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ...
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
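# (For context: these limits are assumed here to track JavaScript's
# safe-integer range, roughly +/-(2**53 - 1), since slider values are
# ultimately handled by the browser frontend.)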
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text area widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time."
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value is not None: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value is not None and min_value > value) or (max_value is not None and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions.
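# For example (illustrative only): st.number_input("x", 0, 10, 5) passes
# the all-int checks above, while st.number_input("x", 0.0, 1.0, 0.5) is
# all-float; mixing ints and floats raises a StreamlitAPIException.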
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL's documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'baz': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ...
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use the `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- pydeck_obj : pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }) >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows(). " "Command requires exactly one dataset." ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error.
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: # Read the step and stop of the original index *before* dropping it, # so the new index continues from where the existing data ended. old_step = _get_pandas_index_attr(data, "step") old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) # We have to drop the predefined index data = data.reset_index(drop=True) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
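# A minimal, hypothetical sketch (not used anywhere in this module) of the
# reshape that _maybe_melt_data_for_add_rows applies for line/area/bar chart
# deltas; the column names "a" and "b" are made up for illustration.
def _add_rows_melt_example():
    import pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})  # wide, default RangeIndex
    melted = pd.melt(df.reset_index(), id_vars=["index"])
    # `melted` is long format, one row per (index, variable) pair:
    #    index variable  value
    # 0      0        a      1
    # 1      1        a      2
    # 2      0        b      3
    # 3      1        b      4
    return melted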
def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id
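# NOTE: _set_widget_id above assumes ctx.widget_ids_this_run is a set-like
# container whose add() reports whether the ID was newly added (unlike the
# built-in set.add, which always returns None). A minimal sketch of that
# assumed contract, with a hypothetical class name:
class _WidgetIdSet(object):
    def __init__(self):
        self._ids = set()

    def add(self, widget_id):
        """Add widget_id; return True only if it was not already present."""
        if widget_id in self._ids:
            return False
        self._ids.add(widget_id)
        return True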
156
184
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig.
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display an error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display a warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underlying DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used.
height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.line_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8 height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("line", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def area_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display an area chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.area_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("area", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def bar_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a bar chart.
This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(50, 3), ... columns=["a", "b", "c"]) ... >>> st.bar_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("bar", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def vega_lite_chart( self, element, data=None, spec=None, width=0, use_container_width=False, **kwargs, ): """Display a chart using the Vega-Lite library. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Either the data to be plotted or a Vega-Lite spec containing the data (which more closely follows the Vega-Lite API). spec : dict or None The Vega-Lite spec for the chart. If the spec was already passed in the previous argument, this must be set to None. See https://vega.github.io/vega-lite/docs/ for more info. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Vega-Lite spec. Please refer to the Vega-Lite documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Vega-Lite's native `width` value. **kwargs : any Same as spec, but as keywords. Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) >>> >>> st.vega_lite_chart(df, { ... 'mark': {'type': 'circle', 'tooltip': True}, ... 'encoding': { ... 'x': {'field': 'a', 'type': 'quantitative'}, ... 'y': {'field': 'b', 'type': 'quantitative'}, ... 'size': {'field': 'c', 'type': 'quantitative'}, ... 'color': {'field': 'c', 'type': 'quantitative'}, ... }, ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Vega-Lite usage without Streamlit can be found at https://vega.github.io/vega-lite/examples/. Most of those can be easily translated to the syntax shown above. """ import streamlit.elements.vega_lite as vega_lite if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html" ) vega_lite.marshall( element.vega_lite_chart, data, spec, use_container_width=use_container_width, **kwargs, ) @_with_element def altair_chart(self, element, altair_chart, width=0, use_container_width=False): """Display a chart using the Altair library. Parameters ---------- altair_chart : altair.vegalite.v2.api.Chart The Altair chart object to display. width : number Deprecated. If != 0 (default), will show an alert.
From now on you should set the width directly in the Altair spec. Please refer to the Altair documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Altair's native `width` value. Example ------- >>> import pandas as pd >>> import numpy as np >>> import altair as alt >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) ... >>> c = alt.Chart(df).mark_circle().encode( ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c']) >>> >>> st.altair_chart(c, use_container_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Altair charts can be found at https://altair-viz.github.io/gallery/. """ import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphviz graph object or dot string to display width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphviz graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib supports several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options. Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices based on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ...
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
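# Illustrative sketch (values assumed, not from the source): the JSNumber
# helpers below reject numbers that cannot be represented exactly in
# JavaScript. An int bound beyond 2 ** 53 - 1, e.g.
#
#     JSNumber.validate_int_bounds(2 ** 60, "`max_value`")
#
# raises a JSNumberBoundsException, which the except clause below
# re-packages as a StreamlitAPIException.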
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions, e.g. ['png', 'jpg']. By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet.
# (When ready to release, turn test back on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # Set the value default. if value is None: value = "#000000" # Make sure the value is a string. if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # Validate the value; expect a hex string. match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget.
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
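# Illustrative usage of the type-consistency rules above (the label and
# numbers are hypothetical): mixing an int default with float bounds, e.g.
#
#     st.number_input("n", min_value=0.0, max_value=10.0, value=5)
#
# fails the all_ints/all_floats check and raises StreamlitAPIException,
# while value=5.0 with the same bounds is accepted.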
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL's documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'baz': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ...
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use the `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- pydeck_obj: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }) >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows(). " "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error.
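# Illustrative sketch of the melt performed below (data assumed): a wide
# frame with a RangeIndex
#
#     index  col1  col2                 index  variable  value
#     0      1     3      --melt-->     0      col1      1
#     1      2     4                    1      col1      2
#                                       0      col2      3
#                                       1      col2      4
#
# i.e. pd.melt(data.reset_index(), id_vars=["index"]) stacks the value
# columns into (variable, value) pairs keyed by the original index.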
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
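# A minimal sketch (field names taken from uses elsewhere in this file) of
# the enqueue path the helpers above implement:
#
#     msg = ForwardMsg_pb2.ForwardMsg()
#     msg.delta.new_element.text.body = "hello"
#     _enqueue_message(msg)  # raises NoSessionContext outside a running report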
_value_or_dg
Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None.
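A minimal sketch of this contract (the widget value is hypothetical; `NoValue` and `DeltaGenerator` are the classes defined in the file below):

```python
rv_dg = DeltaGenerator()        # stands in for the element's DeltaGenerator
_value_or_dg(NoValue, rv_dg)    # -> None: the widget deliberately returns nothing
_value_or_dg(None, rv_dg)       # -> rv_dg: plain elements yield the DeltaGenerator
_value_or_dg(42, rv_dg)         # -> 42: widgets keep their meaningful value
```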
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig.
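# Illustrative sketch of the transformation this decorator enables, using
# st.text (decorated later in this file): a method defined as
#
#     @_with_element
#     def text(self, element, body): ...
#
# is exposed to users as st.text(body); the element proto is created here,
# filled by the method, and enqueued via _enqueue_new_element_delta.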
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underlying DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used.
height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.line_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8 height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("line", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def area_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display an area chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.area_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("area", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def bar_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a bar chart.
This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(50, 3), ... columns=["a", "b", "c"]) ... >>> st.bar_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("bar", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def vega_lite_chart( self, element, data=None, spec=None, width=0, use_container_width=False, **kwargs, ): """Display a chart using the Vega-Lite library. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Either the data to be plotted or a Vega-Lite spec containing the data (which more closely follows the Vega-Lite API). spec : dict or None The Vega-Lite spec for the chart. If the spec was already passed in the previous argument, this must be set to None. See https://vega.github.io/vega-lite/docs/ for more info. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Vega-Lite spec. Please refer to the Vega-Lite documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Vega-Lite's native `width` value. **kwargs : any Same as spec, but as keywords. Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) >>> >>> st.vega_lite_chart(df, { ... 'mark': {'type': 'circle', 'tooltip': True}, ... 'encoding': { ... 'x': {'field': 'a', 'type': 'quantitative'}, ... 'y': {'field': 'b', 'type': 'quantitative'}, ... 'size': {'field': 'c', 'type': 'quantitative'}, ... 'color': {'field': 'c', 'type': 'quantitative'}, ... }, ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Vega-Lite usage without Streamlit can be found at https://vega.github.io/vega-lite/examples/. Most of those can be easily translated to the syntax shown above. """ import streamlit.elements.vega_lite as vega_lite if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html" ) vega_lite.marshall( element.vega_lite_chart, data, spec, use_container_width=use_container_width, **kwargs, ) @_with_element def altair_chart(self, element, altair_chart, width=0, use_container_width=False): """Display a chart using the Altair library. Parameters ---------- altair_chart : altair.vegalite.v2.api.Chart The Altair chart object to display. width : number Deprecated. If != 0 (default), will show an alert.
From now on you should set the width directly in the Altair spec. Please refer to the Altair documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Altair's native `width` value. Example ------- >>> import pandas as pd >>> import numpy as np >>> import altair as alt >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) ... >>> c = alt.Chart(df).mark_circle().encode( ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c']) >>> >>> st.altair_chart(c, use_container_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Altair charts can be found at https://altair-viz.github.io/gallery/. """ import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphviz graph object or dot string to display. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphviz graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
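        # A minimal usage sketch for this command (illustrative comments only,
        # not executed here; `go.Figure` and `go.Bar` are standard
        # plotly.graph_objects names, and the data is made up):
        #
        #   >>> import plotly.graph_objects as go
        #   >>> fig = go.Figure(go.Bar(x=['a', 'b', 'c'], y=[2, 5, 3]))
        #   >>> st.plotly_chart(fig, use_container_width=True)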
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib supports several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of multiselect options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the multiselect.
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options. Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices based on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ...
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
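        # An illustrative aside on the type rules enforced above (assumed
        # example calls, shown as comments only):
        #
        #   >>> st.slider('ints', 0, 10, 5)          # all ints -> returns an int
        #   >>> st.slider('floats', 0.0, 1.0, 0.5)   # all floats -> returns a float
        #   >>> st.slider('range', 0.0, 100.0, (25.0, 75.0))  # tuple -> range slider
        #   >>> st.slider('mixed', 0, 1.0, 0.5)      # mixed types -> StreamlitAPIException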
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions, e.g. ['png', 'jpg']. By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
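        Notes
        -----
        Passing `type='password'` masks what the user types, but the widget
        still returns the plain string to the script (an illustrative note;
        handle the value like any other secret):

        >>> password = st.text_input('Enter a password', type='password')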
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): # Check against None explicitly so that a min_value of 0 is honored. if min_value is not None: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) # Compare against None explicitly so that a bound of 0 is still checked. if (min_value is not None and min_value > value) or (max_value is not None and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions.
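        # An illustrative aside on how the widget's type is inferred above
        # (assumed example calls, shown as comments only):
        #
        #   >>> st.number_input('count', min_value=0, value=3, step=1)  # all ints -> int widget, "%d"
        #   >>> st.number_input('ratio')  # no bounds or value -> float widget, 0.0 default, "%0.2f"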
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL's documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'baz': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ...
        ...     }, {
        ...         'type': 'ScatterplotLayer',
        ...         'data': df,
        ...     }])
        ...

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i
           height: 530px

        """
        suppress_deprecation_warning = config.get_option(
            "global.suppressDeprecationWarnings"
        )
        if not suppress_deprecation_warning:
            import streamlit as st

            st.warning(
                """
                The `deck_gl_chart` widget is deprecated and will be removed on
                2020-05-01. To render a map, you should use the `st.pydeck_chart`
                widget.
            """
            )

        import streamlit.elements.deck_gl as deck_gl

        deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs)

    @_with_element
    def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False):
        """Draw a chart using the PyDeck library.

        This supports 3D maps, point clouds, and more! More info about PyDeck
        at https://deckgl.readthedocs.io/en/latest/.

        These docs are also quite useful:

        - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
        - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at https://mapbox.com.
        It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        pydeck_obj : pydeck.Deck or None
            Object specifying the PyDeck chart to draw.

        Example
        -------
        Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
        the light map style:

        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.pydeck_chart(pdk.Deck(
        ...     map_style='mapbox://styles/mapbox/light-v9',
        ...     initial_view_state=pdk.ViewState(
        ...         latitude=37.76,
        ...         longitude=-122.4,
        ...         zoom=11,
        ...         pitch=50,
        ...     ),
        ...     layers=[
        ...         pdk.Layer(
        ...             'HexagonLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             radius=200,
        ...             elevation_scale=4,
        ...             elevation_range=[0, 1000],
        ...             pickable=True,
        ...             extruded=True,
        ...         ),
        ...         pdk.Layer(
        ...             'ScatterplotLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             get_color='[200, 30, 0, 160]',
        ...             get_radius=200,
        ...         ),
        ...     ],
        ... ))

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
           height: 530px

        """
        import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart

        deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width)

    @_with_element
    def table(self, element, data=None):
        """Display a static table.

        This differs from `st.dataframe` in that the table in this case is
        static: its entire contents are just laid out directly on the page.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The table data.

        Example
        -------
        >>> df = pd.DataFrame(
        ...     np.random.randn(10, 5),
        ...     columns=('col %d' % i for i in range(5)))
        ...
        >>> st.table(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq
           height: 480px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, element.table)

    def add_rows(self, data=None, **kwargs):
        """Concatenate a dataframe to the bottom of the current one.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Table to concat. Optional.
        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...     np.random.randn(50, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st.table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...     np.random.randn(50, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table.add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st.line_chart(df1)
        >>> my_chart.add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st.vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...         'some_fancy_name': df1,  # <-- named dataset
        ...     },
        ...     'data': {'name': 'some_fancy_name'},
        ... })
        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException("Only existing elements can `add_rows`.")

        # Accept syntax st.add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st.add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()
        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows(). "
                "Command requires exactly one dataset"
            )

        # When doing add_rows on an element that does not already have data
        # (for example, st.line_chart() without any args), call the original
        # st.foo() element with new data instead of doing an add_rows().
        if (
            self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES
            and self._cursor.props["last_index"] is None
        ):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            st_method_name = self._cursor.props["delta_type"]
            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)
            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"], self._cursor.props["last_index"]
        )

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index

        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self


def _maybe_melt_data_for_add_rows(data, delta_type, last_index):
    import pandas as pd
    import streamlit.elements.data_frame_proto as data_frame_proto

    # For some delta types we have to reshape the data structure
    # otherwise the input data and the actual data used
    # by vega_lite will be different and it will throw an error.
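    # Illustrative sketch, not part of the original source: "melting" turns
    # the wide frame a user passes in into the long format that the generated
    # Vega-Lite spec expects. For a frame with columns "a" and "b",
    #
    #     pd.melt(df.reset_index(), id_vars=["index"])
    #
    # reshapes
    #
    #     index  a  b           index  variable  value
    #     0      1  3    -->    0      a         1
    #     1      2  4           1      a         2
    #                           0      b         3
    #                           1      b         4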
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() # MASKED: _value_or_dg function (lines 3163-3179) def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value
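For a quick sanity check of the contract above, here is a minimal, self-contained sketch; the `FakeDG` class and the sample values are hypothetical, for illustration only:

```python
class NoValue(object):
    """Sentinel: a widget returns this class itself when st.foo() should yield None."""
    pass


def _value_or_dg(value, dg):
    if value is NoValue:
        return None
    if value is None:
        return dg
    return value


class FakeDG(object):  # hypothetical stand-in for a DeltaGenerator
    pass


dg = FakeDG()
assert _value_or_dg(None, dg) is dg              # non-widget element: caller gets the dg
assert _value_or_dg(NoValue, dg) is None         # widget that wants to return None
assert _value_or_dg("clicked", dg) == "clicked"  # widget with a meaningful value
```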
3163
3179
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. 
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     np.random.randn(50, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...     np.random.randn(10, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------
        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')
        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
           height: 400px

        """
        import streamlit.elements.graphviz_chart as graphviz_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.graphviz_chart` are deprecated and will be removed on 2020-03-04"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )

        graphviz_chart.marshall(
            element.graphviz_chart, figure_or_dot, use_container_width
        )

    @_with_element
    def plotly_chart(
        self,
        element,
        figure_or_data,
        width=0,
        height=0,
        use_container_width=False,
        sharing="streamlit",
        **kwargs,
    ):
        """Display an interactive Plotly chart.

        Plotly is a charting library for Python. The arguments to this function
        closely follow the ones for Plotly's `plot()` function. You can find
        more about Plotly at https://plot.ly/python.

        Parameters
        ----------
        figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,
            dict/list of plotly.graph_objs.Figure/Data, or
            matplotlib.figure.Figure

            See https://plot.ly/python/ for examples of graph descriptions.

            If a Matplotlib Figure, converts it to a Plotly figure and displays
            it.

        width : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the figure.
            Please refer to the Plotly documentation for details.

        height : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the figure.
            Please refer to the Plotly documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        sharing : {'streamlit', 'private', 'secret', 'public'}
            Use 'streamlit' to insert the plot and all its dependencies
            directly in the Streamlit app, which means it works offline too.
            This is the default.
            Use any other sharing mode to send the app to Plotly's servers,
            and embed the result into the Streamlit app. See
            https://plot.ly/python/privacy/ for more. Note that these sharing
            modes require a Plotly account.

        **kwargs
            Any argument accepted by Plotly's `plot()` function.

        To show Plotly charts in Streamlit, just call `st.plotly_chart`
        wherever you would call Plotly's `py.plot` or `py.iplot`.

        Example
        -------
        The example below comes straight from the examples at
        https://plot.ly/python:

        >>> import streamlit as st
        >>> import plotly.figure_factory as ff
        >>> import numpy as np
        >>>
        >>> # Add histogram data
        >>> x1 = np.random.randn(200) - 2
        >>> x2 = np.random.randn(200)
        >>> x3 = np.random.randn(200) + 2
        >>>
        >>> # Group data together
        >>> hist_data = [x1, x2, x3]
        >>>
        >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
        >>>
        >>> # Create distplot with custom bin_size
        >>> fig = ff.create_distplot(
        ...     hist_data, group_labels, bin_size=[.1, .25, .5])
        >>>
        >>> # Plot!
        >>> st.plotly_chart(fig, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
           height: 400px

        """
        # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
        # for their main parameter. I don't like the name, but it's best to
        # keep it in sync with what Plotly calls it.
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
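`slider` first decides whether it was given a scalar or a range, then insists that the value(s) and all bounds share one numeric type. A standalone sketch of that classification step (the function name is assumed for illustration):

```python
def classify_slider_value(value):
    """Return ('single'|'range', is_int) for a slider value (sketch)."""
    single = isinstance(value, (int, float))
    rng = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
    if not single and not rng:
        raise ValueError("value must be an int/float or a 0-2 element sequence")
    if single:
        return "single", isinstance(value, int)
    all_int = all(isinstance(v, int) for v in value)
    all_float = all(isinstance(v, float) for v in value)
    if not all_int and not all_float:
        raise ValueError("sequence elements must share one type")
    return "range", all_int

assert classify_slider_value(25) == ("single", True)
assert classify_slider_value((25.0, 75.0)) == ("range", False)
```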
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. 
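The `JSNumber` bounds checks exist because the frontend is JavaScript, whose integers are only exact up to `Number.MAX_SAFE_INTEGER`. A hedged sketch of the idea; the real `JSNumber` API may differ in details:

```python
# Assumption: JSNumber guards against values a JS frontend cannot represent
# exactly. 2**53 - 1 is JavaScript's Number.MAX_SAFE_INTEGER.
JS_MAX_SAFE_INTEGER = 2 ** 53 - 1
JS_MIN_SAFE_INTEGER = -JS_MAX_SAFE_INTEGER

def validate_int_bounds(value, name):
    if not JS_MIN_SAFE_INTEGER <= value <= JS_MAX_SAFE_INTEGER:
        raise ValueError(
            "%s (%d) must be in the JS-safe integer range" % (name, value)
        )

validate_int_bounds(100, "`max_value`")        # fine
# validate_int_bounds(2 ** 60, "`max_value`")  # would raise
```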
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
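`beta_color_picker` accepts only 3- or 6-digit hex strings, enforced by the regex shown above. The same check, extracted into a runnable snippet:

```python
import re

# The pattern from beta_color_picker: '#' followed by one or two groups
# of exactly three hex digits.
HEX_COLOR = re.compile(r"^#(?:[0-9a-fA-F]{3}){1,2}$")

def is_valid_hex_color(value):
    """True for 3- or 6-digit hex colors like '#000' or '#00FFAA'."""
    return isinstance(value, str) and bool(HEX_COLOR.match(value))

assert is_valid_hex_color("#00f900")
assert is_valid_hex_color("#000")
assert not is_valid_hex_color("#00f9")   # 4 digits: rejected
assert not is_valid_hex_color("00f900")  # missing '#'
```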
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
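Every widget in this file resolves its return value the same way: the value reported by the frontend wins, and the default is used only until the frontend reports one. The pattern in isolation (helper name illustrative):

```python
def resolve_widget_value(ui_value, default):
    """Frontend value wins; fall back to the default on first render."""
    return ui_value if ui_value is not None else default

assert resolve_widget_value(None, "Life of Brian") == "Life of Brian"
assert resolve_widget_value("Holy Grail", "Life of Brian") == "Holy Grail"
```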
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
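`date_input` normalizes its input to a list of `datetime.date` values, remembering whether the caller passed a scalar so it can return a scalar or a tuple accordingly, and serializes with the `%Y/%m/%d` format used above. A compact sketch of that normalization (assumed helper name):

```python
from datetime import date, datetime

def normalize_dates(value):
    """Sketch: scalar -> one-element list; datetimes collapse to dates."""
    single = isinstance(value, (date, datetime))
    if single:
        value = [value]
    elif not (isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)):
        raise ValueError("value must be a date/datetime or a 0-2 element sequence")
    value = [v.date() if isinstance(v, datetime) else v for v in value]
    return single, [d.strftime("%Y/%m/%d") for d in value]

assert normalize_dates(date(2019, 7, 6)) == (True, ["2019/07/06"])
assert normalize_dates((date(2019, 7, 6), datetime(2019, 7, 8)))[1] == [
    "2019/07/06", "2019/07/08"
]
```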
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
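The int/float agreement check above treats `None` bounds as compatible with either type, then requires `value` to match whichever type the bounds agree on. Extracted as a runnable predicate (the name is illustrative):

```python
import numbers

def args_share_type(value, min_value, max_value, step):
    """Sketch of number_input's int-vs-float agreement check."""
    args = [min_value, max_value, step]
    # None bounds are compatible with either numeric type.
    int_args = all(a is None or isinstance(a, numbers.Integral) for a in args)
    float_args = all(a is None or isinstance(a, float) for a in args)
    int_value = isinstance(value, numbers.Integral)
    float_value = isinstance(value, float)
    return (int_value and int_args) or (float_value and float_args)

assert args_share_type(5, 0, 10, 1)
assert args_share_type(0.5, None, 1.0, 0.01)
assert not args_share_type(5, 0.0, 10, 1)  # mixed int/float arguments
```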
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
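`progress` accepts two ranges, floats in [0.0, 1.0] and ints in [0, 100], and normalizes both to an integer percentage for the proto. The mapping in isolation:

```python
def normalize_progress(value):
    """Sketch: floats in [0.0, 1.0] map to percent; ints must be in [0, 100]."""
    if isinstance(value, float):
        if not 0.0 <= value <= 1.0:
            raise ValueError("float progress must be in [0.0, 1.0]")
        return int(value * 100)
    if isinstance(value, int):
        if not 0 <= value <= 100:
            raise ValueError("int progress must be in [0, 100]")
        return value
    raise TypeError("progress value must be int or float")

assert normalize_progress(0.42) == 42
assert normalize_progress(85) == 85
```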
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... 
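Per the docstring, extra keywords to `deck_gl_chart` are "unflattened" at underscores, so `foo_bar_baz=123` becomes `{'foo': {'bar': {'baz': 123}}}`. A naive sketch of that documented idea (the real marshalling code also handles key casing and layer-specific fields):

```python
def unflatten(flat_kwargs):
    """Sketch: split keys at underscores into nested dicts."""
    out = {}
    for key, value in flat_kwargs.items():
        parts = key.split("_")
        node = out
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return out

assert unflatten({"foo_bar_baz": 123}) == {"foo": {"bar": {"baz": 123}}}
assert unflatten({"viewport_zoom": 11}) == {"viewport": {"zoom": 11}}
```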
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. 
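`add_rows` must reshape incoming data exactly the way `line_chart`/`area_chart`/`bar_chart` did originally; otherwise the appended frame would not line up with the melted frame already on the frontend. What that melt looks like on a small frame:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Wide -> long: one row per (index, column) pair, as the melting deltas expect.
melted = pd.melt(df.reset_index(), id_vars=["index"])
print(melted)
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
```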
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
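The `RangeIndex` branch above re-numbers appended rows so they continue where the existing chart data stopped. A simplified sketch of the same bookkeeping, using the frame's length rather than the old index's `stop` (assumed helper name):

```python
import pandas as pd

def continue_range_index(df, last_index, step=1):
    """Sketch: re-index appended rows so they continue after `last_index`."""
    start = last_index + step
    stop = start + len(df) * step
    df = df.reset_index(drop=True)
    df.index = pd.RangeIndex(start=start, stop=stop, step=step)
    return df, stop - step  # frame with shifted index, and the new last_index

df2, last = continue_range_index(pd.DataFrame({"a": [5, 6]}), last_index=49)
assert list(df2.index) == [50, 51] and last == 51
```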
_get_coordinates
Returns the element's 4-component location as a string like "M.(1,2).3". This string uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place.
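The implementation itself is masked here, but the docstring pins down the format: container, cursor path, cursor index, joined with dots. A purely hypothetical sketch that produces strings of that shape; this illustrates the format only and is not the masked function:

```python
def make_coordinates(container, path, index):
    """Hypothetical: build 'M.(1,2).3' from a container, a path tuple, and an index."""
    path_str = "(" + ",".join(str(p) for p in path) + ")"
    return "%s.%s.%s" % (container, path_str, index)

assert make_coordinates("M", (1, 2), 3) == "M.(1,2).3"
```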
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
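`_wraps_with_cleaned_sig` hides the internal `self`/`element` parameters from user-facing docs by wrapping a `functools.partial` that pre-binds them to `None`. The trick in isolation:

```python
import functools

def greet(self, element, name):
    """Say hello."""
    return "hello %s" % name

# Pre-bind the first two parameters so they vanish from the visible signature,
# then copy the metadata onto the partial so functools.wraps can pick it up.
fake = functools.partial(greet, None, None)
fake.__doc__ = greet.__doc__
fake.__name__ = greet.__name__
fake.__module__ = greet.__module__

@functools.wraps(fake)
def public(name):
    return greet(None, None, name)

assert public("streamlit") == "hello streamlit"
assert public.__name__ == "greet"
```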
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
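`_set_widget_id` derives the widget ID from a hash of the serialized proto, optionally prefixed by the user key; that is exactly why two structurally identical widgets collide unless they get distinct `key`s. A sketch of the scheme, hashing raw bytes instead of a real proto (`hash()` is salted per process, but the real code only needs within-run stability):

```python
def widget_id(serialized_proto, user_key=None):
    """Sketch: identical protos yield identical IDs unless user_key differs."""
    element_hash = hash(serialized_proto)
    if user_key is not None:
        return "%s-%s" % (user_key, element_hash)
    return "%s" % element_hash

first = widget_id(b"label=radio|options=a,b")
dup = widget_id(b"label=radio|options=a,b")
assert first == dup  # same structure, same ID -> would raise DuplicateWidgetID
assert widget_id(b"label=radio|options=a,b", "my_key") != first
```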
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor # MASKED: _get_coordinates function (lines 312-334) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. 
if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. It is hard to write secure HTML, so by using this argument you may be compromising your users' security. 
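`text` and `markdown` both pass their body through `_clean_text`, i.e. `textwrap.dedent` plus `strip`, so indented triple-quoted strings render flush left. Demonstrated:

```python
import textwrap

def clean_text(text):
    """Sketch of _clean_text: dedent and strip the body before display."""
    return textwrap.dedent(str(text)).strip()

body = """
    This is some text.
    Still flush left after cleaning.
"""
assert clean_text(body).startswith("This is some text.")
```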
For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
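`st.json`'s fallback serialization replaces any non-serializable value with its type's string, as the `default=` lambda above shows. In isolation:

```python
import json

class Opaque:
    pass

payload = {"ok": 1, "weird": Opaque()}

# Non-serializable values fall back to str(type(o)) instead of raising.
body = json.dumps(payload, default=lambda o: str(type(o)))
assert '"ok": 1' in body and "Opaque" in body
```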
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. 
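`title`, `header`, and `subheader` are thin wrappers that prepend one to three `#` marks and defer to the markdown element. The mapping, spelled out (helper name illustrative):

```python
def heading_markdown(level, body):
    """Sketch: title/header/subheader emit markdown with 1-3 '#' marks."""
    return "%s %s" % ("#" * level, body)

assert heading_markdown(1, "This is a title") == "# This is a title"
assert heading_markdown(2, "This is a header") == "## This is a header"
assert heading_markdown(3, "This is a subheader") == "### This is a subheader"
```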
height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.line_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8 height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("line", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def area_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a area chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.area_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("area", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def bar_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a bar chart. 
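As the docstring says, `line_chart` is sugar over `st.altair_chart`: the frame's index becomes x, its columns become series. A hedged sketch of how such a chart could be assembled with Altair directly (assuming `altair` is installed; the real `generate_chart` handles more cases):

```python
import altair as alt
import pandas as pd

data = pd.DataFrame({"a": [1, 2, 3], "b": [3, 1, 2]})

# Wide frame -> long frame, then encode the index as x, the value as y,
# and the original column name as the series color.
long_form = data.reset_index().melt(id_vars=["index"])
chart = (
    alt.Chart(long_form)
    .mark_line()
    .encode(x="index:Q", y="value:Q", color="variable:N")
)
```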
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable,
            or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphviz graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')
        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
           height: 400px

        """
        import streamlit.elements.graphviz_chart as graphviz_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04"
            )

        graphviz_chart.marshall(
            element.graphviz_chart, figure_or_dot, use_container_width
        )

    @_with_element
    def plotly_chart(
        self,
        element,
        figure_or_data,
        width=0,
        height=0,
        use_container_width=False,
        sharing="streamlit",
        **kwargs,
    ):
        """Display an interactive Plotly chart.

        Plotly is a charting library for Python. The arguments to this function
        closely follow the ones for Plotly's `plot()` function. You can find
        more about Plotly at https://plot.ly/python.

        Parameters
        ----------
        figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,
            dict/list of plotly.graph_objs.Figure/Data, or
            matplotlib.figure.Figure

            See https://plot.ly/python/ for examples of graph descriptions.

            If a Matplotlib Figure, converts it to a Plotly figure and displays
            it.

        width : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the figure.
            Please refer to the Plotly documentation for details.

        height : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the figure.
            Please refer to the Plotly documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        sharing : {'streamlit', 'private', 'secret', 'public'}
            Use 'streamlit' to insert the plot and all its dependencies
            directly in the Streamlit app, which means it works offline too.
            This is the default.
            Use any other sharing mode to send the app to Plotly's servers,
            and embed the result into the Streamlit app. See
            https://plot.ly/python/privacy/ for more. Note that these sharing
            modes require a Plotly account.

        **kwargs
            Any argument accepted by Plotly's `plot()` function.

        To show Plotly charts in Streamlit, just call `st.plotly_chart`
        wherever you would call Plotly's `py.plot` or `py.iplot`.

        Example
        -------

        The example below comes straight from the examples at
        https://plot.ly/python:

        >>> import streamlit as st
        >>> import plotly.figure_factory as ff
        >>> import numpy as np
        >>>
        >>> # Add histogram data
        >>> x1 = np.random.randn(200) - 2
        >>> x2 = np.random.randn(200)
        >>> x3 = np.random.randn(200) + 2
        >>>
        >>> # Group data together
        >>> hist_data = [x1, x2, x3]
        >>>
        >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
        >>>
        >>> # Create distplot with custom bin_size
        >>> fig = ff.create_distplot(
        ...         hist_data, group_labels, bin_size=[.1, .25, .5])
        >>>
        >>> # Plot!
        >>> st.plotly_chart(fig, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
           height: 400px

        """
        # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
        # for their main parameter. I don't like the name, but it's best to
        # keep it in sync with what Plotly calls it.
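        # Illustrative sketch (not part of the original module) of how the
        # deprecation pattern below behaves from the caller's perspective.
        # Assuming a Plotly figure `fig`:
        #
        #   st.plotly_chart(fig, width=500)   # triggers the st.warning below
        #   fig.update_layout(width=500)      # preferred: size set on the figure
        #   st.plotly_chart(fig)              # no warning
        #
        # `update_layout` is Plotly's own API for figure sizing; the exact call
        # shown here is an illustration, not something this module enforces.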
        import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of `fig`.

            * If `fig` is set, defaults to `False`.

            * If `fig` is not set, defaults to `True`. This simulates Jupyter's
              approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this function
        closely follow the ones for Bokeh's `show` function. You can find
        more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)
        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
           height: 600px

        """
        import streamlit.elements.bokeh_chart as bokeh_chart

        bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width)

    @_with_element
    def image(
        self,
        element,
        image,
        caption=None,
        width=None,
        use_column_width=False,
        clamp=False,
        channels="RGB",
        format="JPEG",
    ):
        """Display an image or list of images.

        Parameters
        ----------
        image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str]
            Monochrome image of shape (w,h) or (w,h,1)
            OR a color image of shape (w,h,3)
            OR an RGBA image of shape (w,h,4)
            OR a URL to fetch the image from
            OR a list of one of the above, to display multiple images.
        caption : str or list of str
            Image caption. If displaying multiple images, caption should be a
            list of captions (one for each image).
        width : int or None
            Image width. None means use the image width.
        use_column_width : bool
            If True, set the image width to the column width. This takes
            precedence over the `width` parameter.
        clamp : bool
            Clamp image pixel values to a valid range ([0-255] per channel).
            This is only meaningful for byte array images; the parameter is
            ignored for image URLs. If this is not set, and an image has an
            out-of-range value, an error will be thrown.
        channels : 'RGB' or 'BGR'
            If image is an nd.array, this parameter denotes the format used to
            represent color information. Defaults to 'RGB', meaning
            `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and
            `image[:, :, 2]` is blue. For images coming from libraries like
            OpenCV you should set this to 'BGR', instead.
        format : 'JPEG' or 'PNG'
            This parameter specifies the image format to use when transferring
            the image data. Defaults to 'JPEG'.

        Example
        -------
        >>> from PIL import Image
        >>> image = Image.open('sunrise.jpg')
        >>>
        >>> st.image(image, caption='Sunrise by the mountains',
        ...          use_column_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY
           height: 630px

        """
        from .elements import image_proto

        if use_column_width:
            width = -2
        elif width is None:
            width = -1
        elif width <= 0:
            raise StreamlitAPIException("Image width must be positive.")

        image_proto.marshall_images(
            self._get_coordinates(),
            image,
            caption,
            width,
            element.imgs,
            clamp,
            channels,
            format,
        )

    @_with_element
    def audio(self, element, data, format="audio/wav", start_time=0):
        """Display an audio player.

        Parameters
        ----------
        data : str, bytes, BytesIO, numpy.ndarray, or file opened with
                io.open().
            Raw audio data, filename, or a URL pointing to the file to load.
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.
        start_time: int
            The time from which this element should start playing.
        format : str
            The mime type for the audio file. Defaults to 'audio/wav'.
            See https://tools.ietf.org/html/rfc4281 for more info.

        Example
        -------
        >>> audio_file = open('myaudio.ogg', 'rb')
        >>> audio_bytes = audio_file.read()
        >>>
        >>> st.audio(audio_bytes, format='audio/ogg')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb
           height: 400px

        """
        from .elements import media_proto

        media_proto.marshall_audio(
            self._get_coordinates(), element.audio, data, format, start_time
        )

    @_with_element
    def video(self, element, data, format="video/mp4", start_time=0):
        """Display a video player.

        Parameters
        ----------
        data : str, bytes, BytesIO, numpy.ndarray, or file opened with
                io.open().
            Raw video data, filename, or URL pointing to a video to load.
            Includes support for YouTube URLs.
            Numpy arrays and raw data formats must include all necessary
            file headers to match specified file format.
        format : str
            The mime type for the video file. Defaults to 'video/mp4'.
            See https://tools.ietf.org/html/rfc4281 for more info.
        start_time: int
            The time from which this element should start playing.

        Example
        -------
        >>> video_file = open('myvideo.mp4', 'rb')
        >>> video_bytes = video_file.read()
        >>>
        >>> st.video(video_bytes)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv
           height: 600px

        .. note::
           Some videos may not display if they are encoded using MP4V (which is
           an export option in OpenCV), as this codec is not widely supported
           by browsers. Converting your video to H.264 will allow the video to
           be displayed in Streamlit. See this `StackOverflow post
           <https://stackoverflow.com/a/49535220/2394542>`_ or this
           `Streamlit forum post
           <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_
           for more information.

        """
        from .elements import media_proto

        media_proto.marshall_video(
            self._get_coordinates(), element.video, data, format, start_time
        )

    @_with_element
    def button(self, element, label, key=None):
        """Display a button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            If the button was clicked on the last run of the app.

        Example
        -------
        >>> if st.button('Say hello'):
        ...     st.write('Why hello there')
        ... else:
        ...     st.write('Goodbye')

        """
        element.button.label = label
        element.button.default = False

        ui_value = _get_widget_ui_value("button", element, user_key=key)
        current_value = ui_value if ui_value is not None else False
        return current_value

    @_with_element
    def checkbox(self, element, label, value=False, key=None):
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        """
        element.checkbox.label = label
        element.checkbox.default = bool(value)

        ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return bool(current_value)

    @_with_element
    def multiselect(
        self, element, label, options, default=None, format_func=str, key=None
    ):
        """Display a multiselect widget.

        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        default: [str] or None
            List of default values.
        format_func : function
            Function to modify the display of selectbox options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the multiselect.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        [str]
            A list with the selected options

        Example
        -------
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. note::
           User experience can be degraded for large lists of `options` (100+),
           as this widget is not designed to handle arbitrary text search
           efficiently. See this `thread
           <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
           on the Streamlit community forum for more information and
           `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_
           for updates on the issue.

        """

        # Perform validation checks and return indices based on the default
        # values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This if is done before others because calling if not x (done
                # right below) when x is of type pd.Series() or np.array()
                # throws a ValueError exception.
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
            Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int/float or tuple of int/float
            The current value of the slider widget. The return type will match
            the data type of the value parameter.

        Examples
        --------
        >>> age = st.slider('How old are you?', 0, 130, 25)
        >>> st.write("I'm ", age, 'years old')

        And here's an example of a range slider:

        >>> values = st.slider(
        ...     'Select a range of values',
        ...     0.0, 100.0, (25.0, 75.0))
        >>> st.write('Values:', values)

        """

        # Set value default.
        if value is None:
            value = min_value if min_value is not None else 0

        # Ensure that the value is either a single value or a range of values.
        single_value = isinstance(value, (int, float))
        range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
        if not single_value and not range_value:
            raise StreamlitAPIException(
                "Slider value should either be an int/float or a list/tuple of "
                "0 to 2 ints/floats"
            )

        # Ensure that the value is either an int/float or a list/tuple of
        # ints/floats.
        if single_value:
            int_value = isinstance(value, int)
            float_value = isinstance(value, float)
        else:
            int_value = all(map(lambda v: isinstance(v, int), value))
            float_value = all(map(lambda v: isinstance(v, float), value))

        if not int_value and not float_value:
            raise StreamlitAPIException(
                "Slider tuple/list components must be of the same type."
            )

        # Set corresponding defaults.
        if min_value is None:
            min_value = 0 if int_value else 0.0
        if max_value is None:
            max_value = 100 if int_value else 1.0
        if step is None:
            step = 1 if int_value else 0.01

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]
        int_args = all(map(lambda a: isinstance(a, int), args))
        float_args = all(map(lambda a: isinstance(a, float), args))
        if not int_args and not float_args:
            raise StreamlitAPIException(
                "Slider value arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args
        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "Both value and arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that min <= value <= max.
        if single_value:
            if not min_value <= value <= max_value:
                raise StreamlitAPIException(
                    "The default `value` of %(value)s "
                    "must lie between the `min_value` of %(min)s "
                    "and the `max_value` of %(max)s, inclusively."
                    % {"value": value, "min": min_value, "max": max_value}
                )
        elif len(value) == 2:
            start, end = value
            if not min_value <= start <= end <= max_value:
                raise StreamlitAPIException(
                    "The value and/or arguments are out of range."
                )
        else:
            value = [min_value, max_value]

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
        # (We check `min_value` and `max_value` here; `value` and `step` are
        # already known to be in the [min_value, max_value] range.)
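        # Illustrative note (not in the original source): the bounds checks
        # exist because slider values are handled in the browser as JavaScript
        # numbers, whose safe integer range is +/-(2**53 - 1). Assuming that
        # limit, a call like
        #
        #   st.slider("big", min_value=0, max_value=2**53)
        #
        # would be rejected here as a StreamlitAPIException, while
        #
        #   st.slider("ok", min_value=0, max_value=2**31)
        #
        # passes. The exact limits are enforced by JSNumber below, not by this
        # comment.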
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places from
        # the `step` argument, but this would only be meaningful if step were a
        # decimal. As a possible improvement we could make this function accept
        # decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)
        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value
        # Cast ui_value to the same type as the input arguments.
        if ui_value is not None:
            current_value = getattr(ui_value, "value")
            # Convert float array into int array if the rest of the arguments
            # are ints.
            if all_ints:
                current_value = list(map(int, current_value))
            # If there is only one value in the array, destructure it into a
            # single variable.
            current_value = current_value[0] if single_value else current_value
        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader is
            for.
        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.
        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the encoding
              parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet. (When ready to release, turn test back
        # on at file_uploader_test.py)
        accept_multiple_files = False

        if isinstance(type, str):
            type = [type]

        element.file_uploader.label = label
        element.file_uploader.type[:] = type if type is not None else []
        element.file_uploader.max_upload_size_mb = config.get_option(
            "server.maxUploadSize"
        )
        element.file_uploader.multiple_files = accept_multiple_files
        _set_widget_id("file_uploader", element, user_key=key)

        files = None
        ctx = get_report_ctx()
        if ctx is not None:
            files = ctx.uploaded_file_mgr.get_files(
                session_id=ctx.session_id, widget_id=element.file_uploader.id
            )

        if files is None:
            return NoValue

        file_datas = [get_encoded_file_data(file.data, encoding) for file in files]
        return file_datas if accept_multiple_files else file_datas[0]

    @_with_element
    def beta_color_picker(self, element, label, value=None, key=None):
        """Display a color picker widget.

        Note: This is a beta feature. See
        https://docs.streamlit.io/en/latest/pre_release_features.html for more
        information.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
        value : str or None
            The hex value of this widget when it first renders. If None,
            defaults to black.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        str
            The selected color as a hex string.

        Example
        -------
        >>> color = st.beta_color_picker('Pick A Color', '#00f900')
        >>> st.write('The current color is', color)

        """
        # Set value default.
        if value is None:
            value = "#000000"

        # Make sure the value is a string.
        if not isinstance(value, str):
            raise StreamlitAPIException(
                """
                Color Picker Value has invalid type: %s. Expects a hex string
                like '#00FFAA' or '#000'.
                """
                % type(value).__name__
            )

        # Validate the value; a hex string is expected.
        match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value)
        if not match:
            raise StreamlitAPIException(
                """
                '%s' is not a valid hex code for colors. Valid ones are like
                '#00FFAA' or '#000'.
                """
                % value
            )

        element.color_picker.label = label
        element.color_picker.default = str(value)

        ui_value = _get_widget_ui_value("color_picker", element, user_key=key)
        current_value = ui_value if ui_value is not None else value

        return str(current_value)

    @_with_element
    def text_input(
        self, element, label, value="", max_chars=None, key=None, type="default"
    ):
        """Display a single-line text input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
        value : any
            The text value of this widget when it first renders. This will be
            cast to str internally.
        max_chars : int or None
            Max number of characters allowed in text input.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        type : str
            The type of the text input. This can be either "default" (for
            a regular text input), or "password" (for a text input that
            masks the user's typed value). Defaults to "default".

        Returns
        -------
        str
            The current value of the text input widget.
        Example
        -------
        >>> title = st.text_input('Movie title', 'Life of Brian')
        >>> st.write('The current movie title is', title)

        """
        element.text_input.label = label
        element.text_input.default = str(value)
        if max_chars is not None:
            element.text_input.max_chars = max_chars

        if type == "default":
            element.text_input.type = TextInput.DEFAULT
        elif type == "password":
            element.text_input.type = TextInput.PASSWORD
        else:
            raise StreamlitAPIException(
                "'%s' is not a valid text_input type. Valid types are 'default' and 'password'."
                % type
            )

        ui_value = _get_widget_ui_value("text_input", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return str(current_value)

    @_with_element
    def text_area(
        self, element, label, value="", height=None, max_chars=None, key=None
    ):
        """Display a multi-line text input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
        value : any
            The text value of this widget when it first renders. This will be
            cast to str internally.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.
        max_chars : int or None
            Maximum number of characters allowed in text area.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        str
            The current value of the text area widget.

        Example
        -------
        >>> txt = st.text_area('Text to analyze', '''
        ...     It was the best of times, it was the worst of times, it was
        ...     the age of wisdom, it was the age of foolishness, it was
        ...     the epoch of belief, it was the epoch of incredulity, it
        ...     was the season of Light, it was the season of Darkness, it
        ...     was the spring of hope, it was the winter of despair, (...)
        ...     ''')
        >>> st.write('Sentiment:', run_sentiment_analysis(txt))

        """
        element.text_area.label = label
        element.text_area.default = str(value)
        if height is not None:
            element.text_area.height = height
        if max_chars is not None:
            element.text_area.max_chars = max_chars

        ui_value = _get_widget_ui_value("text_area", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return str(current_value)

    @_with_element
    def time_input(self, element, label, value=None, key=None):
        """Display a time input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this time input is for.
        value : datetime.time/datetime.datetime
            The value of this widget when it first renders. Defaults to the
            current time.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        datetime.time
            The current value of the time input widget.

        Example
        -------
        >>> t = st.time_input('Set an alarm for', datetime.time(8, 45))
        >>> st.write('Alarm is set for', t)

        """
        # Set value default.
        if value is None:
            value = datetime.now().time()

        # Ensure that the value is either datetime or time.
        if not isinstance(value, datetime) and not isinstance(value, time):
            raise StreamlitAPIException(
                "The type of the value should be either datetime or time."
            )

        # Convert datetime to time.
        if isinstance(value, datetime):
            value = value.time()

        element.time_input.label = label
        element.time_input.default = time.strftime(value, "%H:%M")

        ui_value = _get_widget_ui_value("time_input", element, user_key=key)
        current_value = (
            datetime.strptime(ui_value, "%H:%M").time()
            if ui_value is not None
            else value
        )
        return current_value

    @_with_element
    def date_input(
        self,
        element,
        label,
        value=None,
        min_value=datetime.min,
        max_value=None,
        key=None,
    ):
        """Display a date input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this date input is for.
        value : datetime.date or datetime.datetime or list/tuple of
            datetime.date or datetime.datetime or None
            The value of this widget when it first renders. If a list/tuple
            with 0 to 2 date/datetime values is provided, the datepicker will
            allow users to provide a range. Defaults to today as a single-date
            picker.
        min_value : datetime.date or datetime.datetime
            The minimum selectable date. Defaults to datetime.min.
        max_value : datetime.date or datetime.datetime
            The maximum selectable date. Defaults to today+10y.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        datetime.date
            The current value of the date input widget.

        Example
        -------
        >>> d = st.date_input(
        ...     "When\'s your birthday",
        ...     datetime.date(2019, 7, 6))
        >>> st.write('Your birthday is:', d)

        """
        # Set value default.
        if value is None:
            value = datetime.now().date()

        single_value = isinstance(value, (date, datetime))
        range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
        if not single_value and not range_value:
            raise StreamlitAPIException(
                "DateInput value should either be a date/datetime or a list/tuple of "
                "0 - 2 date/datetime values"
            )

        if single_value:
            value = [value]

        element.date_input.is_range = range_value

        value = [v.date() if isinstance(v, datetime) else v for v in value]

        element.date_input.label = label
        element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value]

        if isinstance(min_value, datetime):
            min_value = min_value.date()

        element.date_input.min = date.strftime(min_value, "%Y/%m/%d")

        if max_value is None:
            today = date.today()
            max_value = date(today.year + 10, today.month, today.day)

        if isinstance(max_value, datetime):
            max_value = max_value.date()

        element.date_input.max = date.strftime(max_value, "%Y/%m/%d")

        ui_value = _get_widget_ui_value("date_input", element, user_key=key)

        if ui_value is not None:
            value = getattr(ui_value, "data")
            value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value]

        if single_value:
            return value[0]
        else:
            return tuple(value)

    @_with_element
    def number_input(
        self,
        element,
        label,
        min_value=None,
        max_value=None,
        value=NoValue(),
        step=None,
        format=None,
        key=None,
    ):
        """Display a numeric input widget.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this input is for.
        min_value : int or float or None
            The minimum permitted value.
            If None, there will be no minimum.
        max_value : int or float or None
            The maximum permitted value.
            If None, there will be no maximum.
        value : int or float or None
            The value of this widget when it first renders.
            Defaults to min_value, or 0.0 if min_value is None.
        step : int or float or None
            The stepping interval.
            Defaults to 1 if the value is an int, 0.01 otherwise.
            If the value is not specified, the format parameter will be used.
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. The output must be purely numeric. This does not
            impact the return value.
            Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int or float
            The current value of the numeric input widget. The return type
            will match the data type of the value parameter.

        Example
        -------
        >>> number = st.number_input('Insert a number')
        >>> st.write('The current number is ', number)

        """
        if isinstance(value, NoValue):
            if min_value is not None:
                value = min_value
            else:
                value = 0.0  # We set a float as default

        int_value = isinstance(value, numbers.Integral)
        float_value = isinstance(value, float)

        if value is None:
            raise StreamlitAPIException(
                "Default value for number_input should be an int or a float."
            )
        else:
            if format is None:
                format = "%d" if int_value else "%0.2f"

            if format in ["%d", "%u", "%i"] and float_value:
                # Warn user to check if displaying float as int was really
                # intended.
                import streamlit as st

                st.warning(
                    "Warning: NumberInput value below is float, but format {} displays as integer.".format(
                        format
                    )
                )

            if step is None:
                step = 1 if int_value else 0.01

        try:
            float(format % 2)
        except (TypeError, ValueError):
            raise StreamlitAPIException(
                "Format string for st.number_input contains invalid characters: %s"
                % format
            )

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]

        int_args = all(
            map(
                lambda a: (
                    isinstance(a, numbers.Integral) or isinstance(a, type(None))
                ),
                args,
            )
        )
        float_args = all(
            map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)
        )

        if not int_args and not float_args:
            raise StreamlitAPIException(
                "All arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args

        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "All numerical arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step_type": type(step).__name__,
                }
            )

        # Use `is not None` so that a bound of 0 is still enforced.
        if (min_value is not None and min_value > value) or (
            max_value is not None and max_value < value
        ):
            raise StreamlitAPIException(
                "The default `value` of %(value)s "
                "must lie between the `min_value` of %(min)s "
                "and the `max_value` of %(max)s, inclusively."
                % {"value": value, "min": min_value, "max": max_value}
            )

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
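        # Illustrative note (not in the original source): the type checks above
        # mean that mixed int/float arguments are rejected. Assuming the
        # behavior described there, a call such as
        #
        #   st.number_input("x", min_value=0, max_value=10.0)
        #
        # raises a StreamlitAPIException, while keeping every bound the same
        # numeric type, e.g.
        #
        #   st.number_input("x", min_value=0.0, max_value=10.0)
        #
        # is accepted.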
        try:
            if all_ints:
                if min_value is not None:
                    JSNumber.validate_int_bounds(min_value, "`min_value`")
                if max_value is not None:
                    JSNumber.validate_int_bounds(max_value, "`max_value`")
                if step is not None:
                    JSNumber.validate_int_bounds(step, "`step`")
                JSNumber.validate_int_bounds(value, "`value`")
            else:
                if min_value is not None:
                    JSNumber.validate_float_bounds(min_value, "`min_value`")
                if max_value is not None:
                    JSNumber.validate_float_bounds(max_value, "`max_value`")
                if step is not None:
                    JSNumber.validate_float_bounds(step, "`step`")
                JSNumber.validate_float_bounds(value, "`value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        number_input = element.number_input
        number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT
        number_input.label = label
        number_input.default = value

        if min_value is not None:
            number_input.min = min_value
            number_input.has_min = True

        if max_value is not None:
            number_input.max = max_value
            number_input.has_max = True

        if step is not None:
            number_input.step = step

        if format is not None:
            number_input.format = format

        ui_value = _get_widget_ui_value("number_input", element, user_key=key)

        return ui_value if ui_value is not None else value

    @_with_element
    def progress(self, element, value):
        """Display a progress bar.

        Parameters
        ----------
        value : int or float
            0 <= value <= 100 for int
            0.0 <= value <= 1.0 for float

        Example
        -------
        Here is an example of a progress bar increasing over time:

        >>> import time
        >>>
        >>> my_bar = st.progress(0)
        >>>
        >>> for percent_complete in range(100):
        ...     time.sleep(0.1)
        ...     my_bar.progress(percent_complete + 1)

        """
        # TODO: standardize numerical type checking across st.* functions.
        if isinstance(value, float):
            if 0.0 <= value <= 1.0:
                element.progress.value = int(value * 100)
            else:
                raise StreamlitAPIException(
                    "Progress Value has invalid value [0.0, 1.0]: %f" % value
                )
        elif isinstance(value, int):
            if 0 <= value <= 100:
                element.progress.value = value
            else:
                raise StreamlitAPIException(
                    "Progress Value has invalid value [0, 100]: %d" % value
                )
        else:
            raise StreamlitAPIException(
                "Progress Value has invalid type: %s" % type(value).__name__
            )

    @_with_element
    def empty(self, element):
        """Add a placeholder to the app.

        The placeholder can be filled any time by calling methods on the
        return value.

        Example
        -------
        >>> my_placeholder = st.empty()
        >>>
        >>> # Now replace the placeholder with some text:
        >>> my_placeholder.text("Hello world!")
        >>>
        >>> # And replace the text with an image:
        >>> my_placeholder.image(my_image_bytes)

        """
        # The protobuf needs something to be set.
        element.empty.unused = True

    @_with_element
    def map(self, element, data=None, zoom=None, use_container_width=True):
        """Display a map with points on it.

        This is a wrapper around st.pydeck_chart to quickly create
        scatterplot charts on top of a map, with auto-centering and auto-zoom.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to be plotted. Must have columns called 'lat', 'lon',
            'latitude', or 'longitude'.
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.

                - "type" : str
                  One of the Deck.GL layer types that are currently supported
                  by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                  PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                  TextLayer.

                - Plus anything accepted by that layer type. The exact keys
                  that are accepted depend on the "type" field, above. For
                  example, for ScatterplotLayer you can set fields like
                  "opacity", "filled", "stroked", and so on.

                  In addition, Deck.GL's documentation for ScatterplotLayer
                  shows you can use a "getRadius" field to individually set the
                  radius of each circle in the plot. So here you would set
                  "getRadius": "my_column" where "my_column" is the name of the
                  column containing the radius data.

                  For things like "getPosition", which expect an array rather
                  than a scalar value, we provide alternates that make the API
                  simpler to use with dataframes:

                  - Instead of "getPosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getSourcePosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getTargetPosition" : use "getTargetLatitude"
                    and "getTargetLongitude".
                  - Instead of "getColor" : use "getColorR", "getColorG",
                    "getColorB", and (optionally) "getColorA", for red, green,
                    blue and alpha.
                  - Instead of "getSourceColor" : use the same as above.
                  - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...     }, {
        ...         'type': 'ScatterplotLayer',
        ...         'data': df,
        ...     }])
        ...

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i
           height: 530px

        """
        suppress_deprecation_warning = config.get_option(
            "global.suppressDeprecationWarnings"
        )
        if not suppress_deprecation_warning:
            import streamlit as st

            st.warning(
                """
                The `deck_gl_chart` widget is deprecated and will be removed on
                2020-05-01. To render a map, you should use the
                `st.pydeck_chart` widget.
            """
            )

        import streamlit.elements.deck_gl as deck_gl

        deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs)

    @_with_element
    def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False):
        """Draw a chart using the PyDeck library.

        This supports 3D maps, point clouds, and more! More info about PyDeck
        at https://deckgl.readthedocs.io/en/latest/.

        These docs are also quite useful:

        - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
        - DeckGL JSON docs:
          https://github.com/uber/deck.gl/tree/master/modules/json

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        pydeck_obj : pydeck.Deck or None
            Object specifying the PyDeck chart to draw.

        Example
        -------
        Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
        the light map style:

        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.pydeck_chart(pdk.Deck(
        ...     map_style='mapbox://styles/mapbox/light-v9',
        ...     initial_view_state=pdk.ViewState(
        ...         latitude=37.76,
        ...         longitude=-122.4,
        ...         zoom=11,
        ...         pitch=50,
        ...     ),
        ...     layers=[
        ...         pdk.Layer(
        ...             'HexagonLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             radius=200,
        ...             elevation_scale=4,
        ...             elevation_range=[0, 1000],
        ...             pickable=True,
        ...             extruded=True,
        ...         ),
        ...         pdk.Layer(
        ...             'ScatterplotLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             get_color='[200, 30, 0, 160]',
        ...             get_radius=200,
        ...         ),
        ...     ],
        ... ))

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
           height: 530px

        """
        import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart

        deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width)

    @_with_element
    def table(self, element, data=None):
        """Display a static table.

        This differs from `st.dataframe` in that the table in this case is
        static: its entire contents are just laid out directly on the page.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The table data.

        Example
        -------
        >>> df = pd.DataFrame(
        ...     np.random.randn(10, 5),
        ...     columns=('col %d' % i for i in range(5)))
        ...
        >>> st.table(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq
           height: 480px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, element.table)

    def add_rows(self, data=None, **kwargs):
        """Concatenate a dataframe to the bottom of the current one.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Table to concat. Optional.
        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st.table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table.add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st.line_chart(df1)
        >>> my_chart.add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st.vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...       'some_fancy_name': df1,  # <-- named dataset
        ...      },
        ...     'data': {'name': 'some_fancy_name'},
        ... })
        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException("Only existing elements can `add_rows`.")

        # Accept syntax st.add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st.add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()
        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows(). "
                "Command requires exactly one dataset."
            )

        # When doing add_rows on an element that does not already have data
        # (for example, st.line_chart() without any args), call the original
        # st.foo() element with new data instead of doing an add_rows().
        if (
            self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES
            and self._cursor.props["last_index"] is None
        ):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            st_method_name = self._cursor.props["delta_type"]

            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)

            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"], self._cursor.props["last_index"]
        )

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index

        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self


def _maybe_melt_data_for_add_rows(data, delta_type, last_index):
    import pandas as pd
    import streamlit.elements.data_frame_proto as data_frame_proto

    # For some delta types we have to reshape the data structure
    # otherwise the input data and the actual data used
    # by vega_lite will be different and it will throw an error.
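    # Editor's illustration (not part of the original code), assuming a
    # "line_chart" delta fed a wide frame such as
    #
    #         a    b
    #     0  1.0  2.0
    #     1  3.0  4.0
    #
    # The pd.melt() below produces the long form that the generated
    # Vega-Lite spec expects:
    #
    #        index variable  value
    #     0      0        a    1.0
    #     1      1        a    3.0
    #     2      0        b    2.0
    #     3      1        b    4.0
    #
    # Also note that "stop" is read *after* reset_index(drop=True), so it
    # equals the number of incoming rows; the rebuilt RangeIndex then
    # continues from last_index so appended rows don't overlap old ones.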
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
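# A minimal usage sketch (editor's addition, not part of the original
# module) of how NoValue and _value_or_dg cooperate when a widget method
# returns:
#
#     dg = DeltaGenerator()
#     _value_or_dg(NoValue, dg)  # -> None: the widget explicitly returns None
#     _value_or_dg(None, dg)     # -> dg: non-widget element, enables chaining
#     _value_or_dg("hi", dg)     # -> "hi": a real widget value passes through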
def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index)
312
334
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Allows us to create and absorb changes (aka Deltas) to elements."""

import functools
import json
import random
import textwrap
import numbers
import re
from datetime import datetime
from datetime import date
from datetime import time

from streamlit import caching
from streamlit import config
from streamlit import cursor
from streamlit import type_util
from streamlit.ReportThread import get_report_ctx
from streamlit.errors import DuplicateWidgetID
from streamlit.errors import StreamlitAPIException
from streamlit.errors import NoSessionContext
from streamlit.file_util import get_encoded_file_data
from streamlit.js_number import JSNumber
from streamlit.js_number import JSNumberBoundsException
from streamlit.proto import Alert_pb2
from streamlit.proto import Balloons_pb2
from streamlit.proto import BlockPath_pb2
from streamlit.proto import ForwardMsg_pb2
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.logger import get_logger
from streamlit.type_util import is_type

LOGGER = get_logger(__name__)

# Save the type built-in for when we override the name "type".
_type = type

MAX_DELTA_BYTES = 14 * 1024 * 1024  # 14MB

# List of Streamlit commands that perform a Pandas "melt" operation on
# input dataframes.
DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart")


def _wraps_with_cleaned_sig(wrapped, num_args_to_remove):
    """Simplify the function signature by removing arguments from it.

    Removes the first N arguments from function signature (where N is
    num_args_to_remove). This is useful since function signatures are visible
    in our user-facing docs, and many methods in DeltaGenerator have arguments
    that users have no access to.

    Note that "self" is ignored by default. So to remove both "self" and the
    next argument you'd pass num_args_to_remove=1.
    """
    # By passing (None, ...), we're removing (arg1, ...) from *args
    args_to_remove = (None,) * num_args_to_remove
    fake_wrapped = functools.partial(wrapped, *args_to_remove)

    fake_wrapped.__doc__ = wrapped.__doc__
    fake_wrapped.__name__ = wrapped.__name__  # type: ignore[attr-defined]
    fake_wrapped.__module__ = wrapped.__module__

    return functools.wraps(fake_wrapped)


def _with_element(method):
    """Wrap function and pass a NewElement proto to be filled.

    This is a function decorator.

    Converts a method with arguments (self, element, ...) into a method
    with arguments (self, ...). Thus, the instantiation of the element
    proto object and creation of the element are handled automatically.

    Parameters
    ----------
    method : callable
        A DeltaGenerator method with arguments (self, element, ...)

    Returns
    -------
    callable
        A new DeltaGenerator method with arguments (self, ...)

    """

    @_wraps_with_cleaned_sig(method, 1)  # Remove self and element from sig.
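    # Editor's illustration (not original): for a method defined as
    #     def text(self, element, body): ...
    # the decorator above strips (self, element), so the signature shown
    # in user-facing docs is simply text(body).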
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. 
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------
        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
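        # Editor's illustration (not original code): per the docstring above,
        # figure_or_data accepts several shapes, so these calls are all valid:
        #     st.plotly_chart(fig)                 # a plotly.graph_objs.Figure
        #     st.plotly_chart([trace1, trace2])    # a list of traces
        #     st.plotly_chart({'data': [trace1]})  # a dict-style figure spec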
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.

        format : str
            The mime type for the video file. Defaults to 'video/mp4'.
            See https://tools.ietf.org/html/rfc4281 for more info.

        start_time: int
            The time from which this element should start playing.

        Example
        -------
        >>> video_file = open('myvideo.mp4', 'rb')
        >>> video_bytes = video_file.read()
        >>>
        >>> st.video(video_bytes)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv
           height: 600px

        .. note::
           Some videos may not display if they are encoded using MP4V (which
           is an export option in OpenCV), as this codec is not widely
           supported by browsers. Converting your video to H.264 will allow
           the video to be displayed in Streamlit. See this
           `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_
           or this `Streamlit forum post
           <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_
           for more information.

        """
        from .elements import media_proto

        media_proto.marshall_video(
            self._get_coordinates(), element.video, data, format, start_time
        )

    @_with_element
    def button(self, element, label, key=None):
        """Display a button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            If the button was clicked on the last run of the app.

        Example
        -------
        >>> if st.button('Say hello'):
        ...     st.write('Why hello there')
        ... else:
        ...     st.write('Goodbye')

        """
        element.button.label = label
        element.button.default = False

        ui_value = _get_widget_ui_value("button", element, user_key=key)
        current_value = ui_value if ui_value is not None else False
        return current_value

    @_with_element
    def checkbox(self, element, label, value=False, key=None):
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        """
        element.checkbox.label = label
        element.checkbox.default = bool(value)

        ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return bool(current_value)

    @_with_element
    def multiselect(
        self, element, label, options, default=None, format_func=str, key=None
    ):
        """Display a multiselect widget.

        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        default: [str] or None
            List of default values.
        format_func : function
            Function to modify the display of multiselect options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the multiselect.
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
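        # (Editor's note, an illustrative aside rather than original source:
        # these bounds exist because widget values round-trip through the
        # browser, where numbers are IEEE-754 doubles. Assuming the usual
        # JavaScript limits, an int bound beyond about 2**53 - 1 cannot be
        # represented exactly on the frontend, so it is rejected here rather
        # than silently corrupted. Roughly the kind of guard involved:
        #
        #     JS_MAX_SAFE_INTEGER = (1 << 53) - 1  # hypothetical name
        #     if abs(min_value) > JS_MAX_SAFE_INTEGER:
        #         raise JSNumberBoundsException(...)
        #
        # The real checks live in streamlit.js_number.JSNumber.)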
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places from
        # the `step` argument, but this would only be meaningful if step were a
        # decimal. As a possible improvement we could make this function accept
        # decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader
            is for.
        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.
        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. Output must be purely numeric. This does not
            impact the return value.
            Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int or float
            The current value of the numeric input widget. The return type
            will match the data type of the value parameter.

        Example
        -------
        >>> number = st.number_input('Insert a number')
        >>> st.write('The current number is ', number)
        """
        if isinstance(value, NoValue):
            # Compare against None explicitly: 0 and 0.0 are valid minimums
            # and must not fall through to the float default.
            if min_value is not None:
                value = min_value
            else:
                value = 0.0  # We set a float as default

        int_value = isinstance(value, numbers.Integral)
        float_value = isinstance(value, float)

        if value is None:
            raise StreamlitAPIException(
                "Default value for number_input should be an int or a float."
            )
        else:
            if format is None:
                format = "%d" if int_value else "%0.2f"

            if format in ["%d", "%u", "%i"] and float_value:
                # Warn user to check if displaying float as int was really intended.
                import streamlit as st

                st.warning(
                    "Warning: NumberInput value below is float, but format {} displays as integer.".format(
                        format
                    )
                )

            if step is None:
                step = 1 if int_value else 0.01

        try:
            float(format % 2)
        except (TypeError, ValueError):
            raise StreamlitAPIException(
                "Format string for st.number_input contains invalid characters: %s"
                % format
            )

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]

        int_args = all(
            map(
                lambda a: (
                    isinstance(a, numbers.Integral) or isinstance(a, type(None))
                ),
                args,
            )
        )
        float_args = all(
            map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)
        )

        if not int_args and not float_args:
            raise StreamlitAPIException(
                "All arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args

        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "All numerical arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step_type": type(step).__name__,
                }
            )

        # Compare bounds against None explicitly so that a bound of 0 or 0.0
        # is still enforced (0 is falsy and would otherwise skip the check).
        if (min_value is not None and min_value > value) or (
            max_value is not None and max_value < value
        ):
            raise StreamlitAPIException(
                "The default `value` of %(value)s "
                "must lie between the `min_value` of %(min)s "
                "and the `max_value` of %(max)s, inclusively."
                % {"value": value, "min": min_value, "max": max_value}
            )

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
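        # (Editor's note, illustrative rather than original source: the
        # type-consistency checks above mean that, for example,
        # st.number_input("x", min_value=0, max_value=10, value=5) yields an
        # int widget with step 1, while mixing types, say min_value=0 with
        # value=5.0, raises a StreamlitAPIException instead of silently
        # coercing one operand.)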
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... 
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st.table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table.add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st.line_chart(df1)
        >>> my_chart.add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st.vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...       'some_fancy_name': df1,  # <-- named dataset
        ...      },
        ...     'data': {'name': 'some_fancy_name'},
        ... })
        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException("Only existing elements can `add_rows`.")

        # Accept syntax st.add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st.add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()
        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows(). "
                "Command requires exactly one dataset."
            )

        # When doing add_rows on an element that does not already have data
        # (for example, st.line_chart() without any args), call the original
        # st.foo() element with new data instead of doing an add_rows().
        if (
            self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES
            and self._cursor.props["last_index"] is None
        ):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            st_method_name = self._cursor.props["delta_type"]
            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)
            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"], self._cursor.props["last_index"]
        )

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index

        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self


def _maybe_melt_data_for_add_rows(data, delta_type, last_index):
    import pandas as pd
    import streamlit.elements.data_frame_proto as data_frame_proto

    # For some delta types we have to reshape the data structure
    # otherwise the input data and the actual data used
    # by vega_lite will be different and it will throw an error.
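    # (Editor's note, illustrative rather than original source: the melt
    # below converts a wide frame to the long form the built-in charts
    # expect. For example, a frame with RangeIndex [0, 1] and columns
    # ['a', 'b'] becomes one row per (index, column) pair, with columns
    # ['index', 'variable', 'value'].)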
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
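# (Editor's summary, hedged; this is a reading of the code above rather
# than documentation from the original authors. The typical flow for a
# widget call such as st.slider(...) appears to be:
#
#   1. @_with_element wraps the method, building a marshall_element
#      closure that will fill a NewElement proto.
#   2. The method body fills the proto and calls _get_widget_ui_value(),
#      which assigns a widget id via _set_widget_id() and reads back any
#      value the frontend reported for that id.
#   3. _enqueue_new_element_delta() packages the proto into a ForwardMsg
#      and hands it to _enqueue_message(), which pushes it onto the
#      report context for delivery to the browser.)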
_enqueue_new_element_delta
Create NewElement delta, fill it, and enqueue it.

Parameters
----------
marshall_element : callable
    Function which sets the fields for a NewElement protobuf.
element_width : int or None
    Desired width for the element
element_height : int or None
    Desired height for the element

Returns
-------
DeltaGenerator
    A DeltaGenerator that can be used to modify the newly-created
    element.
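Sketch (editor's addition): a hedged guess at the shape of this masked
method, inferred only from the docstring above and from its call sites in
the file below (`_with_element`'s `wrapped_method`, `dataframe`, `_block`,
`_value_or_dg`). It is meant to be read inside the DeltaGenerator class;
the `element_dimension_spec` proto fields and the `get_locked_cursor`
keyword arguments are assumptions, not the reference implementation:

```python
def _enqueue_new_element_delta(
    self,
    marshall_element,
    delta_type,
    last_index=None,
    element_width=None,
    element_height=None,
):
    rv = None
    if self._container is not None and self._cursor is not None:
        # Let the caller fill in the NewElement proto. Widgets return
        # their current value from marshall_element; other elements None.
        msg = ForwardMsg_pb2.ForwardMsg()
        rv = marshall_element(msg.delta.new_element)
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index
        if element_width is not None:
            msg.metadata.element_dimension_spec.width = element_width  # assumed field
        if element_height is not None:
            msg.metadata.element_dimension_spec.height = element_height  # assumed field

        # Lock the cursor so a later add_rows() can target this element.
        output_dg = DeltaGenerator(
            container=self._container,
            cursor=self._cursor.get_locked_cursor(
                delta_type=delta_type, last_index=last_index  # assumed kwargs
            ),
        )
        _enqueue_message(msg)
    else:
        # Null DeltaGenerator: render nothing (useful for testing).
        output_dg = self

    # Widgets return real values; for other elements, return the DG so it
    # can be mutated later (e.g. with add_rows).
    return _value_or_dg(rv, output_dg)
```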
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
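    # (Editor's note: for chart commands listed in
    # DELTAS_TYPES_THAT_MELT_DATAFRAMES, the wrapper below also captures
    # the input frame's last index before marshalling, so that a later
    # add_rows() call can extend the melted data with a RangeIndex that
    # continues where this one left off.)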
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) # MASKED: _enqueue_new_element_delta function (lines 336-400) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. 
But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. 
output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. 
Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. 
        The main difference is this command uses the data's own column and
        indices to figure out the chart's spec. As a result this is easier
        to use for many "just plot this" scenarios, while being less
        customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        ..
        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.
""" import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphlib graph object or dot string to display width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphlib graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. 
            If a Matplotlib Figure, converts it to a Plotly figure and
            displays it.

        width : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the figure.
            Please refer to the Plotly documentation for details.

        height : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the figure.
            Please refer to the Plotly documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        sharing : {'streamlit', 'private', 'secret', 'public'}
            Use 'streamlit' to insert the plot and all its dependencies
            directly in the Streamlit app, which means it works offline too.
            This is the default.
            Use any other sharing mode to send the app to Plotly's servers,
            and embed the result into the Streamlit app. See
            https://plot.ly/python/privacy/ for more. Note that these sharing
            modes require a Plotly account.

        **kwargs
            Any argument accepted by Plotly's `plot()` function.

        To show Plotly charts in Streamlit, just call `st.plotly_chart`
        wherever you would call Plotly's `py.plot` or `py.iplot`.

        Example
        -------

        The example below comes straight from the examples at
        https://plot.ly/python:

        >>> import streamlit as st
        >>> import plotly.figure_factory as ff
        >>> import numpy as np
        >>>
        >>> # Add histogram data
        >>> x1 = np.random.randn(200) - 2
        >>> x2 = np.random.randn(200)
        >>> x3 = np.random.randn(200) + 2
        >>>
        >>> # Group data together
        >>> hist_data = [x1, x2, x3]
        >>>
        >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
        >>>
        >>> # Create distplot with custom bin_size
        >>> fig = ff.create_distplot(
        ...         hist_data, group_labels, bin_size=[.1, .25, .5])
        >>>
        >>> # Plot!
        >>> st.plotly_chart(fig, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
           height: 400px

        """
        # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
        # for their main parameter. I don't like the name, but it's best to
        # keep it in sync with what Plotly calls it.
        import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of
            `fig`.

            * If `fig` is set, defaults to `False`.
            * If `fig` is not set, defaults to `True`. This simulates
              Jupyter's approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this
        function closely follow the ones for Bokeh's `show` function. You can
        find more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
           height: 600px

        """
        import streamlit.elements.bokeh_chart as bokeh_chart

        bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width)

    @_with_element
    def image(
        self,
        element,
        image,
        caption=None,
        width=None,
        use_column_width=False,
        clamp=False,
        channels="RGB",
        format="JPEG",
    ):
        """Display an image or list of images.

        Parameters
        ----------
        image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str]
            Monochrome image of shape (w,h) or (w,h,1)
            OR a color image of shape (w,h,3)
            OR an RGBA image of shape (w,h,4)
            OR a URL to fetch the image from
            OR a list of one of the above, to display multiple images.
        caption : str or list of str
            Image caption. If displaying multiple images, caption should be a
            list of captions (one for each image).
        width : int or None
            Image width. None means use the image width.
        use_column_width : bool
            If True, set the image width to the column width. This takes
            precedence over the `width` parameter.
        clamp : bool
            Clamp image pixel values to a valid range ([0-255] per channel).
            This is only meaningful for byte array images; the parameter is
            ignored for image URLs. If this is not set, and an image has an
            out-of-range value, an error will be thrown.
        channels : 'RGB' or 'BGR'
            If image is an nd.array, this parameter denotes the format used to
            represent color information. Defaults to 'RGB', meaning
            `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green,
            and `image[:, :, 2]` is blue. For images coming from libraries
            like OpenCV you should set this to 'BGR', instead.
        format : 'JPEG' or 'PNG'
            This parameter specifies the image format to use when transferring
            the image data. Defaults to 'JPEG'.

        Example
        -------
        >>> from PIL import Image
        >>> image = Image.open('sunrise.jpg')
        >>>
        >>> st.image(image, caption='Sunrise by the mountains',
        ...          use_column_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY
           height: 630px

        """
        from .elements import image_proto

        if use_column_width:
            width = -2
        elif width is None:
            width = -1
        elif width <= 0:
            raise StreamlitAPIException("Image width must be positive.")

        image_proto.marshall_images(
            self._get_coordinates(),
            image,
            caption,
            width,
            element.imgs,
            clamp,
            channels,
            format,
        )

    @_with_element
    def audio(self, element, data, format="audio/wav", start_time=0):
        """Display an audio player.

        Parameters
        ----------
        data : str, bytes, BytesIO, numpy.ndarray, or file opened with
                io.open().
            Raw audio data, filename, or a URL pointing to the file to load.
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.
        start_time: int
            The time from which this element should start playing.
        format : str
            The mime type for the audio file. Defaults to 'audio/wav'.
            See https://tools.ietf.org/html/rfc4281 for more info.

        Example
        -------
        >>> audio_file = open('myaudio.ogg', 'rb')
        >>> audio_bytes = audio_file.read()
        >>>
        >>> st.audio(audio_bytes, format='audio/ogg')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb
           height: 400px

        """
        from .elements import media_proto

        media_proto.marshall_audio(
            self._get_coordinates(), element.audio, data, format, start_time
        )

    @_with_element
    def video(self, element, data, format="video/mp4", start_time=0):
        """Display a video player.

        Parameters
        ----------
        data : str, bytes, BytesIO, numpy.ndarray, or file opened with
                io.open().
            Raw video data, filename, or URL pointing to a video to load.
            Includes support for YouTube URLs.
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.
        format : str
            The mime type for the video file. Defaults to 'video/mp4'.
            See https://tools.ietf.org/html/rfc4281 for more info.
        start_time: int
            The time from which this element should start playing.

        Example
        -------
        >>> video_file = open('myvideo.mp4', 'rb')
        >>> video_bytes = video_file.read()
        >>>
        >>> st.video(video_bytes)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv
           height: 600px

        .. note::
           Some videos may not display if they are encoded using MP4V (which
           is an export option in OpenCV), as this codec is not widely
           supported by browsers. Converting your video to H.264 will allow
           the video to be displayed in Streamlit. See this `StackOverflow
           post <https://stackoverflow.com/a/49535220/2394542>`_ or this
           `Streamlit forum post
           <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_
           for more information.

        """
        from .elements import media_proto

        media_proto.marshall_video(
            self._get_coordinates(), element.video, data, format, start_time
        )

    @_with_element
    def button(self, element, label, key=None):
        """Display a button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            If the button was clicked on the last run of the app.
        Example
        -------
        >>> if st.button('Say hello'):
        ...     st.write('Why hello there')
        ... else:
        ...     st.write('Goodbye')

        """
        element.button.label = label
        element.button.default = False

        ui_value = _get_widget_ui_value("button", element, user_key=key)
        current_value = ui_value if ui_value is not None else False
        return current_value

    @_with_element
    def checkbox(self, element, label, value=False, key=None):
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        """
        element.checkbox.label = label
        element.checkbox.default = bool(value)

        ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return bool(current_value)

    @_with_element
    def multiselect(
        self, element, label, options, default=None, format_func=str, key=None
    ):
        """Display a multiselect widget.

        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        default: [str] or None
            List of default values.
        format_func : function
            Function to modify the display of selectbox options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the selectbox.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        [str]
            A list with the selected options

        Example
        -------
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. note::
           User experience can be degraded for large lists of `options`
           (100+), as this widget is not designed to handle arbitrary text
           search efficiently. See this `thread
           <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
           on the Streamlit community forum for more information and
           `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_
           for updates on the issue.

        """

        # Perform validation checks and return indices based on the default
        # values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This if is done before others because calling if not x (done
                # right below) when x is of type pd.Series() or np.array()
                # throws a ValueError exception.
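                # (Illustrative note, not from the original source: e.g.
                # `if not pd.Series([1, 2]): ...` raises "The truth value of a
                # Series is ambiguous", which is why ndarray/Series inputs are
                # converted to lists below before any truthiness check.)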
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
        ...     st.write("You didn\'t select comedy.")

        """
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Radio Value has invalid type: %s" % type(index).__name__
            )

        if len(options) > 0 and not 0 <= index < len(options):
            raise StreamlitAPIException(
                "Radio index must be between 0 and length of options"
            )

        element.radio.label = label
        element.radio.default = index
        element.radio.options[:] = [str(format_func(option)) for option in options]

        ui_value = _get_widget_ui_value("radio", element, user_key=key)
        current_value = ui_value if ui_value is not None else index

        return (
            options[current_value]
            if len(options) > 0 and options[current_value] is not None
            else NoValue
        )

    @_with_element
    def selectbox(self, element, label, options, index=0, format_func=str, key=None):
        """Display a select widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of the labels. It receives the
            option as an argument and its output will be cast to str.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option

        Example
        -------
        >>> option = st.selectbox(
        ...     'How would you like to be contacted?',
        ...     ('Email', 'Home phone', 'Mobile phone'))
        >>>
        >>> st.write('You selected:', option)

        """
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Selectbox Value has invalid type: %s" % type(index).__name__
            )

        if len(options) > 0 and not 0 <= index < len(options):
            raise StreamlitAPIException(
                "Selectbox index must be between 0 and length of options"
            )

        element.selectbox.label = label
        element.selectbox.default = index
        element.selectbox.options[:] = [str(format_func(option)) for option in options]

        ui_value = _get_widget_ui_value("selectbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else index

        return (
            options[current_value]
            if len(options) > 0 and options[current_value] is not None
            else NoValue
        )

    @_with_element
    def slider(
        self,
        element,
        label,
        min_value=None,
        max_value=None,
        value=None,
        step=None,
        format=None,
        key=None,
    ):
        """Display a slider widget.

        This also allows you to render a range slider by passing a two-element
        tuple or list as the `value`.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this slider is for.
        min_value : int/float or None
            The minimum permitted value.
            Defaults to 0 if the value is an int, 0.0 otherwise.
        max_value : int/float or None
            The maximum permitted value.
            Defaults to 100 if the value is an int, 1.0 otherwise.
        value : int/float or a tuple/list of int/float or None
            The value of the slider when it first renders. If a tuple/list
            of two values is passed here, then a range slider with those
            lower and upper bounds is rendered. For example, if set to
            `(1, 10)` the slider will have a selectable range between 1 and
            10. Defaults to min_value.
        step : int/float or None
            The stepping interval.
            Defaults to 1 if the value is an int, 0.01 otherwise.
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. This does not impact the return value.
            Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int/float or tuple of int/float
            The current value of the slider widget. The return type will
            match the data type of the value parameter.

        Examples
        --------
        >>> age = st.slider('How old are you?', 0, 130, 25)
        >>> st.write("I'm ", age, 'years old')

        And here's an example of a range slider:

        >>> values = st.slider(
        ...     'Select a range of values',
        ...     0.0, 100.0, (25.0, 75.0))
        >>> st.write('Values:', values)

        """

        # Set value default.
        if value is None:
            value = min_value if min_value is not None else 0

        # Ensure that the value is either a single value or a range of values.
        single_value = isinstance(value, (int, float))
        range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
        if not single_value and not range_value:
            raise StreamlitAPIException(
                "Slider value should either be an int/float or a list/tuple of "
                "0 to 2 ints/floats"
            )

        # Ensure that the value is either an int/float or a list/tuple of
        # ints/floats.
        if single_value:
            int_value = isinstance(value, int)
            float_value = isinstance(value, float)
        else:
            int_value = all(map(lambda v: isinstance(v, int), value))
            float_value = all(map(lambda v: isinstance(v, float), value))

        if not int_value and not float_value:
            raise StreamlitAPIException(
                "Slider tuple/list components must be of the same type."
            )

        # Set corresponding defaults.
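        # (Illustrative note, not from the original source: with an int value
        # the widget ends up with min_value=0, max_value=100, step=1, while a
        # float value yields min_value=0.0, max_value=1.0, step=0.01, as the
        # block below shows.)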
        if min_value is None:
            min_value = 0 if int_value else 0.0
        if max_value is None:
            max_value = 100 if int_value else 1.0
        if step is None:
            step = 1 if int_value else 0.01

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]
        int_args = all(map(lambda a: isinstance(a, int), args))
        float_args = all(map(lambda a: isinstance(a, float), args))
        if not int_args and not float_args:
            raise StreamlitAPIException(
                "Slider value arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args
        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "Both value and arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that min <= value <= max.
        if single_value:
            if not min_value <= value <= max_value:
                raise StreamlitAPIException(
                    "The default `value` of %(value)s "
                    "must lie between the `min_value` of %(min)s "
                    "and the `max_value` of %(max)s, inclusively."
                    % {"value": value, "min": min_value, "max": max_value}
                )
        elif len(value) == 2:
            start, end = value
            if not min_value <= start <= end <= max_value:
                raise StreamlitAPIException(
                    "The value and/or arguments are out of range."
                )
        else:
            value = [min_value, max_value]

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
        # (We check `min_value` and `max_value` here; `value` and `step` are
        # already known to be in the [min_value, max_value] range.)
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places
        # from the `step` argument, but this would only be meaningful if step
        # were a decimal. As a possible improvement we could make this
        # function accept decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
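        # (Illustrative note, not from the original source: the frontend
        # reports slider state as an array, e.g. [42.0] for a single-value
        # slider or [25.0, 75.0] for a range slider, so the block below
        # coerces it back to the caller's types and shape.)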
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader is
            for.
        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.
        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet. (When ready to release, turn test back
        # on at file_uploader_test.py)
        accept_multiple_files = False

        if isinstance(type, str):
            type = [type]

        element.file_uploader.label = label
        element.file_uploader.type[:] = type if type is not None else []
        element.file_uploader.max_upload_size_mb = config.get_option(
            "server.maxUploadSize"
        )
        element.file_uploader.multiple_files = accept_multiple_files
        _set_widget_id("file_uploader", element, user_key=key)

        files = None
        ctx = get_report_ctx()
        if ctx is not None:
            files = ctx.uploaded_file_mgr.get_files(
                session_id=ctx.session_id, widget_id=element.file_uploader.id
            )

        if files is None:
            return NoValue

        file_datas = [get_encoded_file_data(file.data, encoding) for file in files]
        return file_datas if accept_multiple_files else file_datas[0]

    @_with_element
    def beta_color_picker(self, element, label, value=None, key=None):
        """Display a color picker widget.

        Note: This is a beta feature. See
        https://docs.streamlit.io/en/latest/pre_release_features.html for more
        information.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
        value : str or None
            The hex value of this widget when it first renders. If None,
            defaults to black.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content.
            Multiple widgets of the same type may not share the same key.

        Returns
        -------
        str
            The selected color as a hex string.

        Example
        -------
        >>> color = st.beta_color_picker('Pick A Color', '#00f900')
        >>> st.write('The current color is', color)

        """
        # set value default
        if value is None:
            value = "#000000"

        # make sure the value is a string
        if not isinstance(value, str):
            raise StreamlitAPIException(
                """
                Color Picker Value has invalid type: %s. Expects a hex string
                like '#00FFAA' or '#000'.
                """
                % type(value).__name__
            )

        # validate the value and expects a hex string
        match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value)

        if not match:
            raise StreamlitAPIException(
                """
                '%s' is not a valid hex code for colors. Valid ones are like
                '#00FFAA' or '#000'.
                """
                % value
            )

        element.color_picker.label = label
        element.color_picker.default = str(value)

        ui_value = _get_widget_ui_value("color_picker", element, user_key=key)
        current_value = ui_value if ui_value is not None else value

        return str(current_value)

    @_with_element
    def text_input(
        self, element, label, value="", max_chars=None, key=None, type="default"
    ):
        """Display a single-line text input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
        value : any
            The text value of this widget when it first renders. This will be
            cast to str internally.
        max_chars : int or None
            Max number of characters allowed in text input.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.
        type : str
            The type of the text input. This can be either "default" (for
            a regular text input), or "password" (for a text input that
            masks the user's typed value). Defaults to "default".

        Returns
        -------
        str
            The current value of the text input widget.

        Example
        -------
        >>> title = st.text_input('Movie title', 'Life of Brian')
        >>> st.write('The current movie title is', title)

        """
        element.text_input.label = label
        element.text_input.default = str(value)
        if max_chars is not None:
            element.text_input.max_chars = max_chars

        if type == "default":
            element.text_input.type = TextInput.DEFAULT
        elif type == "password":
            element.text_input.type = TextInput.PASSWORD
        else:
            raise StreamlitAPIException(
                "'%s' is not a valid text_input type. Valid types are 'default' and 'password'."
                % type
            )

        ui_value = _get_widget_ui_value("text_input", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return str(current_value)

    @_with_element
    def text_area(
        self, element, label, value="", height=None, max_chars=None, key=None
    ):
        """Display a multi-line text input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
        value : any
            The text value of this widget when it first renders. This will be
            cast to str internally.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.
        max_chars : int or None
            Maximum number of characters allowed in text area.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        str
            The current value of the text input widget.

        Example
        -------
        >>> txt = st.text_area('Text to analyze', '''
        ...     It was the best of times, it was the worst of times, it was
        ...     the age of wisdom, it was the age of foolishness, it was
        ...     the epoch of belief, it was the epoch of incredulity, it
        ...     was the season of Light, it was the season of Darkness, it
        ...     was the spring of hope, it was the winter of despair, (...)
        ...     ''')
        >>> st.write('Sentiment:', run_sentiment_analysis(txt))

        """
        element.text_area.label = label
        element.text_area.default = str(value)
        if height is not None:
            element.text_area.height = height
        if max_chars is not None:
            element.text_area.max_chars = max_chars

        ui_value = _get_widget_ui_value("text_area", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return str(current_value)

    @_with_element
    def time_input(self, element, label, value=None, key=None):
        """Display a time input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this time input is for.
        value : datetime.time/datetime.datetime
            The value of this widget when it first renders. This will be
            cast to str internally. Defaults to the current time.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        datetime.time
            The current value of the time input widget.

        Example
        -------
        >>> t = st.time_input('Set an alarm for', datetime.time(8, 45))
        >>> st.write('Alarm is set for', t)

        """
        # Set value default.
        if value is None:
            value = datetime.now().time()

        # Ensure that the value is either datetime/time
        if not isinstance(value, datetime) and not isinstance(value, time):
            raise StreamlitAPIException(
                "The type of the value should be either datetime or time."
            )

        # Convert datetime to time
        if isinstance(value, datetime):
            value = value.time()

        element.time_input.label = label
        element.time_input.default = time.strftime(value, "%H:%M")

        ui_value = _get_widget_ui_value("time_input", element, user_key=key)
        current_value = (
            datetime.strptime(ui_value, "%H:%M").time()
            if ui_value is not None
            else value
        )
        return current_value

    @_with_element
    def date_input(
        self,
        element,
        label,
        value=None,
        min_value=datetime.min,
        max_value=None,
        key=None,
    ):
        """Display a date input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this date input is for.
        value : datetime.date or datetime.datetime or list/tuple of
            datetime.date or datetime.datetime or None
            The value of this widget when it first renders. If a list/tuple
            with 0 to 2 date/datetime values is provided, the datepicker will
            allow users to provide a range. Defaults to today as a
            single-date picker.
        min_value : datetime.date or datetime.datetime
            The minimum selectable date. Defaults to datetime.min.
        max_value : datetime.date or datetime.datetime
            The maximum selectable date. Defaults to today+10y.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        datetime.date
            The current value of the date input widget.

        Example
        -------
        >>> d = st.date_input(
        ...     "When\'s your birthday",
        ...     datetime.date(2019, 7, 6))
        >>> st.write('Your birthday is:', d)

        """
        # Set value default.
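        # (Illustrative note, not from the original source: with no value
        # argument, e.g. st.date_input('Pick a day'), the widget renders as a
        # single-date picker preset to today's date via the default below.)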
        if value is None:
            value = datetime.now().date()

        single_value = isinstance(value, (date, datetime))
        range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
        if not single_value and not range_value:
            raise StreamlitAPIException(
                "DateInput value should either be an date/datetime or a list/tuple of "
                "0 - 2 date/datetime values"
            )

        if single_value:
            value = [value]

        element.date_input.is_range = range_value

        value = [v.date() if isinstance(v, datetime) else v for v in value]

        element.date_input.label = label
        element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value]

        if isinstance(min_value, datetime):
            min_value = min_value.date()

        element.date_input.min = date.strftime(min_value, "%Y/%m/%d")

        if max_value is None:
            today = date.today()
            max_value = date(today.year + 10, today.month, today.day)

        if isinstance(max_value, datetime):
            max_value = max_value.date()

        element.date_input.max = date.strftime(max_value, "%Y/%m/%d")

        ui_value = _get_widget_ui_value("date_input", element, user_key=key)

        if ui_value is not None:
            value = getattr(ui_value, "data")
            value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value]

        if single_value:
            return value[0]
        else:
            return tuple(value)

    @_with_element
    def number_input(
        self,
        element,
        label,
        min_value=None,
        max_value=None,
        value=NoValue(),
        step=None,
        format=None,
        key=None,
    ):
        """Display a numeric input widget.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this input is for.
        min_value : int or float or None
            The minimum permitted value.
            If None, there will be no minimum.
        max_value : int or float or None
            The maximum permitted value.
            If None, there will be no maximum.
        value : int or float or None
            The value of this widget when it first renders.
            Defaults to min_value, or 0.0 if min_value is None
        step : int or float or None
            The stepping interval.
            Defaults to 1 if the value is an int, 0.01 otherwise.
            If the value is not specified, the format parameter will be used.
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. Output must be purely numeric. This does not
            impact the return value. Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int or float
            The current value of the numeric input widget. The return type
            will match the data type of the value parameter.

        Example
        -------
        >>> number = st.number_input('Insert a number')
        >>> st.write('The current number is ', number)

        """
        if isinstance(value, NoValue):
            if min_value is not None:
                value = min_value
            else:
                value = 0.0  # We set a float as default

        int_value = isinstance(value, numbers.Integral)
        float_value = isinstance(value, float)

        if value is None:
            raise StreamlitAPIException(
                "Default value for number_input should be an int or a float."
            )
        else:
            if format is None:
                format = "%d" if int_value else "%0.2f"

            if format in ["%d", "%u", "%i"] and float_value:
                # Warn user to check if displaying float as int was really
                # intended.
                import streamlit as st

                st.warning(
                    "Warning: NumberInput value below is float, but format {} displays as integer.".format(
                        format
                    )
                )

            if step is None:
                step = 1 if int_value else 0.01

        try:
            float(format % 2)
        except (TypeError, ValueError):
            raise StreamlitAPIException(
                "Format string for st.number_input contains invalid characters: %s"
                % format
            )

        # Ensure that all arguments are of the same type.
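        # (Illustrative note, not from the original source: mixing types
        # fails the check below, e.g.
        # st.number_input('x', min_value=0, max_value=10, value=5.0) raises
        # because the bounds are ints while the value is a float.)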
        args = [min_value, max_value, step]

        int_args = all(
            map(
                lambda a: (
                    isinstance(a, numbers.Integral) or isinstance(a, type(None))
                ),
                args,
            )
        )
        float_args = all(
            map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)
        )

        if not int_args and not float_args:
            raise StreamlitAPIException(
                "All arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args

        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "All numerical arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step_type": type(step).__name__,
                }
            )

        if (min_value and min_value > value) or (max_value and max_value < value):
            raise StreamlitAPIException(
                "The default `value` of %(value)s "
                "must lie between the `min_value` of %(min)s "
                "and the `max_value` of %(max)s, inclusively."
                % {"value": value, "min": min_value, "max": max_value}
            )

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
        try:
            if all_ints:
                if min_value is not None:
                    JSNumber.validate_int_bounds(min_value, "`min_value`")
                if max_value is not None:
                    JSNumber.validate_int_bounds(max_value, "`max_value`")
                if step is not None:
                    JSNumber.validate_int_bounds(step, "`step`")
                JSNumber.validate_int_bounds(value, "`value`")
            else:
                if min_value is not None:
                    JSNumber.validate_float_bounds(min_value, "`min_value`")
                if max_value is not None:
                    JSNumber.validate_float_bounds(max_value, "`max_value`")
                if step is not None:
                    JSNumber.validate_float_bounds(step, "`step`")
                JSNumber.validate_float_bounds(value, "`value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        number_input = element.number_input
        number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT
        number_input.label = label
        number_input.default = value

        if min_value is not None:
            number_input.min = min_value
            number_input.has_min = True

        if max_value is not None:
            number_input.max = max_value
            number_input.has_max = True

        if step is not None:
            number_input.step = step

        if format is not None:
            number_input.format = format

        ui_value = _get_widget_ui_value("number_input", element, user_key=key)

        return ui_value if ui_value is not None else value

    @_with_element
    def progress(self, element, value):
        """Display a progress bar.

        Parameters
        ----------
        value : int or float
            0 <= value <= 100 for int
            0.0 <= value <= 1.0 for float

        Example
        -------
        Here is an example of a progress bar increasing over time:

        >>> import time
        >>>
        >>> my_bar = st.progress(0)
        >>>
        >>> for percent_complete in range(100):
        ...     time.sleep(0.1)
        ...     my_bar.progress(percent_complete + 1)

        """
        # TODO: standardize numerical type checking across st.* functions.
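        # (Illustrative note, not from the original source: a float value is
        # treated as a fraction and scaled to a percentage below, so
        # my_bar.progress(0.42) renders the same fill as my_bar.progress(42).)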
        if isinstance(value, float):
            if 0.0 <= value <= 1.0:
                element.progress.value = int(value * 100)
            else:
                raise StreamlitAPIException(
                    "Progress Value has invalid value [0.0, 1.0]: %f" % value
                )
        elif isinstance(value, int):
            if 0 <= value <= 100:
                element.progress.value = value
            else:
                raise StreamlitAPIException(
                    "Progress Value has invalid value [0, 100]: %d" % value
                )
        else:
            raise StreamlitAPIException(
                "Progress Value has invalid type: %s" % type(value).__name__
            )

    @_with_element
    def empty(self, element):
        """Add a placeholder to the app.

        The placeholder can be filled any time by calling methods on the
        return value.

        Example
        -------
        >>> my_placeholder = st.empty()
        >>>
        >>> # Now replace the placeholder with some text:
        >>> my_placeholder.text("Hello world!")
        >>>
        >>> # And replace the text with an image:
        >>> my_placeholder.image(my_image_bytes)

        """
        # The protobuf needs something to be set
        element.empty.unused = True

    @_with_element
    def map(self, element, data=None, zoom=None, use_container_width=True):
        """Display a map with points on it.

        This is a wrapper around st.pydeck_chart to quickly create
        scatterplot charts on top of a map, with auto-centering and auto-zoom.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to be plotted. Must have columns called 'lat', 'lon',
            'latitude', or 'longitude'.
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.
- "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. 
        Example
        -------
        Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
        the light map style:

        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.pydeck_chart(pdk.Deck(
        ...     map_style='mapbox://styles/mapbox/light-v9',
        ...     initial_view_state=pdk.ViewState(
        ...         latitude=37.76,
        ...         longitude=-122.4,
        ...         zoom=11,
        ...         pitch=50,
        ...     ),
        ...     layers=[
        ...         pdk.Layer(
        ...             'HexagonLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             radius=200,
        ...             elevation_scale=4,
        ...             elevation_range=[0, 1000],
        ...             pickable=True,
        ...             extruded=True,
        ...         ),
        ...         pdk.Layer(
        ...             'ScatterplotLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             get_color='[200, 30, 0, 160]',
        ...             get_radius=200,
        ...         ),
        ...     ],
        ... ))

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
           height: 530px

        """
        import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart

        deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width)

    @_with_element
    def table(self, element, data=None):
        """Display a static table.

        This differs from `st.dataframe` in that the table in this case is
        static: its entire contents are just laid out directly on the page.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The table data.

        Example
        -------
        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 5),
        ...    columns=('col %d' % i for i in range(5)))
        ...
        >>> st.table(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq
           height: 480px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, element.table)

    def add_rows(self, data=None, **kwargs):
        """Concatenate a dataframe to the bottom of the current one.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Table to concat. Optional.

        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st.table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table.add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st.line_chart(df1)
        >>> my_chart.add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st.vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...         'some_fancy_name': df1,  # <-- named dataset
        ...     },
        ...     'data': {'name': 'some_fancy_name'},
        ... }),
        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException("Only existing elements can `add_rows`.")

        # Accept syntax st.add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st.add_rows(foo=df).
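        # (Illustrative note, not from the original source: a call like
        # my_chart.add_rows(some_fancy_name=df2) arrives here as
        # kwargs={'some_fancy_name': df2}, so popitem() below recovers both
        # the dataset name and the data.)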
        # Accept syntax st.add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()

        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows(). "
                "Command requires exactly one dataset."
            )

        # When doing add_rows on an element that does not already have data
        # (for example, st.line_chart() without any args), call the original
        # st.foo() element with new data instead of doing an add_rows().
        if (
            self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES
            and self._cursor.props["last_index"] is None
        ):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            st_method_name = self._cursor.props["delta_type"]
            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)
            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"], self._cursor.props["last_index"]
        )

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index

        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self


def _maybe_melt_data_for_add_rows(data, delta_type, last_index):
    import pandas as pd
    import streamlit.elements.data_frame_proto as data_frame_proto

    # For some delta types we have to reshape the data structure,
    # otherwise the input data and the actual data used by vega_lite
    # will be different, and it will throw an error.
    if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES:
        if not isinstance(data, pd.DataFrame):
            data = type_util.convert_anything_to_df(data)

        if type(data.index) is pd.RangeIndex:
            old_step = _get_pandas_index_attr(data, "step")

            # We have to drop the predefined index.
            data = data.reset_index(drop=True)

            old_stop = _get_pandas_index_attr(data, "stop")

            if old_step is None or old_stop is None:
                raise StreamlitAPIException(
                    "'RangeIndex' object has no attribute 'step'"
                )

            start = last_index + old_step
            stop = last_index + old_step + old_stop

            data.index = pd.RangeIndex(start=start, stop=stop, step=old_step)
            last_index = stop - 1

        index_name = data.index.name
        if index_name is None:
            index_name = "index"

        data = pd.melt(data.reset_index(), id_vars=[index_name])

    return data, last_index


def _clean_text(text):
    return textwrap.dedent(str(text)).strip()


def _value_or_dg(value, dg):
    """Return either value, or None, or dg.

    This is needed because Widgets have meaningful return values. This is
    unlike other elements, which always return None. Then we internally
    replace that None with a DeltaGenerator instance.

    However, sometimes a widget may want to return None, and in this case it
    should not be replaced by a DeltaGenerator. So we have a special NoValue
    object that gets replaced by None.
    """
    if value is NoValue:
        return None
    if value is None:
        return dg
    return value


def _enqueue_message(msg):
    """Enqueues a ForwardMsg proto to send to the app."""
    ctx = get_report_ctx()
    if ctx is None:
        raise NoSessionContext()

    ctx.enqueue(msg)
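# A minimal doctest-style sketch (not part of the original module) of what
# _maybe_melt_data_for_add_rows above does for the chart types in
# DELTAS_TYPES_THAT_MELT_DATAFRAMES; the concrete frame and the last_index
# of 4 are illustrative assumptions:
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({"a": [1, 2]})  # fresh RangeIndex 0..1
#     >>> melted, last = _maybe_melt_data_for_add_rows(df, "line_chart", 4)
#     >>> list(melted.columns)  # wide frame melted to long form
#     ['index', 'variable', 'value']
#     >>> last  # appended rows re-indexed to 5..6
#     6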
def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg)
336
400
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Allows us to create and absorb changes (aka Deltas) to elements."""

import functools
import json
import random
import textwrap
import numbers
import re
from datetime import datetime
from datetime import date
from datetime import time

from streamlit import caching
from streamlit import config
from streamlit import cursor
from streamlit import type_util
from streamlit.ReportThread import get_report_ctx
from streamlit.errors import DuplicateWidgetID
from streamlit.errors import StreamlitAPIException
from streamlit.errors import NoSessionContext
from streamlit.file_util import get_encoded_file_data
from streamlit.js_number import JSNumber
from streamlit.js_number import JSNumberBoundsException
from streamlit.proto import Alert_pb2
from streamlit.proto import Balloons_pb2
from streamlit.proto import BlockPath_pb2
from streamlit.proto import ForwardMsg_pb2
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.logger import get_logger
from streamlit.type_util import is_type

LOGGER = get_logger(__name__)

# Save the type built-in for when we override the name "type".
_type = type

MAX_DELTA_BYTES = 14 * 1024 * 1024  # 14MB

# List of Streamlit commands that perform a Pandas "melt" operation on
# input dataframes.
DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart")


def _wraps_with_cleaned_sig(wrapped, num_args_to_remove):
    """Simplify the function signature by removing arguments from it.

    Removes the first N arguments from the function signature (where N is
    num_args_to_remove). This is useful since function signatures are visible
    in our user-facing docs, and many methods in DeltaGenerator have arguments
    that users have no access to.

    Note that "self" is ignored by default. So to remove both "self" and the
    next argument you'd pass num_args_to_remove=1.
    """
    # By passing (None, ...), we're removing (arg1, ...) from *args
    args_to_remove = (None,) * num_args_to_remove
    fake_wrapped = functools.partial(wrapped, *args_to_remove)

    fake_wrapped.__doc__ = wrapped.__doc__
    fake_wrapped.__name__ = wrapped.__name__  # type: ignore[attr-defined]
    fake_wrapped.__module__ = wrapped.__module__

    return functools.wraps(fake_wrapped)


def _with_element(method):
    """Wrap function and pass a NewElement proto to be filled.

    This is a function decorator.

    Converts a method with arguments (self, element, ...) into a method with
    arguments (self, ...). Thus, the instantiation of the element proto object
    and creation of the element are handled automatically.

    Parameters
    ----------
    method : callable
        A DeltaGenerator method with arguments (self, element, ...)

    Returns
    -------
    callable
        A new DeltaGenerator method with arguments (self, ...)

    """

    @_wraps_with_cleaned_sig(method, 1)  # Remove self and element from sig.
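    # Illustrative sketch (an assumption, not from the original source): for a
    # method defined as
    #
    #     def text(self, element, body): ...
    #
    # the decorator pair above yields a wrapper whose user-facing signature is
    # just (body,): `self` and `element` are hidden from the docs, and
    # `element` is filled in by marshall_element() below.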
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!

        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or
            dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or
            dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphviz graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphviz graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
           height: 400px

        """
        import streamlit.elements.graphviz_chart as graphviz_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.graphviz_chart` are deprecated and will be removed on 2020-03-04"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )

        graphviz_chart.marshall(
            element.graphviz_chart, figure_or_dot, use_container_width
        )

    @_with_element
    def plotly_chart(
        self,
        element,
        figure_or_data,
        width=0,
        height=0,
        use_container_width=False,
        sharing="streamlit",
        **kwargs,
    ):
        """Display an interactive Plotly chart.

        Plotly is a charting library for Python. The arguments to this
        function closely follow the ones for Plotly's `plot()` function. You
        can find more about Plotly at https://plot.ly/python.

        Parameters
        ----------
        figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,
            dict/list of plotly.graph_objs.Figure/Data, or
            matplotlib.figure.Figure

            See https://plot.ly/python/ for examples of graph descriptions.

            If a Matplotlib Figure, converts it to a Plotly figure and
            displays it.

        width : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the figure.
            Please refer to the Plotly documentation for details.

        height : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the figure.
            Please refer to the Plotly documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        sharing : {'streamlit', 'private', 'secret', 'public'}
            Use 'streamlit' to insert the plot and all its dependencies
            directly in the Streamlit app, which means it works offline too.
            This is the default.
            Use any other sharing mode to send the app to Plotly's servers,
            and embed the result into the Streamlit app. See
            https://plot.ly/python/privacy/ for more. Note that these sharing
            modes require a Plotly account.

        **kwargs
            Any argument accepted by Plotly's `plot()` function.

        To show Plotly charts in Streamlit, just call `st.plotly_chart`
        wherever you would call Plotly's `py.plot` or `py.iplot`.

        Example
        -------
        The example below comes straight from the examples at
        https://plot.ly/python:

        >>> import streamlit as st
        >>> import plotly.figure_factory as ff
        >>> import numpy as np
        >>>
        >>> # Add histogram data
        >>> x1 = np.random.randn(200) - 2
        >>> x2 = np.random.randn(200)
        >>> x3 = np.random.randn(200) + 2
        >>>
        >>> # Group data together
        >>> hist_data = [x1, x2, x3]
        >>>
        >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
        >>>
        >>> # Create distplot with custom bin_size
        >>> fig = ff.create_distplot(
        ...         hist_data, group_labels, bin_size=[.1, .25, .5])
        >>>
        >>> # Plot!
        >>> st.plotly_chart(fig, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
           height: 400px

        """
        # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
        # for their main parameter. I don't like the name, but it's best to
        # keep it in sync with what Plotly calls it.
        import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of
            `fig`.

            * If `fig` is set, defaults to `False`.

            * If `fig` is not set, defaults to `True`. This simulates
              Jupyter's approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this
        function closely follow the ones for Bokeh's `show` function. You can
        find more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.

        format : str
            The mime type for the video file. Defaults to 'video/mp4'.
            See https://tools.ietf.org/html/rfc4281 for more info.

        start_time: int
            The time from which this element should start playing.

        Example
        -------
        >>> video_file = open('myvideo.mp4', 'rb')
        >>> video_bytes = video_file.read()
        >>>
        >>> st.video(video_bytes)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv
           height: 600px

        .. note::
           Some videos may not display if they are encoded using MP4V (which
           is an export option in OpenCV), as this codec is not widely
           supported by browsers. Converting your video to H.264 will allow
           the video to be displayed in Streamlit. See this `StackOverflow
           post <https://stackoverflow.com/a/49535220/2394542>`_ or this
           `Streamlit forum post
           <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_
           for more information.

        """
        from .elements import media_proto

        media_proto.marshall_video(
            self._get_coordinates(), element.video, data, format, start_time
        )

    @_with_element
    def button(self, element, label, key=None):
        """Display a button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            If the button was clicked on the last run of the app.

        Example
        -------
        >>> if st.button('Say hello'):
        ...     st.write('Why hello there')
        ... else:
        ...     st.write('Goodbye')

        """
        element.button.label = label
        element.button.default = False

        ui_value = _get_widget_ui_value("button", element, user_key=key)
        current_value = ui_value if ui_value is not None else False
        return current_value

    @_with_element
    def checkbox(self, element, label, value=False, key=None):
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        """
        element.checkbox.label = label
        element.checkbox.default = bool(value)

        ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return bool(current_value)

    @_with_element
    def multiselect(
        self, element, label, options, default=None, format_func=str, key=None
    ):
        """Display a multiselect widget.

        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        default: [str] or None
            List of default values.
        format_func : function
            Function to modify the display of multiselect options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the multiselect.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        [str]
            A list with the selected options

        Example
        -------
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. note::
           User experience can be degraded for large lists of `options`
           (100+), as this widget is not designed to handle arbitrary text
           search efficiently. See this `thread
           <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
           on the Streamlit community forum for more information and
           `GitHub issue #1059
           <https://github.com/streamlit/streamlit/issues/1059>`_ for updates
           on the issue.

        """

        # Perform validation checks and return indices based on the default
        # values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This if is done before others because calling if not x (done
                # right below) when x is of type pd.Series() or np.array()
                # throws a ValueError exception.
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
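        # Illustrative note (an assumption, not from the original source):
        # these bounds exist because the frontend stores numbers as JavaScript
        # floats, which represent integers exactly only within
        # +/-(2**53 - 1); values outside that range raise
        # JSNumberBoundsException, re-raised below as a StreamlitAPIException.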
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places from
        # the `step` argument, but this would only be meaningful if step were a
        # decimal. As a possible improvement we could make this function accept
        # decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments.
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints.
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array, destructure it into a
            # single variable.
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader
            is for.
        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.
        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
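        # Illustrative usage (an aside, not executed here):
        #   st.number_input('n', 0, 10, 5)       -> all-int path, "%d" format
        #   st.number_input('x', 0.0, 1.0, 0.5)  -> all-float path, "%0.2f" format
        # The JSNumber checks below reject values the JavaScript frontend
        # cannot represent exactly (presumably the IEEE-754 safe ranges).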
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... 
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. 
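    # Illustrative (an aside, not executed here): if the chart's last_index is
    # 4 and the appended frame has a fresh RangeIndex of length 3, the block
    # below renumbers it to 5, 6, 7 so the new rows extend the old index
    # rather than overlap it. The melt itself is sketched after this function.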
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
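For context, here is a small self-contained illustration (not part of the source file; the column names are arbitrary) of the wide-to-long reshaping that `_maybe_melt_data_for_add_rows` applies before `add_rows` sends data to a melted chart type:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})          # wide: one column per series
melted = pd.melt(df.reset_index(), id_vars=["index"])  # long: index/variable/value
print(melted)
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
```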
latex
Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px
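The `latex` body itself is masked in the file below. As a rough sketch of what it plausibly looks like, inferred from the `markdown` marshalling pattern elsewhere in this file and not the dataset's ground-truth implementation field:

```python
# Hypothetical sketch of the masked `latex` body; an assumption, not the
# dataset's actual implementation.
@_with_element
def latex(self, element, body):
    try:
        # SymPy expressions can be converted to LaTeX source directly.
        import sympy

        if isinstance(body, sympy.Expr):
            body = sympy.latex(body)
    except ImportError:
        pass  # SymPy is optional; plain strings pass through unchanged.

    # Reuse the markdown element: "$$ ... $$" renders as display math.
    element.markdown.body = "$$\n%s\n$$" % _clean_text(body)
```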
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
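    # For example: a method defined as text(self, element, body) is exposed to
    # users as st.text(body); `self` is bound and `element` is created and
    # filled by this wrapper before the delta is enqueued.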
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html # MASKED: latex function (lines 516-551) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. 
output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width.
This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(50, 3), ... columns=["a", "b", "c"]) ... >>> st.bar_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("bar", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def vega_lite_chart( self, element, data=None, spec=None, width=0, use_container_width=False, **kwargs, ): """Display a chart using the Vega-Lite library. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Either the data to be plotted or a Vega-Lite spec containing the data (which more closely follows the Vega-Lite API). spec : dict or None The Vega-Lite spec for the chart. If the spec was already passed in the previous argument, this must be set to None. See https://vega.github.io/vega-lite/docs/ for more info. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Vega-Lite spec. Please refer to the Vega-Lite documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Vega-Lite's native `width` value. **kwargs : any Same as spec, but as keywords. Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) >>> >>> st.vega_lite_chart(df, { ... 'mark': {'type': 'circle', 'tooltip': True}, ... 'encoding': { ... 'x': {'field': 'a', 'type': 'quantitative'}, ... 'y': {'field': 'b', 'type': 'quantitative'}, ... 'size': {'field': 'c', 'type': 'quantitative'}, ... 'color': {'field': 'c', 'type': 'quantitative'}, ... }, ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Vega-Lite usage without Streamlit can be found at https://vega.github.io/vega-lite/examples/. Most of those can be easily translated to the syntax shown above. """ import streamlit.elements.vega_lite as vega_lite if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html" ) vega_lite.marshall( element.vega_lite_chart, data, spec, use_container_width=use_container_width, **kwargs, ) @_with_element def altair_chart(self, element, altair_chart, width=0, use_container_width=False): """Display a chart using the Altair library. Parameters ---------- altair_chart : altair.vegalite.v2.api.Chart The Altair chart object to display. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Altair spec. Please refer to the Altair documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Altair's native `width` value. Example ------- >>> import pandas as pd >>> import numpy as np >>> import altair as alt >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) ... >>> c = alt.Chart(df).mark_circle().encode( ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c']) >>> >>> st.altair_chart(c, use_container_width=True) .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Altair charts can be found at https://altair-viz.github.io/gallery/. """ import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphlib graph object or dot string to display width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphlib graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. 
Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. 
When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. 
channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. 
Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. 
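# (Descriptive note: the nested helper below converts the `default`
# argument into a list of positional indices, coercing numpy arrays and
# pandas Series to plain lists first, and raises a StreamlitAPIException
# if any default value is missing from `options`.)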
def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. 
It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. 
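# (Descriptive note: for a range slider every component of the
# tuple/list must share one numeric type; mixing ints and floats raises
# the StreamlitAPIException below.)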
if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. 
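# (Descriptive note: the frontend reports slider state as an array of
# values; when all inputs were ints the values are cast back to int,
# and a single-value slider is unpacked from its one-element array
# before being returned.)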
current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. (When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. 
Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... 
the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." ) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. 
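# (Descriptive note: a missing value defaults to today's date, and a
# single date/datetime is then normalised into a one-element list so
# single and range inputs share the marshalling code below.)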
if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. 
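# (Descriptive note: None is tolerated for min_value, max_value and
# step because they are optional; only the arguments actually supplied
# must agree on int versus float.)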
args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. 
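# (Descriptive note: floats are treated as fractions and scaled to the
# 0-100 range the frontend expects, ints are taken as whole
# percentages, and out-of-range or non-numeric values raise a
# StreamlitAPIException.)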
if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. 
- "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. 
Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). 
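# (Descriptive note: a single keyword argument addresses a named
# dataset inside a Vega-Lite spec; the keyword becomes the dataset name
# carried by the AddRows delta, matching the `datasets` example in the
# docstring above.)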
elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
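# A minimal, hypothetical sketch (not part of the original module) of the
# wide-to-long reshape that _maybe_melt_data_for_add_rows applies for the
# commands in DELTAS_TYPES_THAT_MELT_DATAFRAMES; the dataframe below is
# illustrative only:
#
# >>> import pandas as pd
# >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
# >>> pd.melt(df.reset_index(), id_vars=["index"])
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
#
# The resulting ["index", "variable", "value"] columns are the long format
# that the generated Vega-Lite specs for line/area/bar charts expect.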
@_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body)
516
551
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Allows us to create and absorb changes (aka Deltas) to elements."""

import functools
import json
import random
import textwrap
import numbers
import re
from datetime import datetime
from datetime import date
from datetime import time

from streamlit import caching
from streamlit import config
from streamlit import cursor
from streamlit import type_util
from streamlit.ReportThread import get_report_ctx
from streamlit.errors import DuplicateWidgetID
from streamlit.errors import StreamlitAPIException
from streamlit.errors import NoSessionContext
from streamlit.file_util import get_encoded_file_data
from streamlit.js_number import JSNumber
from streamlit.js_number import JSNumberBoundsException
from streamlit.proto import Alert_pb2
from streamlit.proto import Balloons_pb2
from streamlit.proto import BlockPath_pb2
from streamlit.proto import ForwardMsg_pb2
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.logger import get_logger
from streamlit.type_util import is_type

LOGGER = get_logger(__name__)

# Save the type built-in for when we override the name "type".
_type = type

MAX_DELTA_BYTES = 14 * 1024 * 1024  # 14MB

# List of Streamlit commands that perform a Pandas "melt" operation on
# input dataframes.
DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart")


def _wraps_with_cleaned_sig(wrapped, num_args_to_remove):
    """Simplify the function signature by removing arguments from it.

    Removes the first N arguments from function signature (where N is
    num_args_to_remove). This is useful since function signatures are visible
    in our user-facing docs, and many methods in DeltaGenerator have arguments
    that users have no access to.

    Note that "self" is ignored by default. So to remove both "self" and the
    next argument you'd pass num_args_to_remove=1.
    """
    # By passing (None, ...), we're removing (arg1, ...) from *args
    args_to_remove = (None,) * num_args_to_remove
    fake_wrapped = functools.partial(wrapped, *args_to_remove)

    fake_wrapped.__doc__ = wrapped.__doc__
    fake_wrapped.__name__ = wrapped.__name__  # type: ignore[attr-defined]
    fake_wrapped.__module__ = wrapped.__module__

    return functools.wraps(fake_wrapped)


def _with_element(method):
    """Wrap function and pass a NewElement proto to be filled.

    This is a function decorator.

    Converts a method with arguments (self, element, ...) into a method with
    arguments (self, ...). Thus, the instantiation of the element proto
    object and creation of the element are handled automatically.

    Parameters
    ----------
    method : callable
        A DeltaGenerator method with arguments (self, element, ...)

    Returns
    -------
    callable
        A new DeltaGenerator method with arguments (self, ...)

    """

    @_wraps_with_cleaned_sig(method, 1)  # Remove self and element from sig.
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
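        delta_type : str
            The name of the Streamlit method that created this element (for
            example "line_chart"); recorded on the element's locked cursor so
            a later `add_rows` call knows whether incoming data must be
            melted.
        last_index : int or None
            The last index of the element's current data, used by `add_rows`
            to extend a RangeIndex when new rows are appended.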
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphviz graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a Graphviz graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
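        # (Descriptive note: the checks below only emit deprecation warnings
        # for the legacy width/height arguments; sizing otherwise comes from
        # the figure itself, and `sharing` plus any extra kwargs are forwarded
        # to the plotly_chart marshaller unchanged.)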
        import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of
            `fig`.

            * If `fig` is set, defaults to `False`.

            * If `fig` is not set, defaults to `True`. This simulates
              Jupyter's approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this function
        closely follow the ones for Bokeh's `show` function. You can find
        more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp
           height: 600px

        """
        import streamlit.elements.bokeh_chart as bokeh_chart

        bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width)

    @_with_element
    def image(
        self,
        element,
        image,
        caption=None,
        width=None,
        use_column_width=False,
        clamp=False,
        channels="RGB",
        format="JPEG",
    ):
        """Display an image or list of images.

        Parameters
        ----------
        image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str]
            Monochrome image of shape (w,h) or (w,h,1)
            OR a color image of shape (w,h,3)
            OR an RGBA image of shape (w,h,4)
            OR a URL to fetch the image from
            OR a list of one of the above, to display multiple images.
        caption : str or list of str
            Image caption. If displaying multiple images, caption should be a
            list of captions (one for each image).
        width : int or None
            Image width. None means use the image width.
        use_column_width : bool
            If True, set the image width to the column width. This takes
            precedence over the `width` parameter.
        clamp : bool
            Clamp image pixel values to a valid range ([0-255] per channel).
            This is only meaningful for byte array images; the parameter is
            ignored for image URLs. If this is not set, and an image has an
            out-of-range value, an error will be thrown.
        channels : 'RGB' or 'BGR'
            If image is an np.ndarray, this parameter denotes the format used
            to represent color information. Defaults to 'RGB', meaning
            `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green,
            and `image[:, :, 2]` is blue. For images coming from libraries
            like OpenCV you should set this to 'BGR', instead.
        format : 'JPEG' or 'PNG'
            This parameter specifies the image format to use when transferring
            the image data. Defaults to 'JPEG'.

        Example
        -------
        >>> from PIL import Image
        >>> image = Image.open('sunrise.jpg')
        >>>
        >>> st.image(image, caption='Sunrise by the mountains',
        ...          use_column_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY
           height: 630px

        """
        from .elements import image_proto

        if use_column_width:
            width = -2
        elif width is None:
            width = -1
        elif width <= 0:
            raise StreamlitAPIException("Image width must be positive.")

        image_proto.marshall_images(
            self._get_coordinates(),
            image,
            caption,
            width,
            element.imgs,
            clamp,
            channels,
            format,
        )

    @_with_element
    def audio(self, element, data, format="audio/wav", start_time=0):
        """Display an audio player.

        Parameters
        ----------
        data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open().
            Raw audio data, filename, or a URL pointing to the file to load.
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.
        start_time: int
            The time from which this element should start playing.
        format : str
            The mime type for the audio file. Defaults to 'audio/wav'.
            See https://tools.ietf.org/html/rfc4281 for more info.

        Example
        -------
        >>> audio_file = open('myaudio.ogg', 'rb')
        >>> audio_bytes = audio_file.read()
        >>>
        >>> st.audio(audio_bytes, format='audio/ogg')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb
           height: 400px

        """
        from .elements import media_proto

        media_proto.marshall_audio(
            self._get_coordinates(), element.audio, data, format, start_time
        )

    @_with_element
    def video(self, element, data, format="video/mp4", start_time=0):
        """Display a video player.

        Parameters
        ----------
        data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open().
            Raw video data, filename, or URL pointing to a video to load.
            Includes support for YouTube URLs.
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.
        format : str
            The mime type for the video file. Defaults to 'video/mp4'.
            See https://tools.ietf.org/html/rfc4281 for more info.
        start_time: int
            The time from which this element should start playing.

        Example
        -------
        >>> video_file = open('myvideo.mp4', 'rb')
        >>> video_bytes = video_file.read()
        >>>
        >>> st.video(video_bytes)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv
           height: 600px

        .. note::
           Some videos may not display if they are encoded using MP4V (which
           is an export option in OpenCV), as this codec is not widely
           supported by browsers. Converting your video to H.264 will allow
           the video to be displayed in Streamlit.
           See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_
           or this `Streamlit forum post
           <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_
           for more information.

        """
        from .elements import media_proto

        media_proto.marshall_video(
            self._get_coordinates(), element.video, data, format, start_time
        )

    @_with_element
    def button(self, element, label, key=None):
        """Display a button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            If the button was clicked on the last run of the app.

        Example
        -------
        >>> if st.button('Say hello'):
        ...     st.write('Why hello there')
        ... else:
        ...     st.write('Goodbye')

        """
        element.button.label = label
        element.button.default = False

        ui_value = _get_widget_ui_value("button", element, user_key=key)
        current_value = ui_value if ui_value is not None else False
        return current_value

    @_with_element
    def checkbox(self, element, label, value=False, key=None):
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        """
        element.checkbox.label = label
        element.checkbox.default = bool(value)

        ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return bool(current_value)

    @_with_element
    def multiselect(
        self, element, label, options, default=None, format_func=str, key=None
    ):
        """Display a multiselect widget.
        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        default: [str] or None
            List of default values.
        format_func : function
            Function to modify the display of multiselect options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the multiselect.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        [str]
            A list with the selected options

        Example
        -------
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. note::
           User experience can be degraded for large lists of `options`
           (100+), as this widget is not designed to handle arbitrary text
           search efficiently. See this `thread
           <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
           on the Streamlit community forum for more information and
           `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_
           for updates on the issue.

        """

        # Perform validation checks and return indices based on the default
        # values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This if is done before others because calling if not x (done
                # right below) when x is of type pd.Series() or np.array()
                # throws a ValueError exception.
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
        ...     st.write("You didn\'t select comedy.")

        """
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Radio Value has invalid type: %s" % type(index).__name__
            )

        if len(options) > 0 and not 0 <= index < len(options):
            raise StreamlitAPIException(
                "Radio index must be between 0 and length of options"
            )

        element.radio.label = label
        element.radio.default = index
        element.radio.options[:] = [str(format_func(option)) for option in options]

        ui_value = _get_widget_ui_value("radio", element, user_key=key)
        current_value = ui_value if ui_value is not None else index

        return (
            options[current_value]
            if len(options) > 0 and options[current_value] is not None
            else NoValue
        )

    @_with_element
    def selectbox(self, element, label, options, index=0, format_func=str, key=None):
        """Display a select widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of the labels. It receives the
            option as an argument and its output will be cast to str.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option

        Example
        -------
        >>> option = st.selectbox(
        ...     'How would you like to be contacted?',
        ...     ('Email', 'Home phone', 'Mobile phone'))
        >>>
        >>> st.write('You selected:', option)

        """
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Selectbox Value has invalid type: %s" % type(index).__name__
            )

        if len(options) > 0 and not 0 <= index < len(options):
            raise StreamlitAPIException(
                "Selectbox index must be between 0 and length of options"
            )

        element.selectbox.label = label
        element.selectbox.default = index
        element.selectbox.options[:] = [str(format_func(option)) for option in options]

        ui_value = _get_widget_ui_value("selectbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else index

        return (
            options[current_value]
            if len(options) > 0 and options[current_value] is not None
            else NoValue
        )

    @_with_element
    def slider(
        self,
        element,
        label,
        min_value=None,
        max_value=None,
        value=None,
        step=None,
        format=None,
        key=None,
    ):
        """Display a slider widget.

        This also allows you to render a range slider by passing a two-element
        tuple or list as the `value`.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this slider is for.
        min_value : int/float or None
            The minimum permitted value.
            Defaults to 0 if the value is an int, 0.0 otherwise.
        max_value : int/float or None
            The maximum permitted value.
            Defaults to 100 if the value is an int, 1.0 otherwise.
        value : int/float or a tuple/list of int/float or None
            The value of the slider when it first renders. If a tuple/list
            of two values is passed here, then a range slider with those
            lower and upper bounds is rendered. For example, if set to
            `(1, 10)` the slider will have a selectable range between 1 and
            10. Defaults to min_value.
        step : int/float or None
            The stepping interval.
            Defaults to 1 if the value is an int, 0.01 otherwise.
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. This does not impact the return value.
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
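        # (JSNumber mirrors the bounds of a JavaScript Number: widget values
        # are serialized to the browser frontend, so they must be exactly
        # representable there.)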
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places
        # from the `step` argument, but this would only be meaningful if step
        # were a decimal. As a possible improvement we could make this
        # function accept decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader is
            for.
        type : str or list of str or None
            Array of allowed extensions, e.g. ['png', 'jpg'].
            By default, all extensions are allowed.
        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
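# Illustrative examples (comments only) of the type rules enforced above:
#
#     st.number_input("n", min_value=0, max_value=10, value=5)       # all ints: OK
#     st.number_input("x", min_value=0.0, max_value=1.0, value=0.5)  # all floats: OK
#     st.number_input("bad", min_value=0, max_value=1.0)             # mixed -> StreamlitAPIException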
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... 
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. 
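# Illustration (comment only) of the reshape: pd.melt turns the wide frame
#
#     index  a  b              index  variable  value
#     0      1  3      -->     0      a         1
#     1      2  4              1      a         2
#                              0      b         3
#                              1      b         4
#
# i.e. one (variable, value) row per original cell, which is the long format
# expected by the vega-lite specs that line_chart/area_chart/bar_chart build.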
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
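A minimal usage sketch of the add_rows flow implemented above (hedged: assumes a running Streamlit script; `line_chart` is one of the DELTAS_TYPES_THAT_MELT_DATAFRAMES, so both frames are melted and the RangeIndex is continued across calls):

```python
import numpy as np
import pandas as pd
import streamlit as st

df1 = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"])
df2 = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"])

chart = st.line_chart(df1)  # first delta: df1 is melted; last_index == 9
chart.add_rows(df2)         # df2 is re-indexed to 10..19, melted, appended
```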
json
Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px
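The `json` implementation itself is masked in the file content below (see the `# MASKED: json function` marker). A plausible sketch consistent with the docstring above, offered as an assumption rather than the actual masked body:

```python
@_with_element
def json(self, element, body):
    # Hypothetical sketch: strings are assumed to already hold serialized
    # JSON; other objects are serialized here, stringifying unsupported
    # types instead of failing outright.
    if not isinstance(body, str):
        body = json.dumps(body, default=lambda o: str(type(o)))
    element.json.body = body
```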
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) # MASKED: json function (lines 585-626) @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. 
output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.line_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8 height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("line", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def area_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a area chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.area_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("area", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def bar_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a bar chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. 
This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(50, 3), ... columns=["a", "b", "c"]) ... >>> st.bar_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("bar", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def vega_lite_chart( self, element, data=None, spec=None, width=0, use_container_width=False, **kwargs, ): """Display a chart using the Vega-Lite library. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Either the data to be plotted or a Vega-Lite spec containing the data (which more closely follows the Vega-Lite API). spec : dict or None The Vega-Lite spec for the chart. If the spec was already passed in the previous argument, this must be set to None. See https://vega.github.io/vega-lite/docs/ for more info. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Vega-Lite spec. Please refer to the Vega-Lite documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Vega-Lite's native `width` value. **kwargs : any Same as spec, but as keywords. Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) >>> >>> st.vega_lite_chart(df, { ... 'mark': {'type': 'circle', 'tooltip': True}, ... 'encoding': { ... 'x': {'field': 'a', 'type': 'quantitative'}, ... 'y': {'field': 'b', 'type': 'quantitative'}, ... 'size': {'field': 'c', 'type': 'quantitative'}, ... 'color': {'field': 'c', 'type': 'quantitative'}, ... }, ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Vega-Lite usage without Streamlit can be found at https://vega.github.io/vega-lite/examples/. Most of those can be easily translated to the syntax shown above. """ import streamlit.elements.vega_lite as vega_lite if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html" ) vega_lite.marshall( element.vega_lite_chart, data, spec, use_container_width=use_container_width, **kwargs, ) @_with_element def altair_chart(self, element, altair_chart, width=0, use_container_width=False): """Display a chart using the Altair library. Parameters ---------- altair_chart : altair.vegalite.v2.api.Chart The Altair chart object to display. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Altair spec. Please refer to the Altair documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Altair's native `width` value. Example ------- >>> import pandas as pd >>> import numpy as np >>> import altair as alt >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) ... >>> c = alt.Chart(df).mark_circle().encode( ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c']) >>> >>> st.altair_chart(c, use_container_width=True) .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Altair charts can be found at https://altair-viz.github.io/gallery/. """ import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphlib graph object or dot string to display width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphlib graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. 
Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. 
When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. 
channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. 
Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. 
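        # Illustrative sketch (not part of the original source): given
        # options=['a', 'b', 'c'] and default=['b'], the helper below returns
        # the index list [1]; a default such as ['x'] that does not appear in
        # options raises StreamlitAPIException.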
def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. 
It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. 
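        # Illustrative note (not part of the original source): the checks
        # below operate element-wise for range sliders, e.g.
        #   st.slider('n', 0, 100, 25)             -> single int value
        #   st.slider('r', 0.0, 1.0, (0.25, 0.75)) -> tuple of floats
        # Mixing types, as in (1, 2.0), fails both all(...) checks and raises.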
if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. 
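        # Illustrative note (assumption, not part of the original source):
        # `ui_value` comes back from the frontend wrapped in a protobuf
        # message whose repeated `value` field holds the number(s) as floats,
        # so an all-int slider converts e.g. [25.0] back to the int 25 below
        # before returning it.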
current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. (When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. 
Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... 
the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." ) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. 
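        # Illustrative note (not part of the original source): with no `value`
        # argument, e.g. st.date_input('Pick a day'), the widget defaults to
        # today's date and behaves as a single-date picker.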
if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. 
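        # Illustrative note (not part of the original source): mixing int and
        # float arguments, e.g. st.number_input('x', min_value=0, value=0.5),
        # fails the checks below and raises StreamlitAPIException.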
args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. 
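        # Illustrative note (not part of the original source): both floats in
        # [0.0, 1.0] and ints in [0, 100] are accepted, e.g. st.progress(0.42)
        # and st.progress(42) render the same 42% bar.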
if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. 
- "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. 
Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). 
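        # Illustrative note (not part of the original source): this branch
        # handles named datasets, e.g. my_chart.add_rows(some_fancy_name=df2),
        # where the keyword must match a dataset name declared in the chart's
        # original spec.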
elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
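# Illustrative sketch (assumption, not part of the original file): how the
# widget helpers in this module compose inside a typical widget method. The
# method name `my_toggle` and the reuse of the `checkbox` proto field are
# purely hypothetical.
#
# @_with_element
# def my_toggle(self, element, label, key=None):
#     element.checkbox.label = label
#     element.checkbox.default = False
#     ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
#     return bool(ui_value) if ui_value is not None else False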
    @_with_element
    def json(self, element, body):
        """Display object or string as a pretty-printed JSON string.

        Parameters
        ----------
        body : Object or str
            The object to print as JSON. All referenced objects should be
            serializable to JSON as well. If object is a string, we assume it
            contains serialized JSON.

        Example
        -------
        >>> st.json({
        ...     'foo': 'bar',
        ...     'baz': 'boz',
        ...     'stuff': [
        ...         'stuff 1',
        ...         'stuff 2',
        ...         'stuff 3',
        ...         'stuff 5',
        ...     ],
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS
           height: 280px

        """
        import streamlit as st

        if not isinstance(body, str):
            try:
                body = json.dumps(body, default=lambda o: str(type(o)))
            except TypeError as err:
                st.warning(
                    "Warning: this data structure was not fully serializable as "
                    "JSON due to one or more unexpected keys. (Error was: %s)" % err
                )
                body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o)))

        element.json.body = body
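# Usage sketch (not part of the original file): values that json.dumps cannot
# serialize fall back to their type name via the `default` hook above, e.g.
#
# >>> import datetime
# >>> st.json({'when': datetime.datetime.now()})
# # renders {"when": "<class 'datetime.datetime'>"}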
585
626
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable,
            or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable,
            or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
           height: 400px

        """
        import streamlit.elements.graphviz_chart as graphviz_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.graphviz_chart` are deprecated and will be removed on 2020-03-04"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )

        graphviz_chart.marshall(
            element.graphviz_chart, figure_or_dot, use_container_width
        )

    @_with_element
    def plotly_chart(
        self,
        element,
        figure_or_data,
        width=0,
        height=0,
        use_container_width=False,
        sharing="streamlit",
        **kwargs,
    ):
        """Display an interactive Plotly chart.

        Plotly is a charting library for Python. The arguments to this
        function closely follow the ones for Plotly's `plot()` function. You
        can find more about Plotly at https://plot.ly/python.

        Parameters
        ----------
        figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,
            dict/list of plotly.graph_objs.Figure/Data, or
            matplotlib.figure.Figure

            See https://plot.ly/python/ for examples of graph descriptions.

            If a Matplotlib Figure, converts it to a Plotly figure and
            displays it.

        width : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the figure.
            Please refer to the Plotly documentation for details.

        height : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the figure.
            Please refer to the Plotly documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        sharing : {'streamlit', 'private', 'secret', 'public'}
            Use 'streamlit' to insert the plot and all its dependencies
            directly in the Streamlit app, which means it works offline too.
            This is the default.
            Use any other sharing mode to send the app to Plotly's servers,
            and embed the result into the Streamlit app. See
            https://plot.ly/python/privacy/ for more. Note that these sharing
            modes require a Plotly account.

        **kwargs
            Any argument accepted by Plotly's `plot()` function.

        To show Plotly charts in Streamlit, just call `st.plotly_chart`
        wherever you would call Plotly's `py.plot` or `py.iplot`.

        Example
        -------

        The example below comes straight from the examples at
        https://plot.ly/python:

        >>> import streamlit as st
        >>> import plotly.figure_factory as ff
        >>> import numpy as np
        >>>
        >>> # Add histogram data
        >>> x1 = np.random.randn(200) - 2
        >>> x2 = np.random.randn(200)
        >>> x3 = np.random.randn(200) + 2
        >>>
        >>> # Group data together
        >>> hist_data = [x1, x2, x3]
        >>>
        >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
        >>>
        >>> # Create distplot with custom bin_size
        >>> fig = ff.create_distplot(
        ...         hist_data, group_labels, bin_size=[.1, .25, .5])
        >>>
        >>> # Plot!
        >>> st.plotly_chart(fig, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
           height: 400px

        """
        # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
        # for their main parameter. I don't like the name, but it's best to
        # keep it in sync with what Plotly calls it.
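        # Hedged usage sketch (not from the original source; assumes the
        # plotly package is installed):
        #
        #     import plotly.graph_objs as go
        #     fig = go.Figure(data=[go.Bar(y=[2, 1, 3])])
        #     st.plotly_chart(fig)                            # no warning
        #     st.plotly_chart(fig, width=300)                 # deprecation alert
        #     st.plotly_chart(fig, use_container_width=True)  # preferred sizing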
        import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of
            `fig`.

            * If `fig` is set, defaults to `False`.

            * If `fig` is not set, defaults to `True`. This simulates
              Jupyter's approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this
        function closely follow the ones for Bokeh's `show` function. You can
        find more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        [str]
            A list with the selected options

        Example
        -------
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. note::
           User experience can be degraded for large lists of `options`
           (100+), as this widget is not designed to handle arbitrary text
           search efficiently. See this
           `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
           on the Streamlit community forum for more information and
           `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_
           for updates on the issue.

        """

        # Perform validation checks and return indices based on the default
        # values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This if is done before others because calling if not x (done
                # right below) when x is of type pd.Series() or np.array()
                # throws a ValueError exception.
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
        ...
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
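        # Note (not from the original source): JSNumber mirrors the
        # frontend's IEEE-754 double representation, so e.g. an integer bound
        # beyond 2**53 - 1 (JavaScript's Number.MAX_SAFE_INTEGER) fails
        # validation below and is re-raised as a StreamlitAPIException.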
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places
        # from the `step` argument, but this would only be meaningful if step
        # were a decimal. As a possible improvement we could make this
        # function accept decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader is
            for.

        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.

        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
            encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be
            returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. Output must be purely numeric. This does not
            impact the return value. Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int or float
            The current value of the numeric input widget. The return type
            will match the data type of the value parameter.

        Example
        -------
        >>> number = st.number_input('Insert a number')
        >>> st.write('The current number is ', number)
        """
        if isinstance(value, NoValue):
            # Use an explicit None check so a legitimate min_value of 0 or 0.0
            # is still picked up as the default.
            if min_value is not None:
                value = min_value
            else:
                value = 0.0  # We set a float as default

        int_value = isinstance(value, numbers.Integral)
        float_value = isinstance(value, float)

        if value is None:
            raise StreamlitAPIException(
                "Default value for number_input should be an int or a float."
            )
        else:
            if format is None:
                format = "%d" if int_value else "%0.2f"

            if format in ["%d", "%u", "%i"] and float_value:
                # Warn user to check if displaying float as int was really intended.
                import streamlit as st

                st.warning(
                    "Warning: NumberInput value below is float, but format {} displays as integer.".format(
                        format
                    )
                )

            if step is None:
                step = 1 if int_value else 0.01

        try:
            float(format % 2)
        except (TypeError, ValueError):
            raise StreamlitAPIException(
                "Format string for st.number_input contains invalid characters: %s"
                % format
            )

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]

        int_args = all(
            map(
                lambda a: (
                    isinstance(a, numbers.Integral) or isinstance(a, type(None))
                ),
                args,
            )
        )
        float_args = all(
            map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)
        )

        if not int_args and not float_args:
            raise StreamlitAPIException(
                "All arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args

        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "All numerical arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step_type": type(step).__name__,
                }
            )

        # Use explicit None checks so bounds of 0 or 0.0 are still enforced.
        if (min_value is not None and min_value > value) or (
            max_value is not None and max_value < value
        ):
            raise StreamlitAPIException(
                "The default `value` of %(value)s "
                "must lie between the `min_value` of %(min)s "
                "and the `max_value` of %(max)s, inclusively."
                % {"value": value, "min": min_value, "max": max_value}
            )

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
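        # Illustrative note (an assumption, comment only): JSNumber mirrors
        # JavaScript's number limits, so the integer bounds here are roughly
        # the safe-integer range of +/- (2**53 - 1). Something like
        # JSNumber.validate_int_bounds(2**60, "`value`") would therefore
        # raise a JSNumberBoundsException in the block below.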
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.

                - "type" : str
                  One of the Deck.GL layer types that are currently supported
                  by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                  PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                  TextLayer.

                - Plus anything accepted by that layer type. The exact keys
                  that are accepted depend on the "type" field, above. For
                  example, for ScatterplotLayer you can set fields like
                  "opacity", "filled", "stroked", and so on.

                  In addition, Deck.GL's documentation for ScatterplotLayer
                  shows you can use a "getRadius" field to individually set
                  the radius of each circle in the plot. So here you would
                  set "getRadius": "my_column" where "my_column" is the name
                  of the column containing the radius data.

                  For things like "getPosition", which expect an array rather
                  than a scalar value, we provide alternates that make the
                  API simpler to use with dataframes:

                  - Instead of "getPosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getSourcePosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getTargetPosition" : use "getTargetLatitude"
                    and "getTargetLongitude".
                  - Instead of "getColor" : use "getColorR", "getColorG",
                    "getColorB", and (optionally) "getColorA", for red, green,
                    blue and alpha.
                  - Instead of "getSourceColor" : use the same as above.
                  - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...     }, {
        ...         'type': 'ScatterplotLayer',
        ...         'data': df,
        ...     }])
        ...

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i
           height: 530px

        """
        suppress_deprecation_warning = config.get_option(
            "global.suppressDeprecationWarnings"
        )
        if not suppress_deprecation_warning:
            import streamlit as st

            st.warning(
                """
                The `deck_gl_chart` widget is deprecated and will be removed on
                2020-05-01. To render a map, you should use `st.pydeck_chart` widget.
            """
            )

        import streamlit.elements.deck_gl as deck_gl

        deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs)

    @_with_element
    def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False):
        """Draw a chart using the PyDeck library.

        This supports 3D maps, point clouds, and more! More info about PyDeck
        at https://deckgl.readthedocs.io/en/latest/.

        These docs are also quite useful:

        - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
        - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        pydeck_obj : pydeck.Deck or None
            Object specifying the PyDeck chart to draw.

        Example
        -------
        Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
        the light map style:

        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.pydeck_chart(pdk.Deck(
        ...     map_style='mapbox://styles/mapbox/light-v9',
        ...     initial_view_state=pdk.ViewState(
        ...         latitude=37.76,
        ...         longitude=-122.4,
        ...         zoom=11,
        ...         pitch=50,
        ...     ),
        ...     layers=[
        ...         pdk.Layer(
        ...             'HexagonLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             radius=200,
        ...             elevation_scale=4,
        ...             elevation_range=[0, 1000],
        ...             pickable=True,
        ...             extruded=True,
        ...         ),
        ...         pdk.Layer(
        ...             'ScatterplotLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             get_color='[200, 30, 0, 160]',
        ...             get_radius=200,
        ...         ),
        ...     ],
        ... ))

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
           height: 530px

        """
        import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart

        deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width)

    @_with_element
    def table(self, element, data=None):
        """Display a static table.

        This differs from `st.dataframe` in that the table in this case is
        static: its entire contents are just laid out directly on the page.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The table data.

        Example
        -------
        >>> df = pd.DataFrame(
        ...     np.random.randn(10, 5),
        ...     columns=('col %d' % i for i in range(5)))
        ...
        >>> st.table(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq
           height: 480px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, element.table)

    def add_rows(self, data=None, **kwargs):
        """Concatenate a dataframe to the bottom of the current one.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Table to concat. Optional.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. 
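    # Illustrative sketch (comment only; column names assumed): a wide frame
    #
    #     index | col 0 | col 1
    #     0     | 0.10  | 0.20
    #
    # is melted into the long form that the generated Vega-Lite spec expects:
    #
    #     index | variable | value
    #     0     | col 0    | 0.10
    #     0     | col 1    | 0.20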
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
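A minimal usage sketch of the `add_rows` flow implemented above (not part of the library source; it assumes `streamlit` and `pandas` are installed and the script is launched with `streamlit run`):

```python
import numpy as np
import pandas as pd
import streamlit as st

# line_chart is one of the DELTAS_TYPES_THAT_MELT_DATAFRAMES commands, so
# add_rows() re-melts the appended frame and continues the RangeIndex where
# the previous data stopped (see _maybe_melt_data_for_add_rows above).
df1 = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
chart = st.line_chart(df1)

# Appending more rows updates the existing element in place rather than
# drawing a new chart.
df2 = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])
chart.add_rows(df2)
```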
dataframe
Display a dataframe as an interactive table.

Parameters
----------
data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
    or None
    The data to display.

    If 'data' is a pandas.Styler, it will be used to style its
    underlying DataFrame. Streamlit supports custom cell values and
    colors. (It does not support some of the more exotic pandas styling
    features, like bar charts, hovering, and captions.) Styler support
    is experimental!
width : int or None
    Desired width of the UI element expressed in pixels. If None, a
    default width based on the page width is used.
height : int or None
    Desired height of the UI element expressed in pixels. If None, a
    default height is used.

Examples
--------
>>> df = pd.DataFrame(
...     np.random.randn(50, 20),
...     columns=('col %d' % i for i in range(20)))
...
>>> st.dataframe(df)  # Same as st.write(df)

.. output::
   https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
   height: 330px

>>> st.dataframe(df, 200, 100)

You can also pass a Pandas Styler object to change the style of
the rendered DataFrame:

>>> df = pd.DataFrame(
...     np.random.randn(10, 20),
...     columns=('col %d' % i for i in range(20)))
...
>>> st.dataframe(df.style.highlight_max(axis=0))

.. output::
   https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
   height: 285px
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) # MASKED: dataframe function (lines 807-863) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. 
        height : int
            The chart height in pixels. If 0, selects the height automatically.
        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None.
            See https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphlib graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. 
Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". 
If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... 
st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. 
if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 
'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. 
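# Illustration of the defaults resolved below: st.slider("age", value=25)
# takes the int branch and gets min_value=0, max_value=100, step=1, while
# st.slider("t", value=0.5) takes the float branch and gets min_value=0.0,
# max_value=1.0, step=0.01.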
if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. 
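# The frontend reports the slider state as an array of floats, so the code
# below casts back to ints when every argument was an int and unwraps
# single-value sliders: st.slider returns a scalar for a single value and a
# tuple for a range.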
current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. (When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content.
Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... 
the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." ) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. 
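# Illustration: a bare st.date_input("When?") renders a single-date picker
# preset to today, while passing a (start, end) tuple or list switches the
# widget into range mode (tracked by is_range below).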
if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. 
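# Illustration: st.number_input("n", min_value=0, value=0.5) raises a
# StreamlitAPIException because min_value is an int while value is a float;
# None is accepted for unset bounds, which is why the checks below also
# allow NoneType.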
args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. 
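# Illustration of the range checks below: floats are read as fractions and
# ints as percentages, so st.progress(0.42) and st.progress(42) both render
# 42%, while st.progress(1.5) or st.progress(101) raises a
# StreamlitAPIException.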
if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. 
- "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. 
Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). 
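# Illustration: this keyword form targets a named dataset in a chart spec,
# e.g. a st.vega_lite_chart spec declaring 'datasets': {'foo': df1} can
# later be extended with my_chart.add_rows(foo=df2), as in the docstring
# example above.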
elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
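To make the add_rows bookkeeping above concrete, here is a minimal, self-contained sketch (pandas only; the frame contents and the last_index value are invented for illustration) of the reshaping that _maybe_melt_data_for_add_rows applies to data headed for a line/area/bar chart:

```python
import pandas as pd

# A wide frame, as a user would pass to st.line_chart(...).add_rows(...).
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})  # fresh RangeIndex 0..1

last_index = 4  # pretend the chart already holds rows 0..4 (step=1)

# Re-index so the appended rows continue where the existing data stops.
df = df.reset_index(drop=True)
df.index = pd.RangeIndex(start=last_index + 1, stop=last_index + 1 + len(df))

# Melt to long form: one (index, variable, value) row per cell, which is the
# shape the generated Vega-Lite spec expects for its dataset.
melted = pd.melt(df.reset_index(), id_vars=["index"])
print(melted)  # columns: index, variable, value
```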
def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underlying DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height )
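Since dataframe returns the element's DeltaGenerator, it composes with add_rows from the module above. A short usage sketch (the random data and column names are arbitrary):

```python
import numpy as np
import pandas as pd
import streamlit as st

df1 = pd.DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"])
table = st.dataframe(df1)  # draws the table and returns its DeltaGenerator

df2 = pd.DataFrame(np.random.randn(5, 3), columns=["a", "b", "c"])
table.add_rows(df2)  # appends df2 beneath df1 in the same element
```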
807
863
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output::
       https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
       height: 100px

    """
    element.markdown.body = "# %s" % _clean_text(body)

@_with_element
def header(self, element, body):
    """Display text in header formatting.

    Parameters
    ----------
    body : str
        The text to display.

    Example
    -------
    >>> st.header('This is a header')

    .. output::
       https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
       height: 100px

    """
    element.markdown.body = "## %s" % _clean_text(body)

@_with_element
def subheader(self, element, body):
    """Display text in subheader formatting.

    Parameters
    ----------
    body : str
        The text to display.

    Example
    -------
    >>> st.subheader('This is a subheader')

    .. output::
       https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
       height: 100px

    """
    element.markdown.body = "### %s" % _clean_text(body)

@_with_element
def error(self, element, body):
    """Display error message.

    Parameters
    ----------
    body : str
        The error text to display.

    Example
    -------
    >>> st.error('This is an error')

    """
    element.alert.body = _clean_text(body)
    element.alert.format = Alert_pb2.Alert.ERROR

@_with_element
def warning(self, element, body):
    """Display warning message.

    Parameters
    ----------
    body : str
        The warning text to display.

    Example
    -------
    >>> st.warning('This is a warning')

    """
    element.alert.body = _clean_text(body)
    element.alert.format = Alert_pb2.Alert.WARNING

@_with_element
def info(self, element, body):
    """Display an informational message.

    Parameters
    ----------
    body : str
        The info text to display.

    Example
    -------
    >>> st.info('This is a purely informational message')

    """
    element.alert.body = _clean_text(body)
    element.alert.format = Alert_pb2.Alert.INFO

@_with_element
def success(self, element, body):
    """Display a success message.

    Parameters
    ----------
    body : str
        The success text to display.

    Example
    -------
    >>> st.success('This is a success message!')

    """
    element.alert.body = _clean_text(body)
    element.alert.format = Alert_pb2.Alert.SUCCESS

@_with_element
def help(self, element, obj):
    """Display object's doc string, nicely formatted.

    Displays the doc string for this object.

    Parameters
    ----------
    obj : Object
        The object whose docstring should be displayed.

    Example
    -------

    Don't remember how to initialize a dataframe? Try this:

    >>> st.help(pandas.DataFrame)

    Want to quickly check what datatype is output by a certain function?
    Try:

    >>> x = my_poorly_documented_function()
    >>> st.help(x)

    """
    import streamlit.elements.doc_string as doc_string

    doc_string.marshall(element, obj)

@_with_element
def exception(self, element, exception):
    """Display an exception.

    Parameters
    ----------
    exception : Exception
        The exception to display.

    Example
    -------
    >>> e = RuntimeError('This is an exception of type RuntimeError')
    >>> st.exception(e)

    """
    import streamlit.elements.exception_proto as exception_proto

    exception_proto.marshall(element.exception, exception)

def dataframe(self, data=None, width=None, height=None):
    """Display a dataframe as an interactive table.

    Parameters
    ----------
    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
        or None
        The data to display.

        If 'data' is a pandas.Styler, it will be used to style its
        underlying DataFrame. Streamlit supports custom cell values
        and colors. (It does not support some of the more exotic
        pandas styling features, like bar charts, hovering, and
        captions.) Styler support is experimental!
    width : int or None
        Desired width of the UI element expressed in pixels. If None, a
        default width based on the page width is used.
    height : int or None
        Desired height of the UI element expressed in pixels. If None, a
        default height is used.

    Examples
    --------
    >>> df = pd.DataFrame(
    ...    np.random.randn(50, 20),
    ...    columns=('col %d' % i for i in range(20)))
    ...
    >>> st.dataframe(df)  # Same as st.write(df)

    .. output::
       https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
       height: 330px

    >>> st.dataframe(df, 200, 100)

    You can also pass a Pandas Styler object to change the style of
    the rendered DataFrame:

    >>> df = pd.DataFrame(
    ...    np.random.randn(10, 20),
    ...    columns=('col %d' % i for i in range(20)))
    ...
    >>> st.dataframe(df.style.highlight_max(axis=0))

    .. output::
       https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
       height: 285px

    """
    import streamlit.elements.data_frame_proto as data_frame_proto

    def set_data_frame(delta):
        data_frame_proto.marshall_data_frame(data, delta.data_frame)

    return self._enqueue_new_element_delta(
        set_data_frame, "dataframe", element_width=width, element_height=height
    )

@_with_element
def line_chart(
    self, element, data=None, width=0, height=0, use_container_width=True
):
    """Display a line chart.

    This is just syntax-sugar around st.altair_chart. The main difference
    is this command uses the data's own column and indices to figure out
    the chart's spec. As a result this is easier to use for many "just
    plot this" scenarios, while being less customizable.

    Parameters
    ----------
    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
        or None
        Data to be plotted.

    width : int
        The chart width in pixels. If 0, selects the width automatically.

    height : int
        The chart height in pixels. If 0, selects the height automatically.

    use_container_width : bool
        If True, set the chart width to the column width. This takes
        precedence over the width argument.

    Example
    -------
    >>> chart_data = pd.DataFrame(
    ...     np.random.randn(20, 3),
    ...     columns=['a', 'b', 'c'])
    ...
    >>> st.line_chart(chart_data)

    .. output::
       https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
       height: 220px

    """
    import streamlit.elements.altair as altair

    chart = altair.generate_chart("line", data, width, height)
    altair.marshall(element.vega_lite_chart, chart, use_container_width)

@_with_element
def area_chart(
    self, element, data=None, width=0, height=0, use_container_width=True
):
    """Display an area chart.

    This is just syntax-sugar around st.altair_chart. The main difference
    is this command uses the data's own column and indices to figure out
    the chart's spec. As a result this is easier to use for many "just
    plot this" scenarios, while being less customizable.

    Parameters
    ----------
    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
        Data to be plotted.

    width : int
        The chart width in pixels. If 0, selects the width automatically.

    height : int
        The chart height in pixels. If 0, selects the height automatically.

    use_container_width : bool
        If True, set the chart width to the column width. This takes
        precedence over the width argument.

    Example
    -------
    >>> chart_data = pd.DataFrame(
    ...     np.random.randn(20, 3),
    ...     columns=['a', 'b', 'c'])
    ...
    >>> st.area_chart(chart_data)

    .. output::
       https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
       height: 220px

    """
    import streamlit.elements.altair as altair

    chart = altair.generate_chart("area", data, width, height)
    altair.marshall(element.vega_lite_chart, chart, use_container_width)

@_with_element
def bar_chart(
    self, element, data=None, width=0, height=0, use_container_width=True
):
    """Display a bar chart.
    This is just syntax-sugar around st.altair_chart. The main difference
    is this command uses the data's own column and indices to figure out
    the chart's spec. As a result this is easier to use for many "just
    plot this" scenarios, while being less customizable.

    Parameters
    ----------
    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
        Data to be plotted.

    width : int
        The chart width in pixels. If 0, selects the width automatically.

    height : int
        The chart height in pixels. If 0, selects the height automatically.

    use_container_width : bool
        If True, set the chart width to the column width. This takes
        precedence over the width argument.

    Example
    -------
    >>> chart_data = pd.DataFrame(
    ...     np.random.randn(50, 3),
    ...     columns=["a", "b", "c"])
    ...
    >>> st.bar_chart(chart_data)

    .. output::
       https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
       height: 220px

    """
    import streamlit.elements.altair as altair

    chart = altair.generate_chart("bar", data, width, height)
    altair.marshall(element.vega_lite_chart, chart, use_container_width)

@_with_element
def vega_lite_chart(
    self,
    element,
    data=None,
    spec=None,
    width=0,
    use_container_width=False,
    **kwargs,
):
    """Display a chart using the Vega-Lite library.

    Parameters
    ----------
    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
        or None
        Either the data to be plotted or a Vega-Lite spec containing the
        data (which more closely follows the Vega-Lite API).

    spec : dict or None
        The Vega-Lite spec for the chart. If the spec was already passed
        in the previous argument, this must be set to None. See
        https://vega.github.io/vega-lite/docs/ for more info.

    width : number
        Deprecated. If != 0 (default), will show an alert.
        From now on you should set the width directly in the Vega-Lite
        spec. Please refer to the Vega-Lite documentation for details.

    use_container_width : bool
        If True, set the chart width to the column width. This takes
        precedence over Vega-Lite's native `width` value.

    **kwargs : any
        Same as spec, but as keywords.

    Example
    -------
    >>> import pandas as pd
    >>> import numpy as np
    >>>
    >>> df = pd.DataFrame(
    ...     np.random.randn(200, 3),
    ...     columns=['a', 'b', 'c'])
    >>>
    >>> st.vega_lite_chart(df, {
    ...     'mark': {'type': 'circle', 'tooltip': True},
    ...     'encoding': {
    ...         'x': {'field': 'a', 'type': 'quantitative'},
    ...         'y': {'field': 'b', 'type': 'quantitative'},
    ...         'size': {'field': 'c', 'type': 'quantitative'},
    ...         'color': {'field': 'c', 'type': 'quantitative'},
    ...     },
    ... })

    .. output::
       https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
       height: 200px

    Examples of Vega-Lite usage without Streamlit can be found at
    https://vega.github.io/vega-lite/examples/. Most of those can be
    easily translated to the syntax shown above.

    """
    import streamlit.elements.vega_lite as vega_lite

    if width != 0:
        import streamlit as st

        st.warning(
            "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
        )

    vega_lite.marshall(
        element.vega_lite_chart,
        data,
        spec,
        use_container_width=use_container_width,
        **kwargs,
    )

@_with_element
def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
    """Display a chart using the Altair library.

    Parameters
    ----------
    altair_chart : altair.vegalite.v2.api.Chart
        The Altair chart object to display.

    width : number
        Deprecated. If != 0 (default), will show an alert.
        From now on you should set the width directly in the Altair
        spec. Please refer to the Altair documentation for details.

    use_container_width : bool
        If True, set the chart width to the column width. This takes
        precedence over Altair's native `width` value.

    Example
    -------

    >>> import pandas as pd
    >>> import numpy as np
    >>> import altair as alt
    >>>
    >>> df = pd.DataFrame(
    ...     np.random.randn(200, 3),
    ...     columns=['a', 'b', 'c'])
    ...
    >>> c = alt.Chart(df).mark_circle().encode(
    ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
    >>>
    >>> st.altair_chart(c, use_container_width=True)

    .. output::
       https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
       height: 200px

    Examples of Altair charts can be found at
    https://altair-viz.github.io/gallery/.

    """
    import streamlit.elements.altair as altair

    if width != 0:
        import streamlit as st

        st.warning(
            "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
        )

    altair.marshall(
        element.vega_lite_chart,
        altair_chart,
        use_container_width=use_container_width,
    )

@_with_element
def graphviz_chart(
    self, element, figure_or_dot, width=0, height=0, use_container_width=False
):
    """Display a graph using the dagre-d3 library.

    Parameters
    ----------
    figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
        The Graphlib graph object or dot string to display

    width : number
        Deprecated. If != 0 (default), will show an alert.
        From now on you should set the width directly in the Graphviz
        spec. Please refer to the Graphviz documentation for details.

    height : number
        Deprecated. If != 0 (default), will show an alert.
        From now on you should set the height directly in the Graphviz
        spec. Please refer to the Graphviz documentation for details.

    use_container_width : bool
        If True, set the chart width to the column width. This takes
        precedence over the figure's native `width` value.

    Example
    -------

    >>> import streamlit as st
    >>> import graphviz as graphviz
    >>>
    >>> # Create a graphlib graph object
    >>> graph = graphviz.Digraph()
    >>> graph.edge('run', 'intr')
    >>> graph.edge('intr', 'runbl')
    >>> graph.edge('runbl', 'run')
    >>> graph.edge('run', 'kernel')
    >>> graph.edge('kernel', 'zombie')
    >>> graph.edge('kernel', 'sleep')
    >>> graph.edge('kernel', 'runmem')
    >>> graph.edge('sleep', 'swap')
    >>> graph.edge('swap', 'runswap')
    >>> graph.edge('runswap', 'new')
    >>> graph.edge('runswap', 'runmem')
    >>> graph.edge('new', 'runmem')
    >>> graph.edge('sleep', 'runmem')
    >>>
    >>> st.graphviz_chart(graph)

    Or you can render the chart from the graph using GraphViz's Dot
    language:

    >>> st.graphviz_chart('''
        digraph {
            run -> intr
            intr -> runbl
            runbl -> run
            run -> kernel
            kernel -> zombie
            kernel -> sleep
            kernel -> runmem
            sleep -> swap
            swap -> runswap
            runswap -> new
            runswap -> runmem
            new -> runmem
            sleep -> runmem
        }
    ''')

    ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
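        # Editor's note: the width/height handling below repeats the
        # deprecation pattern used by vega_lite_chart, altair_chart, and
        # graphviz_chart above: non-zero values only trigger st.warning,
        # and the chart keeps whatever size the figure itself declares,
        # so sizing belongs on the Plotly figure.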
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
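        # Editor's note: JSNumber mirrors the numeric limits of the
        # JavaScript frontend, where every number is a 64-bit float and
        # integers are only exact up to 2**53 - 1. Validating
        # min_value/max_value here surfaces an out-of-range slider bound
        # as a readable StreamlitAPIException instead of letting it
        # overflow in the browser.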
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places from
        # the `step` argument, but this would only be meaningful if step were a
        # decimal. As a possible improvement we could make this function accept
        # decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader
            is for.

        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.

        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be
              returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. Output must be purely numeric. This does not
            impact the return value. Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int or float
            The current value of the numeric input widget. The return type
            will match the data type of the value parameter.

        Example
        -------
        >>> number = st.number_input('Insert a number')
        >>> st.write('The current number is ', number)
        """

        if isinstance(value, NoValue):
            # Compare against None explicitly: 0 is a valid min_value.
            if min_value is not None:
                value = min_value
            else:
                value = 0.0  # We set a float as default

        int_value = isinstance(value, numbers.Integral)
        float_value = isinstance(value, float)

        if value is None:
            raise StreamlitAPIException(
                "Default value for number_input should be an int or a float."
            )
        else:
            if format is None:
                format = "%d" if int_value else "%0.2f"

            if format in ["%d", "%u", "%i"] and float_value:
                # Warn user to check if displaying float as int was really intended.
                import streamlit as st

                st.warning(
                    "Warning: NumberInput value below is float, but format {} displays as integer.".format(
                        format
                    )
                )

            if step is None:
                step = 1 if int_value else 0.01

        try:
            float(format % 2)
        except (TypeError, ValueError):
            raise StreamlitAPIException(
                "Format string for st.number_input contains invalid characters: %s"
                % format
            )

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]

        int_args = all(
            map(
                lambda a: (
                    isinstance(a, numbers.Integral) or isinstance(a, type(None))
                ),
                args,
            )
        )
        float_args = all(
            map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)
        )

        if not int_args and not float_args:
            raise StreamlitAPIException(
                "All arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args

        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "All numerical arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step_type": type(step).__name__,
                }
            )

        # Compare against None explicitly so that a bound of 0 is honored.
        if (min_value is not None and min_value > value) or (
            max_value is not None and max_value < value
        ):
            raise StreamlitAPIException(
                "The default `value` of %(value)s "
                "must lie between the `min_value` of %(min)s "
                "and the `max_value` of %(max)s, inclusive."
                % {"value": value, "min": min_value, "max": max_value}
            )

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
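        # (Editor's illustration, hedged: JSNumber's bounds appear to mirror
        # JavaScript's safe numeric range, roughly +/-(2**53 - 1) for ints.
        # So a call like st.number_input("n", value=2**53) would be rejected
        # by the checks below with a StreamlitAPIException.)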
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at https://mapbox.com.
        It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.

                - "type" : str
                  One of the Deck.GL layer types that are currently supported
                  by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                  PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                  TextLayer.

                - Plus anything accepted by that layer type. The exact keys
                  that are accepted depend on the "type" field, above. For
                  example, for ScatterplotLayer you can set fields like
                  "opacity", "filled", "stroked", and so on.

                  In addition, Deck.GL's documentation for ScatterplotLayer
                  shows you can use a "getRadius" field to individually set
                  the radius of each circle in the plot. So here you would
                  set "getRadius": "my_column" where "my_column" is the name
                  of the column containing the radius data.

                  For things like "getPosition", which expect an array rather
                  than a scalar value, we provide alternates that make the
                  API simpler to use with dataframes:

                  - Instead of "getPosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getSourcePosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getTargetPosition" : use "getTargetLatitude"
                    and "getTargetLongitude".
                  - Instead of "getColor" : use "getColorR", "getColorG",
                    "getColorB", and (optionally) "getColorA", for red, green,
                    blue and alpha.
                  - Instead of "getSourceColor" : use the same as above.
                  - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st.table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table.add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st.line_chart(df1)
        >>> my_chart.add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st.vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...         'some_fancy_name': df1,  # <-- named dataset
        ...     },
        ...     'data': {'name': 'some_fancy_name'},
        ... })
        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException("Only existing elements can `add_rows`.")

        # Accept syntax st.add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st.add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()
        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows(). "
                "Command requires exactly one dataset."
            )

        # When doing add_rows on an element that does not already have data
        # (for example, st.line_chart() without any args), call the original
        # st.foo() element with new data instead of doing an add_rows().
        if (
            self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES
            and self._cursor.props["last_index"] is None
        ):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            st_method_name = self._cursor.props["delta_type"]
            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)
            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"], self._cursor.props["last_index"]
        )

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index

        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self


def _maybe_melt_data_for_add_rows(data, delta_type, last_index):
    import pandas as pd
    import streamlit.elements.data_frame_proto as data_frame_proto

    # For some delta types we have to reshape the data structure
    # otherwise the input data and the actual data used
    # by vega_lite will be different and it will throw an error.
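    # (Editor's sketch, not original code: a wide frame like
    #     pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # is melted below into long form, one (index, variable, value) row per
    # cell, e.g. (0, 'a', 1), (1, 'a', 2), (0, 'b', 3), (1, 'b', 4),
    # which is the shape the generated Vega-Lite spec expects.)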
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
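The melt-and-reindex logic in `_maybe_melt_data_for_add_rows` above is easiest to see on a tiny frame. Below is a minimal, self-contained sketch using plain pandas only; the frame contents, the `last_index` value, and the printed output are illustrative, not Streamlit internals:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Continue the RangeIndex past the rows that were already plotted
# (here we pretend the previous chart ended at index 1), the same way
# add_rows keeps appended rows from colliding with the old ones.
last_index = 1
df.index = pd.RangeIndex(start=last_index + 1, stop=last_index + 1 + len(df))

# Long form: one (index, variable, value) row per cell, matching the
# shape that the melting chart commands (line/area/bar) expect.
long_df = pd.melt(df.reset_index(), id_vars=["index"])
print(long_df)
#    index variable  value
# 0      2        a      1
# 1      3        a      2
# 2      2        b      3
# 3      3        b      4
```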
button
Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye')
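A short additional sketch (editor's illustration, not part of the original docstring): because a widget's ID is hashed from its label and other content, two otherwise identical buttons need distinct `key` values to avoid a DuplicateWidgetID error.

```python
import streamlit as st

# Same label twice: without unique keys, these would hash to the same
# widget ID and raise DuplicateWidgetID.
left = st.button('Run', key='run_left')
right = st.button('Run', key='run_right')

if left or right:
    st.write('One of the Run buttons was clicked.')
```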
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
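    # (Editor's note, illustrative only: once this decorator is applied, a
    # method defined as `def text(self, element, body)` is called by users
    # as `st.text(body)`; the `element` proto is created, filled, and
    # enqueued by wrapped_method below.)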
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. 
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphviz graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------
        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphviz graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
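        # (Editor's illustration, hedged: a typical call is
        #     st.plotly_chart(fig, use_container_width=True)
        # which, with the default sharing="streamlit", embeds the figure and
        # its dependencies directly in the app so it also works offline.)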
        import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of
            `fig`.

            * If `fig` is set, defaults to `False`.
            * If `fig` is not set, defaults to `True`. This simulates
              Jupyter's approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this function
        closely follow the ones for Bokeh's `show` function. You can find
        more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) # MASKED: button function (lines 1610-1642) @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. 
            See this `thread
            <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
            on the Streamlit community forum for more information and
            `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_
            for updates on the issue.

        """

        # Perform validation checks and return indices based on the default values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This check is done first because calling `if not x` (done
                # right below) when x is a pd.Series or np.array raises a
                # ValueError.
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
        ...     st.write("You didn\'t select comedy.")

        """
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Radio Value has invalid type: %s" % type(index).__name__
            )

        if len(options) > 0 and not 0 <= index < len(options):
            raise StreamlitAPIException(
                "Radio index must be between 0 and length of options"
            )

        element.radio.label = label
        element.radio.default = index
        element.radio.options[:] = [str(format_func(option)) for option in options]

        ui_value = _get_widget_ui_value("radio", element, user_key=key)
        current_value = ui_value if ui_value is not None else index

        return (
            options[current_value]
            if len(options) > 0 and options[current_value] is not None
            else NoValue
        )

    @_with_element
    def selectbox(self, element, label, options, index=0, format_func=str, key=None):
        """Display a select widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of the labels. It receives the
            option as an argument and its output will be cast to str.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option

        Example
        -------
        >>> option = st.selectbox(
        ...     'How would you like to be contacted?',
        ...     ('Email', 'Home phone', 'Mobile phone'))
        >>>
        >>> st.write('You selected:', option)

        """
        if not isinstance(index, int):
            raise StreamlitAPIException(
                "Selectbox Value has invalid type: %s" % type(index).__name__
            )

        if len(options) > 0 and not 0 <= index < len(options):
            raise StreamlitAPIException(
                "Selectbox index must be between 0 and length of options"
            )

        element.selectbox.label = label
        element.selectbox.default = index
        element.selectbox.options[:] = [str(format_func(option)) for option in options]

        ui_value = _get_widget_ui_value("selectbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else index

        return (
            options[current_value]
            if len(options) > 0 and options[current_value] is not None
            else NoValue
        )

    @_with_element
    def slider(
        self,
        element,
        label,
        min_value=None,
        max_value=None,
        value=None,
        step=None,
        format=None,
        key=None,
    ):
        """Display a slider widget.

        This also allows you to render a range slider by passing a
        two-element tuple or list as the `value`.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this slider is for.
        min_value : int/float or None
            The minimum permitted value.
            Defaults to 0 if the value is an int, 0.0 otherwise.
        max_value : int/float or None
            The maximum permitted value.
            Defaults to 100 if the value is an int, 1.0 otherwise.
        value : int/float or a tuple/list of int/float or None
            The value of the slider when it first renders. If a tuple/list
            of two values is passed here, then a range slider with those
            lower and upper bounds is rendered. For example, if set to
            `(1, 10)` the slider will have a selectable range between 1
            and 10. Defaults to min_value.
        step : int/float or None
            The stepping interval.
            Defaults to 1 if the value is an int, 0.01 otherwise.
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. This does not impact the return value.
            Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int/float or tuple of int/float
            The current value of the slider widget. The return type will
            match the data type of the value parameter.

        Examples
        --------
        >>> age = st.slider('How old are you?', 0, 130, 25)
        >>> st.write("I'm ", age, 'years old')

        And here's an example of a range slider:

        >>> values = st.slider(
        ...     'Select a range of values',
        ...     0.0, 100.0, (25.0, 75.0))
        >>> st.write('Values:', values)

        """
        # Set value default.
        if value is None:
            value = min_value if min_value is not None else 0

        # Ensure that the value is either a single value or a range of values.
single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. 
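        # One illustrative sketch of such a heuristic (an assumption, not
        # part of this implementation): infer the precision from `step`
        # with the standard-library decimal module, e.g.
        #
        #     from decimal import Decimal
        #     decimals = max(0, -Decimal(str(step)).as_tuple().exponent)
        #     format = "%%0.%df" % decimals
        #
        # so step=0.25 would yield "%0.2f" and step=0.5 would yield "%0.1f".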
        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader
            is for.

        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.

        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet. (When ready to release, turn test back
        # on at file_uploader_test.py)
        accept_multiple_files = False

        if isinstance(type, str):
            type = [type]

        element.file_uploader.label = label
        element.file_uploader.type[:] = type if type is not None else []
        element.file_uploader.max_upload_size_mb = config.get_option(
            "server.maxUploadSize"
        )
        element.file_uploader.multiple_files = accept_multiple_files
        _set_widget_id("file_uploader", element, user_key=key)

        files = None
        ctx = get_report_ctx()
        if ctx is not None:
            files = ctx.uploaded_file_mgr.get_files(
                session_id=ctx.session_id, widget_id=element.file_uploader.id
            )

        if files is None:
            return NoValue

        file_datas = [get_encoded_file_data(file.data, encoding) for file in files]
        return file_datas if accept_multiple_files else file_datas[0]

    @_with_element
    def beta_color_picker(self, element, label, value=None, key=None):
        """Display a color picker widget.

        Note: This is a beta feature. See
        https://docs.streamlit.io/en/latest/pre_release_features.html for
        more information.
Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. 
If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." ) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... 
datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. 
import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... 
my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. 
              - "type" : str
                One of the Deck.GL layer types that are currently supported
                by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                TextLayer.

              - Plus anything accepted by that layer type. The exact keys
                that are accepted depend on the "type" field, above. For
                example, for ScatterplotLayer you can set fields like
                "opacity", "filled", "stroked", and so on.

                In addition, Deck.GL's documentation for ScatterplotLayer
                shows you can use a "getRadius" field to individually set
                the radius of each circle in the plot. So here you would
                set "getRadius": "my_column" where "my_column" is the name
                of the column containing the radius data.

                For things like "getPosition", which expect an array rather
                than a scalar value, we provide alternates that make the
                API simpler to use with dataframes:

                - Instead of "getPosition" : use "getLatitude" and
                  "getLongitude".
                - Instead of "getSourcePosition" : use "getLatitude" and
                  "getLongitude".
                - Instead of "getTargetPosition" : use "getTargetLatitude"
                  and "getTargetLongitude".
                - Instead of "getColor" : use "getColorR", "getColorG",
                  "getColorB", and (optionally) "getColorA", for red, green,
                  blue and alpha.
                - Instead of "getSourceColor" : use the same as above.
                - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...     }, {
        ...         'type': 'ScatterplotLayer',
        ...         'data': df,
        ...     }])
        ...

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i
           height: 530px

        """

        suppress_deprecation_warning = config.get_option(
            "global.suppressDeprecationWarnings"
        )
        if not suppress_deprecation_warning:
            import streamlit as st

            st.warning(
                """
                The `deck_gl_chart` widget is deprecated and will be removed on
                2020-05-01. To render a map, you should use the `st.pydeck_chart`
                widget.
            """
            )

        import streamlit.elements.deck_gl as deck_gl

        deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs)

    @_with_element
    def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False):
        """Draw a chart using the PyDeck library.

        This supports 3D maps, point clouds, and more! More info about PyDeck
        at https://deckgl.readthedocs.io/en/latest/.

        These docs are also quite useful:

        - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
        - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        pydeck_obj : pydeck.Deck or None
            Object specifying the PyDeck chart to draw.
Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). 
elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
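The melt step above is what lets `add_rows` append to charts that were drawn from wide-form dataframes. The following is a minimal sketch of the same reshaping in plain pandas; the frame and column names are illustrative, not taken from the source:

```python
import pandas as pd

# Wide form, as accepted by st.line_chart: one column per series.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# The reshaping applied by _maybe_melt_data_for_add_rows: expose the index
# as a regular column, then melt the remaining columns into long form.
long_df = pd.melt(df.reset_index(), id_vars=["index"])

print(long_df)
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
```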
@_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value
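The implementation follows the same pattern as the other widgets in this module: fill the proto, resolve the widget ID, then prefer the client-reported value over the default. A minimal sketch of that fallback in isolation; the helper name is illustrative, not part of the source:

```python
def resolve_widget_value(ui_value, default):
    # Mirrors `ui_value if ui_value is not None else default` above: a value
    # reported by the client wins; otherwise the widget default is used.
    return ui_value if ui_value is not None else default

assert resolve_widget_value(None, False) is False  # first run: not clicked yet
assert resolve_widget_value(True, False) is True   # client reported a click
```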
1610
1642
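For context on how the record fields fit together, here is a minimal sketch of recovering the masked span from `file_content` via `start_line`/`end_line`, assuming the line numbers are 1-indexed and inclusive, as the `lines 1610-1642` marker above suggests; the values below are stand-ins, not the record's:

```python
# Illustrative values standing in for the record fields above.
file_content = "line 1\nline 2\nline 3\n"
start_line, end_line = 2, 3

lines = file_content.splitlines()

# Convert the 1-indexed, inclusive range to a 0-indexed half-open slice.
implementation = "\n".join(lines[start_line - 1 : end_line])
print(implementation)  # -> "line 2\nline 3"
```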
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
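        delta_type : str
            The name of the DeltaGenerator method that is creating this
            element (e.g. "line_chart"); recorded on the element's cursor.
        last_index : int or None
            The last index of the element's underlying data, for commands
            listed in DELTAS_TYPES_THAT_MELT_DATAFRAMES; also recorded on
            the cursor.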
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. 
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of the
        rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
           height: 400px

        """
        import streamlit.elements.graphviz_chart as graphviz_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.graphviz_chart` are deprecated and will be removed on 2020-03-04"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )

        graphviz_chart.marshall(
            element.graphviz_chart, figure_or_dot, use_container_width
        )

    @_with_element
    def plotly_chart(
        self,
        element,
        figure_or_data,
        width=0,
        height=0,
        use_container_width=False,
        sharing="streamlit",
        **kwargs,
    ):
        """Display an interactive Plotly chart.

        Plotly is a charting library for Python. The arguments to this
        function closely follow the ones for Plotly's `plot()` function. You
        can find more about Plotly at https://plot.ly/python.

        Parameters
        ----------
        figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,
            dict/list of plotly.graph_objs.Figure/Data, or
            matplotlib.figure.Figure

            See https://plot.ly/python/ for examples of graph descriptions.

            If a Matplotlib Figure, converts it to a Plotly figure and
            displays it.

        width : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the figure.
            Please refer to the Plotly documentation for details.

        height : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the figure.
            Please refer to the Plotly documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        sharing : {'streamlit', 'private', 'secret', 'public'}
            Use 'streamlit' to insert the plot and all its dependencies
            directly in the Streamlit app, which means it works offline too.
            This is the default.
            Use any other sharing mode to send the app to Plotly's servers,
            and embed the result into the Streamlit app. See
            https://plot.ly/python/privacy/ for more. Note that these sharing
            modes require a Plotly account.

        **kwargs
            Any argument accepted by Plotly's `plot()` function.

        To show Plotly charts in Streamlit, just call `st.plotly_chart`
        wherever you would call Plotly's `py.plot` or `py.iplot`.

        Example
        -------

        The example below comes straight from the examples at
        https://plot.ly/python:

        >>> import streamlit as st
        >>> import plotly.figure_factory as ff
        >>> import numpy as np
        >>>
        >>> # Add histogram data
        >>> x1 = np.random.randn(200) - 2
        >>> x2 = np.random.randn(200)
        >>> x3 = np.random.randn(200) + 2
        >>>
        >>> # Group data together
        >>> hist_data = [x1, x2, x3]
        >>>
        >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
        >>>
        >>> # Create distplot with custom bin_size
        >>> fig = ff.create_distplot(
        ...         hist_data, group_labels, bin_size=[.1, .25, .5])
        >>>
        >>> # Plot!
        >>> st.plotly_chart(fig, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
           height: 400px

        """
        # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
        # for their main parameter. I don't like the name, but it's best to
        # keep it in sync with what Plotly calls it.
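        # Note: plotly is imported lazily below; the apparent intent (shared
        # by the other chart commands in this class) is that the library is
        # only required when st.plotly_chart is actually called.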
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
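            (For example, passing the URL of a YouTube video as `data` embeds
            a YouTube player for that video.)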
            Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.

        format : str
            The mime type for the video file. Defaults to 'video/mp4'.
            See https://tools.ietf.org/html/rfc4281 for more info.

        start_time: int
            The time from which this element should start playing.

        Example
        -------
        >>> video_file = open('myvideo.mp4', 'rb')
        >>> video_bytes = video_file.read()
        >>>
        >>> st.video(video_bytes)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv
           height: 600px

        .. note::
           Some videos may not display if they are encoded using MP4V (which
           is an export option in OpenCV), as this codec is not widely
           supported by browsers. Converting your video to H.264 will allow
           the video to be displayed in Streamlit. See this `StackOverflow
           post <https://stackoverflow.com/a/49535220/2394542>`_ or this
           `Streamlit forum post
           <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_
           for more information.

        """
        from .elements import media_proto

        media_proto.marshall_video(
            self._get_coordinates(), element.video, data, format, start_time
        )

    @_with_element
    def button(self, element, label, key=None):
        """Display a button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            If the button was clicked on the last run of the app.

        Example
        -------
        >>> if st.button('Say hello'):
        ...     st.write('Why hello there')
        ... else:
        ...     st.write('Goodbye')

        """
        element.button.label = label
        element.button.default = False

        ui_value = _get_widget_ui_value("button", element, user_key=key)
        current_value = ui_value if ui_value is not None else False
        return current_value

    @_with_element
    def checkbox(self, element, label, value=False, key=None):
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        """
        element.checkbox.label = label
        element.checkbox.default = bool(value)

        ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return bool(current_value)

    @_with_element
    def multiselect(
        self, element, label, options, default=None, format_func=str, key=None
    ):
        """Display a multiselect widget.

        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget
            is for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str
            internally by default.
        default: [str] or None
            List of default values.
        format_func : function
            Function to modify the display of multiselect options. It
            receives the raw option as an argument and should output the
            label to be shown for that option. This has no impact on the
            return value of the multiselect.
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
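        # E.g. st.slider("x", 0, 2**60) is rejected here: widget values are
        # stored as JavaScript numbers on the frontend, and ints beyond
        # roughly +/-(2**53 - 1) (the JS safe-integer range) would silently
        # lose precision.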
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. 
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
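# Illustrative note (an assumption about streamlit.js_number, whose internals are not shown here): the bounds validated in the try-block below appear to mirror JavaScript's safe-integer range, so an int such as 2**53 that the browser cannot represent exactly would be rejected with a StreamlitAPIException rather than silently losing precision; e.g. st.number_input("n", value=2**53) would raise.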
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL's documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'baz': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ...
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use the `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- pydeck_obj : pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional.
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }) >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows(). " "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error.
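# Illustrative sketch (not from the original source): the reshape below is pandas' wide-to-long melt. A frame with columns ['a', 'b'] and a default RangeIndex becomes a frame with columns ['index', 'variable', 'value'] via pd.melt(df.reset_index(), id_vars=['index']); that long form is the layout the line/area/bar chart deltas hand to Vega-Lite.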
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
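The `add_rows` path above re-numbers incoming rows so their `RangeIndex` continues where the rows already on screen stopped. A minimal standalone sketch of that arithmetic (pandas only; `last_index` stands in for the cursor's stored `last_index`, and all variable names are illustrative):

```python
import pandas as pd

last_index = 49  # e.g. 50 rows already plotted, indexed 0..49

new_rows = pd.DataFrame({"a": [7, 8, 9]})  # arrives with a fresh RangeIndex 0..2
step, stop = new_rows.index.step, new_rows.index.stop

# Same formula as _maybe_melt_data_for_add_rows: continue the old index.
new_rows.index = pd.RangeIndex(
    start=last_index + step,
    stop=last_index + step + stop,
    step=step,
)

print(new_rows.index)      # RangeIndex(start=50, stop=53, step=1)
print(new_rows.index[-1])  # 52 -- this becomes the new last_index (stop - 1)
```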
text_input
Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title)
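A short usage sketch exercising the `max_chars` and `type="password"` parameters described above (assumes only the standard `streamlit` import; labels and variable names are illustrative):

```python
import streamlit as st

# max_chars caps the input length; type="password" masks the typed value.
title = st.text_input("Movie title", "Life of Brian", max_chars=60)
token = st.text_input("API token", type="password")

st.write("The current movie title is", title)
if token:
    st.write("Token received (%d characters)" % len(token))
```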
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underlying DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used.
height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.line_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8 height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("line", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def area_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display an area chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.area_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("area", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def bar_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a bar chart.
This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart height in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(50, 3), ... columns=["a", "b", "c"]) ... >>> st.bar_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("bar", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def vega_lite_chart( self, element, data=None, spec=None, width=0, use_container_width=False, **kwargs, ): """Display a chart using the Vega-Lite library. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Either the data to be plotted or a Vega-Lite spec containing the data (which more closely follows the Vega-Lite API). spec : dict or None The Vega-Lite spec for the chart. If the spec was already passed in the previous argument, this must be set to None. See https://vega.github.io/vega-lite/docs/ for more info. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Vega-Lite spec. Please refer to the Vega-Lite documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Vega-Lite's native `width` value. **kwargs : any Same as spec, but as keywords. Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) >>> >>> st.vega_lite_chart(df, { ... 'mark': {'type': 'circle', 'tooltip': True}, ... 'encoding': { ... 'x': {'field': 'a', 'type': 'quantitative'}, ... 'y': {'field': 'b', 'type': 'quantitative'}, ... 'size': {'field': 'c', 'type': 'quantitative'}, ... 'color': {'field': 'c', 'type': 'quantitative'}, ... }, ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Vega-Lite usage without Streamlit can be found at https://vega.github.io/vega-lite/examples/. Most of those can be easily translated to the syntax shown above. """ import streamlit.elements.vega_lite as vega_lite if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html" ) vega_lite.marshall( element.vega_lite_chart, data, spec, use_container_width=use_container_width, **kwargs, ) @_with_element def altair_chart(self, element, altair_chart, width=0, use_container_width=False): """Display a chart using the Altair library. Parameters ---------- altair_chart : altair.vegalite.v2.api.Chart The Altair chart object to display. width : number Deprecated. If != 0 (default), will show an alert.
From now on you should set the width directly in the Altair spec. Please refer to the Altair documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Altair's native `width` value. Example ------- >>> import pandas as pd >>> import numpy as np >>> import altair as alt >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) ... >>> c = alt.Chart(df).mark_circle().encode( ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c']) >>> >>> st.altair_chart(c, use_container_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Altair charts can be found at https://altair-viz.github.io/gallery/. """ import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphviz graph object or dot string to display width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphviz graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz_chart` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it.
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib supports several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an np.ndarray, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time : int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs.
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time : int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default : [str] or None List of default values. format_func : function Function to modify the display of multiselect options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the multiselect.
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices based on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This check is done before the others because calling `if not x` (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ...
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults to 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value.
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
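# (Why these bounds exist: slider values round-trip through the browser as
# JavaScript numbers, which cannot exactly represent ints outside the safe
# range of roughly +/- 2**53, nor floats outside the double range; the
# JSNumber helpers below enforce those limits and raise on violations.)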
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions, e.g. ['png', 'jpg']. By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate that the value is a valid hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) # MASKED: text_input function (lines 2231-2285) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text area widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ...
was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." ) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. 
if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be a date/datetime or a list/tuple of " "0 to 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None. step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value is not None: # `is not None` so a min_value of 0 is honored value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type.
args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) # `is not None` so bounds of 0 are still enforced. if (min_value is not None and min_value > value) or ( max_value is not None and max_value < value ): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions.
if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. 
- "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL's documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'baz': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use the `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- pydeck_obj : pydeck.Deck or None Object specifying the PyDeck chart to draw.
Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }) >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df).
elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows(). " "Command requires exactly one dataset." ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
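# A minimal sketch of how NoValue flows through _value_or_dg (the widget name
# below is hypothetical; the wiring mirrors _enqueue_new_element_delta in this
# module):
#
#   @_with_element
#   def demo_widget(self, element):
#       element.text.body = "demo"
#       return NoValue  # st.demo_widget() returns None, not a DeltaGenerator
#
# Returning None from the marshaller would instead hand back the locked
# DeltaGenerator for the new element, which is the behavior non-widget
# elements rely on.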
@_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value)
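# A short usage sketch for the "password" variant of text_input above
# (the label and values are hypothetical):
#
#   >>> password = st.text_input("Enter a password", type="password")
#   >>> st.write("Password length:", len(password))
#
# Any `type` other than "default" or "password" raises StreamlitAPIException,
# per the validation in the function body.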
2,231
2,285
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from the function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig.
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL
           height: 400px

        """
        import streamlit.elements.graphviz_chart as graphviz_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.graphviz_chart` are deprecated and will be removed on 2020-03-04"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.graphviz_chart` is deprecated and will be removed on 2020-03-04"
            )

        graphviz_chart.marshall(
            element.graphviz_chart, figure_or_dot, use_container_width
        )

    @_with_element
    def plotly_chart(
        self,
        element,
        figure_or_data,
        width=0,
        height=0,
        use_container_width=False,
        sharing="streamlit",
        **kwargs,
    ):
        """Display an interactive Plotly chart.

        Plotly is a charting library for Python. The arguments to this function
        closely follow the ones for Plotly's `plot()` function. You can find
        more about Plotly at https://plot.ly/python.

        Parameters
        ----------
        figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,
            dict/list of plotly.graph_objs.Figure/Data, or
            matplotlib.figure.Figure

            See https://plot.ly/python/ for examples of graph descriptions.

            If a Matplotlib Figure, converts it to a Plotly figure and displays
            it.

        width : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the figure.
            Please refer to the Plotly documentation for details.

        height : int
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the figure.
            Please refer to the Plotly documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        sharing : {'streamlit', 'private', 'secret', 'public'}
            Use 'streamlit' to insert the plot and all its dependencies
            directly in the Streamlit app, which means it works offline too.
            This is the default.
            Use any other sharing mode to send the chart to Plotly's servers,
            and embed the result into the Streamlit app. See
            https://plot.ly/python/privacy/ for more. Note that these sharing
            modes require a Plotly account.

        **kwargs
            Any argument accepted by Plotly's `plot()` function.

        To show Plotly charts in Streamlit, just call `st.plotly_chart`
        wherever you would call Plotly's `py.plot` or `py.iplot`.

        Example
        -------
        The example below comes straight from the examples at
        https://plot.ly/python:

        >>> import streamlit as st
        >>> import plotly.figure_factory as ff
        >>> import numpy as np
        >>>
        >>> # Add histogram data
        >>> x1 = np.random.randn(200) - 2
        >>> x2 = np.random.randn(200)
        >>> x3 = np.random.randn(200) + 2
        >>>
        >>> # Group data together
        >>> hist_data = [x1, x2, x3]
        >>>
        >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
        >>>
        >>> # Create distplot with custom bin_size
        >>> fig = ff.create_distplot(
        ...         hist_data, group_labels, bin_size=[.1, .25, .5])
        >>>
        >>> # Plot!
        >>> st.plotly_chart(fig, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
           height: 400px

        """
        # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
        # for their main parameter. I don't like the name, but it's best to
        # keep it in sync with what Plotly calls it.
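        # The body below first emits deprecation warnings for the legacy
        # `width`/`height` kwargs (mirroring the other chart commands above),
        # then delegates proto marshalling to
        # streamlit.elements.plotly_chart.marshall(), which also applies the
        # `sharing` mode described in the docstring.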
import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of `fig`.

            * If `fig` is set, defaults to `False`.

            * If `fig` is not set, defaults to `True`. This simulates Jupyter's
              approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this function
        closely follow the ones for Bokeh's `show` function. You can find
        more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file
            headers to match specified file format.

        format : str
            The mime type for the video file. Defaults to 'video/mp4'.
            See https://tools.ietf.org/html/rfc4281 for more info.

        start_time: int
            The time from which this element should start playing.

        Example
        -------
        >>> video_file = open('myvideo.mp4', 'rb')
        >>> video_bytes = video_file.read()
        >>>
        >>> st.video(video_bytes)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv
           height: 600px

        .. note::
           Some videos may not display if they are encoded using MP4V (which is
           an export option in OpenCV), as this codec is not widely supported
           by browsers. Converting your video to H.264 will allow the video to
           be displayed in Streamlit. See this `StackOverflow post
           <https://stackoverflow.com/a/49535220/2394542>`_ or this
           `Streamlit forum post
           <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_
           for more information.

        """
        from .elements import media_proto

        media_proto.marshall_video(
            self._get_coordinates(), element.video, data, format, start_time
        )

    @_with_element
    def button(self, element, label, key=None):
        """Display a button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this button is for.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            If the button was clicked on the last run of the app.

        Example
        -------
        >>> if st.button('Say hello'):
        ...     st.write('Why hello there')
        ... else:
        ...     st.write('Goodbye')

        """
        element.button.label = label
        element.button.default = False

        ui_value = _get_widget_ui_value("button", element, user_key=key)
        current_value = ui_value if ui_value is not None else False
        return current_value

    @_with_element
    def checkbox(self, element, label, value=False, key=None):
        """Display a checkbox widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this checkbox is for.
        value : bool
            Preselect the checkbox when it first renders. This will be
            cast to bool internally.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        bool
            Whether or not the checkbox is checked.

        Example
        -------
        >>> agree = st.checkbox('I agree')
        >>>
        >>> if agree:
        ...     st.write('Great!')

        """
        element.checkbox.label = label
        element.checkbox.default = bool(value)

        ui_value = _get_widget_ui_value("checkbox", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return bool(current_value)

    @_with_element
    def multiselect(
        self, element, label, options, default=None, format_func=str, key=None
    ):
        """Display a multiselect widget.

        The multiselect widget starts as empty.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this select widget is
            for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the select options. This will be cast to str internally
            by default.
        default: [str] or None
            List of default values.
        format_func : function
            Function to modify the display of multiselect options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the multiselect.
key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        [str]
            A list with the selected options

        Example
        -------
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. note::
           User experience can be degraded for large lists of `options` (100+),
           as this widget is not designed to handle arbitrary text search
           efficiently. See this `thread
           <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
           on the Streamlit community forum for more information and
           `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_
           for updates on the issue.

        """

        # Perform validation checks and return indices based on the default values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This if is done before others because calling if not x (done
                # right below) when x is of type pd.Series() or np.array() throws a
                # ValueError exception.
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group is for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
        ...
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
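        # JSNumber validates that min_value/max_value can be represented
        # exactly as JavaScript numbers on the frontend (the apparent reason
        # these checks exist); on failure it raises JSNumberBoundsException
        # with a human-readable message, which the try/except below re-raises
        # as a StreamlitAPIException. For example (illustrative):
        # st.slider("x", 0, 10**20) would fail here, because 10**20 exceeds
        # the largest int a JS number can hold exactly.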
try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"

        # It would be great if we could guess the number of decimal places from
        # the `step` argument, but this would only be meaningful if step were a
        # decimal. As a possible improvement we could make this function accept
        # decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader is
            for.

        type : str or list of str or None
            Array of allowed extensions, e.g. ['png', 'jpg'].
            By default, all extensions are allowed.

        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the encoding
              parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
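        Notes
        -----
        The "password" type only masks the text in the browser widget; the
        value returned to your script is still the plain str the user typed,
        as described under Returns above.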
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. Output must be purely numeric. This does not
            impact the return value. Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int or float
            The current value of the numeric input widget. The return type
            will match the data type of the value parameter.

        Example
        -------
        >>> number = st.number_input('Insert a number')
        >>> st.write('The current number is ', number)
        """

        if isinstance(value, NoValue):
            # Compare against None explicitly so that a min_value of 0 is honored.
            if min_value is not None:
                value = min_value
            else:
                value = 0.0  # We set a float as default

        int_value = isinstance(value, numbers.Integral)
        float_value = isinstance(value, float)

        if value is None:
            raise StreamlitAPIException(
                "Default value for number_input should be an int or a float."
            )
        else:
            if format is None:
                format = "%d" if int_value else "%0.2f"

            if format in ["%d", "%u", "%i"] and float_value:
                # Warn user to check if displaying float as int was really intended.
                import streamlit as st

                st.warning(
                    "Warning: NumberInput value below is float, but format {} displays as integer.".format(
                        format
                    )
                )

            if step is None:
                step = 1 if int_value else 0.01

        try:
            float(format % 2)
        except (TypeError, ValueError):
            raise StreamlitAPIException(
                "Format string for st.number_input contains invalid characters: %s"
                % format
            )

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]

        int_args = all(
            map(
                lambda a: (
                    isinstance(a, numbers.Integral) or isinstance(a, type(None))
                ),
                args,
            )
        )
        float_args = all(
            map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)
        )

        if not int_args and not float_args:
            raise StreamlitAPIException(
                "All arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args

        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "All numerical arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step_type": type(step).__name__,
                }
            )

        # Explicit None checks here as well, so bounds of 0 are still enforced.
        if (min_value is not None and min_value > value) or (
            max_value is not None and max_value < value
        ):
            raise StreamlitAPIException(
                "The default `value` of %(value)s "
                "must lie between the `min_value` of %(min)s "
                "and the `max_value` of %(max)s, inclusively."
                % {"value": value, "min": min_value, "max": max_value}
            )

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at https://mapbox.com.
        It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.

                - "type" : str
                  One of the Deck.GL layer types that are currently supported
                  by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                  PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                  TextLayer.

                - Plus anything accepted by that layer type. The exact keys
                  that are accepted depend on the "type" field, above. For
                  example, for ScatterplotLayer you can set fields like
                  "opacity", "filled", "stroked", and so on.

                  In addition, Deck.GL's documentation for ScatterplotLayer
                  shows you can use a "getRadius" field to individually set
                  the radius of each circle in the plot. So here you would
                  set "getRadius": "my_column" where "my_column" is the name
                  of the column containing the radius data.

                  For things like "getPosition", which expect an array rather
                  than a scalar value, we provide alternates that make the
                  API simpler to use with dataframes:

                  - Instead of "getPosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getSourcePosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getTargetPosition" : use "getTargetLatitude"
                    and "getTargetLongitude".
                  - Instead of "getColor" : use "getColorR", "getColorG",
                    "getColorB", and (optionally) "getColorA", for red, green,
                    blue and alpha.
                  - Instead of "getSourceColor" : use the same as above.
                  - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...     }, {
        ...         'type': 'ScatterplotLayer',
        ...         'data': df,
        ...     }])

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i
           height: 530px

        """
        suppress_deprecation_warning = config.get_option(
            "global.suppressDeprecationWarnings"
        )
        if not suppress_deprecation_warning:
            import streamlit as st

            st.warning(
                """
                The `deck_gl_chart` widget is deprecated and will be removed on
                2020-05-01. To render a map, you should use the `st.pydeck_chart`
                widget.
                """
            )

        import streamlit.elements.deck_gl as deck_gl

        deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs)

    @_with_element
    def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False):
        """Draw a chart using the PyDeck library.

        This supports 3D maps, point clouds, and more! More info about PyDeck
        at https://deckgl.readthedocs.io/en/latest/.

        These docs are also quite useful:

        - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
        - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at https://mapbox.com.
        It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        pydeck_obj : pydeck.Deck or None
            Object specifying the PyDeck chart to draw.

        Example
        -------
        Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
        the light map style:

        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.pydeck_chart(pdk.Deck(
        ...     map_style='mapbox://styles/mapbox/light-v9',
        ...     initial_view_state=pdk.ViewState(
        ...         latitude=37.76,
        ...         longitude=-122.4,
        ...         zoom=11,
        ...         pitch=50,
        ...     ),
        ...     layers=[
        ...         pdk.Layer(
        ...            'HexagonLayer',
        ...            data=df,
        ...            get_position='[lon, lat]',
        ...            radius=200,
        ...            elevation_scale=4,
        ...            elevation_range=[0, 1000],
        ...            pickable=True,
        ...            extruded=True,
        ...         ),
        ...         pdk.Layer(
        ...             'ScatterplotLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             get_color='[200, 30, 0, 160]',
        ...             get_radius=200,
        ...         ),
        ...     ],
        ... ))

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
           height: 530px

        """
        import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart

        deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width)

    @_with_element
    def table(self, element, data=None):
        """Display a static table.

        This differs from `st.dataframe` in that the table in this case is
        static: its entire contents are just laid out directly on the page.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            The table data.

        Example
        -------
        >>> df = pd.DataFrame(
        ...     np.random.randn(10, 5),
        ...     columns=('col %d' % i for i in range(5)))
        ...
        >>> st.table(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq
           height: 480px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, element.table)

    def add_rows(self, data=None, **kwargs):
        """Concatenate a dataframe to the bottom of the current one.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            Table to concat. Optional.
        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...     np.random.randn(50, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st.table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...     np.random.randn(50, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table.add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st.line_chart(df1)
        >>> my_chart.add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st.vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...         'some_fancy_name': df1,  # <-- named dataset
        ...     },
        ...     'data': {'name': 'some_fancy_name'},
        ... })
        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException("Only existing elements can `add_rows`.")

        # Accept syntax st.add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st.add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()
        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows(). "
                "Command requires exactly one dataset."
            )

        # When doing add_rows on an element that does not already have data
        # (for example, st.line_chart() without any args), call the original
        # st.foo() element with new data instead of doing an add_rows().
        if (
            self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES
            and self._cursor.props["last_index"] is None
        ):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            st_method_name = self._cursor.props["delta_type"]
            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)
            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"], self._cursor.props["last_index"]
        )

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index

        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self


def _maybe_melt_data_for_add_rows(data, delta_type, last_index):
    import pandas as pd
    import streamlit.elements.data_frame_proto as data_frame_proto

    # For some delta types we have to reshape the data structure
    # otherwise the input data and the actual data used
    # by vega_lite will be different and it will throw an error.
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
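# Hedged illustration (not part of the library file above): what the melt step
# in _maybe_melt_data_for_add_rows produces for the chart types listed in
# DELTAS_TYPES_THAT_MELT_DATAFRAMES. The column names "a" and "b" are invented
# for this sketch.
import pandas as pd

wide = pd.DataFrame({"a": [1, 2], "b": [3, 4]})        # one column per series
long = pd.melt(wide.reset_index(), id_vars=["index"])  # -> index/variable/value rows
print(long)
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
# This long form matches the Vega-Lite specs generated for st.line_chart,
# st.area_chart, and st.bar_chart, which is why add_rows must reshape new data
# the same way before concatenating it.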
text_area
Display a multi-line text input widget.

Parameters
----------
label : str
    A short label explaining to the user what this input is for.
value : any
    The text value of this widget when it first renders. This will be
    cast to str internally.
height : int or None
    Desired height of the UI element expressed in pixels. If None, a
    default height is used.
max_chars : int or None
    Maximum number of characters allowed in text area.
key : str
    An optional string to use as the unique key for the widget.
    If this is omitted, a key will be generated for the widget
    based on its content. Multiple widgets of the same type may
    not share the same key.

Returns
-------
str
    The current value of the text area widget.

Example
-------
>>> txt = st.text_area('Text to analyze', '''
...     It was the best of times, it was the worst of times, it was
...     the age of wisdom, it was the age of foolishness, it was
...     the epoch of belief, it was the epoch of incredulity, it
...     was the season of Light, it was the season of Darkness, it
...     was the spring of hope, it was the winter of despair, (...)
...     ''')
>>> st.write('Sentiment:', run_sentiment_analysis(txt))
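# Reference sketch of the text_area implementation this docstring describes,
# matching the version shown earlier in this document (the proto fields and the
# _get_widget_ui_value helper are defined in the file content below):
@_with_element
def text_area(self, element, label, value="", height=None, max_chars=None, key=None):
    # Marshall the widget's configuration into its proto.
    element.text_area.label = label
    element.text_area.default = str(value)

    if height is not None:
        element.text_area.height = height

    if max_chars is not None:
        element.text_area.max_chars = max_chars

    # Prefer the value reported by the frontend widget; fall back to the default.
    ui_value = _get_widget_ui_value("text_area", element, user_key=key)
    current_value = ui_value if ui_value is not None else value
    return str(current_value)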
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------
        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values
            and colors. (It does not support some of the more exotic
            pandas styling features, like bar charts, hovering, and
            captions.) Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     np.random.randn(50, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...     np.random.randn(10, 20),
        ...     columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphviz graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------
        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
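# A note on why the bounds checks below exist: slider values cross into
# the browser, and JavaScript numbers are IEEE-754 doubles, so integers
# are only exact up to 2**53 - 1. A call like the (illustrative) one
# below is therefore rejected rather than silently losing precision:
#
#     >>> st.slider('big', min_value=0, max_value=2**60)  # raises
#     ... # StreamlitAPIException via JSNumberBoundsException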
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. 
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) # MASKED: text_area function (lines 2287-2339) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." ) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. 
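# (Note that this default is computed at script run time, so a bare
# st.date_input(label) tracks "today" across reruns; to pin a fixed
# default, pass an explicit date, e.g. value=date(2019, 7, 6) as in the
# docstring example above.)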
if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. 
args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. 
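# Floats are interpreted as fractions and ints as percentages, so these
# two (illustrative) calls render the same half-full bar:
#
#     >>> st.progress(0.5)
#     >>> st.progress(50)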
if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. 
- "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. 
Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). 
elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
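The masked `text_area` implementation shown next follows the same widget pattern as `text_input` and `checkbox` above: marshal the label and defaults into the element proto, ask the report context for a client-supplied value via `_get_widget_ui_value`, and fall back to the default when none exists. A minimal sketch of that round trip, reusing this module's helpers (the wrapper function itself is hypothetical):

```python
def _widget_value_sketch(element, label, value="", key=None):
    # Marshal inputs into the proto, then prefer the frontend's value.
    element.text_area.label = label
    element.text_area.default = str(value)
    ui_value = _get_widget_ui_value("text_area", element, user_key=key)
    return str(ui_value if ui_value is not None else value)
```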
    @_with_element
    def text_area(
        self, element, label, value="", height=None, max_chars=None, key=None
    ):
        """Display a multi-line text input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.
        value : any
            The text value of this widget when it first renders. This will be
            cast to str internally.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.
        max_chars : int or None
            Maximum number of characters allowed in text area.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        str
            The current value of the text area widget.

        Example
        -------
        >>> txt = st.text_area('Text to analyze', '''
        ...     It was the best of times, it was the worst of times, it was
        ...     the age of wisdom, it was the age of foolishness, it was
        ...     the epoch of belief, it was the epoch of incredulity, it
        ...     was the season of Light, it was the season of Darkness, it
        ...     was the spring of hope, it was the winter of despair, (...)
        ...     ''')
        >>> st.write('Sentiment:', run_sentiment_analysis(txt))

        """
        element.text_area.label = label
        element.text_area.default = str(value)

        if height is not None:
            element.text_area.height = height

        if max_chars is not None:
            element.text_area.max_chars = max_chars

        ui_value = _get_widget_ui_value("text_area", element, user_key=key)
        current_value = ui_value if ui_value is not None else value
        return str(current_value)
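A short usage sketch of the optional parameters the implementation wires through (the height, character cap, and key below are arbitrary illustrative values):

```python
import streamlit as st

essay = st.text_area("Your review", value="Type here...",
                     height=200, max_chars=500, key="review")
st.write(len(essay), "characters so far")
```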
2287
2339
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display an error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display a warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
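        # Hedged usage sketch (comment only, so nothing runs here): a typical
        # call path exercising this method might look like
        #
        #     import plotly.graph_objects as go
        #     fig = go.Figure(data=go.Bar(y=[2, 3, 1]))
        #     st.plotly_chart(fig)  # sharing="streamlit" keeps it offline
        #
        # The figure is serialized into element.plotly_chart below and shipped
        # to the frontend by _enqueue_new_element_delta via @_with_element.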
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
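        # For context, a rough sketch of the idea behind these checks
        # (hypothetical names; the real logic lives in streamlit.js_number):
        # values must fit in JavaScript's exactly-representable integer range,
        #
        #     JS_MAX_SAFE_INTEGER = (1 << 53) - 1  # 9007199254740991
        #     assert -JS_MAX_SAFE_INTEGER <= min_value <= JS_MAX_SAFE_INTEGER
        #
        # since anything larger would be silently rounded on the frontend.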
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"
        # It would be great if we could guess the number of decimal places
        # from the `step` argument, but this would only be meaningful if step
        # were a decimal. As a possible improvement we could make this
        # function accept decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array destructure it into a
            # single variable
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader is
            for.

        type : str or list of str or None
            Array of allowed extensions, e.g. ['png', 'jpg'].
            By default, all extensions are allowed.

        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
        # (When ready to release, turn test back on at file_uploader_test.py)
        accept_multiple_files = False

        if isinstance(type, str):
            type = [type]

        element.file_uploader.label = label
        element.file_uploader.type[:] = type if type is not None else []
        element.file_uploader.max_upload_size_mb = config.get_option(
            "server.maxUploadSize"
        )
        element.file_uploader.multiple_files = accept_multiple_files
        _set_widget_id("file_uploader", element, user_key=key)

        files = None
        ctx = get_report_ctx()
        if ctx is not None:
            files = ctx.uploaded_file_mgr.get_files(
                session_id=ctx.session_id, widget_id=element.file_uploader.id
            )

        if files is None:
            return NoValue

        file_datas = [get_encoded_file_data(file.data, encoding) for file in files]
        return file_datas if accept_multiple_files else file_datas[0]

    @_with_element
    def beta_color_picker(self, element, label, value=None, key=None):
        """Display a color picker widget.

        Note: This is a beta feature. See
        https://docs.streamlit.io/en/latest/pre_release_features.html for more
        information.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.

        value : str or None
            The hex value of this widget when it first renders. If None,
            defaults to black.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        str
            The selected color as a hex string.

        Example
        -------
        >>> color = st.beta_color_picker('Pick A Color', '#00f900')
        >>> st.write('The current color is', color)

        """
        # set value default
        if value is None:
            value = "#000000"

        # make sure the value is a string
        if not isinstance(value, str):
            raise StreamlitAPIException(
                """
                Color Picker Value has invalid type: %s. Expects a hex string
                like '#00FFAA' or '#000'.
                """
                % type(value).__name__
            )

        # validate the value and expects a hex string
        match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value)

        if not match:
            raise StreamlitAPIException(
                """
                '%s' is not a valid hex code for colors. Valid ones are like
                '#00FFAA' or '#000'.
                """
                % value
            )

        element.color_picker.label = label
        element.color_picker.default = str(value)

        ui_value = _get_widget_ui_value("color_picker", element, user_key=key)
        current_value = ui_value if ui_value is not None else value

        return str(current_value)

    @_with_element
    def text_input(
        self, element, label, value="", max_chars=None, key=None, type="default"
    ):
        """Display a single-line text input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this input is for.

        value : any
            The text value of this widget when it first renders. This will be
            cast to str internally.

        max_chars : int or None
            Max number of characters allowed in text input.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        type : str
            The type of the text input. This can be either "default" (for
            a regular text input), or "password" (for a text input that
            masks the user's typed value). Defaults to "default".

        Returns
        -------
        str
            The current value of the text input widget.
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
        )

        # Convert datetime to time
        if isinstance(value, datetime):
            value = value.time()

        element.time_input.label = label
        element.time_input.default = time.strftime(value, "%H:%M")

        ui_value = _get_widget_ui_value("time_input", element, user_key=key)
        current_value = (
            datetime.strptime(ui_value, "%H:%M").time()
            if ui_value is not None
            else value
        )
        return current_value

    @_with_element
    def date_input(
        self,
        element,
        label,
        value=None,
        min_value=datetime.min,
        max_value=None,
        key=None,
    ):
        """Display a date input widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this date input is for.
        value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None
            The value of this widget when it first renders. If a list/tuple with
            0 to 2 date/datetime values is provided, the datepicker will allow
            users to provide a range. Defaults to today as a single-date picker.
        min_value : datetime.date or datetime.datetime
            The minimum selectable date. Defaults to datetime.min.
        max_value : datetime.date or datetime.datetime
            The maximum selectable date. Defaults to today+10y.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        datetime.date
            The current value of the date input widget.

        Example
        -------
        >>> d = st.date_input(
        ...     "When\'s your birthday",
        ...     datetime.date(2019, 7, 6))
        >>> st.write('Your birthday is:', d)

        """
        # Set value default.
        if value is None:
            value = datetime.now().date()

        single_value = isinstance(value, (date, datetime))
        range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
        if not single_value and not range_value:
            raise StreamlitAPIException(
                "DateInput value should either be a date/datetime or a list/tuple of "
                "0 - 2 date/datetime values"
            )

        if single_value:
            value = [value]

        element.date_input.is_range = range_value

        value = [v.date() if isinstance(v, datetime) else v for v in value]

        element.date_input.label = label
        element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value]

        if isinstance(min_value, datetime):
            min_value = min_value.date()

        element.date_input.min = date.strftime(min_value, "%Y/%m/%d")

        if max_value is None:
            today = date.today()
            max_value = date(today.year + 10, today.month, today.day)

        if isinstance(max_value, datetime):
            max_value = max_value.date()

        element.date_input.max = date.strftime(max_value, "%Y/%m/%d")

        ui_value = _get_widget_ui_value("date_input", element, user_key=key)

        if ui_value is not None:
            value = getattr(ui_value, "data")
            value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value]

        if single_value:
            return value[0]
        else:
            return tuple(value)

    @_with_element
    def number_input(
        self,
        element,
        label,
        min_value=None,
        max_value=None,
        value=NoValue(),
        step=None,
        format=None,
        key=None,
    ):
        """Display a numeric input widget.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this input is for.
        min_value : int or float or None
            The minimum permitted value.
            If None, there will be no minimum.
        max_value : int or float or None
            The maximum permitted value.
            If None, there will be no maximum.
        value : int or float or None
            The value of this widget when it first renders.
            Defaults to min_value, or 0.0 if min_value is None.
        step : int or float or None
            The stepping interval.
            Defaults to 1 if the value is an int, 0.01 otherwise.
            If the value is not specified, the format parameter will be used.
        format : str or None
            A printf-style format string controlling how the interface should
            display numbers. Output must be purely numeric. This does not
            impact the return value. Valid formatters: %d %e %f %g %i
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        int or float
            The current value of the numeric input widget. The return type
            will match the data type of the value parameter.

        Example
        -------
        >>> number = st.number_input('Insert a number')
        >>> st.write('The current number is ', number)
        """

        if isinstance(value, NoValue):
            # Use "is not None" so a legitimate min_value of 0 is not skipped.
            if min_value is not None:
                value = min_value
            else:
                value = 0.0  # We set a float as default

        int_value = isinstance(value, numbers.Integral)
        float_value = isinstance(value, float)

        if value is None:
            raise StreamlitAPIException(
                "Default value for number_input should be an int or a float."
            )
        else:
            if format is None:
                format = "%d" if int_value else "%0.2f"

            if format in ["%d", "%u", "%i"] and float_value:
                # Warn user to check if displaying float as int was really intended.
                import streamlit as st

                st.warning(
                    "Warning: NumberInput value below is float, but format {} displays as integer.".format(
                        format
                    )
                )

            if step is None:
                step = 1 if int_value else 0.01

        try:
            float(format % 2)
        except (TypeError, ValueError):
            raise StreamlitAPIException(
                "Format string for st.number_input contains invalid characters: %s"
                % format
            )

        # Ensure that all arguments are of the same type.
        args = [min_value, max_value, step]

        int_args = all(
            map(
                lambda a: (
                    isinstance(a, numbers.Integral) or isinstance(a, type(None))
                ),
                args,
            )
        )
        float_args = all(
            map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)
        )

        if not int_args and not float_args:
            raise StreamlitAPIException(
                "All arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                }
            )

        # Ensure that the value matches arguments' types.
        all_ints = int_value and int_args
        all_floats = float_value and float_args

        if not all_ints and not all_floats:
            raise StreamlitAPIException(
                "All numerical arguments must be of the same type."
                "\n`value` has %(value_type)s type."
                "\n`min_value` has %(min_type)s type."
                "\n`max_value` has %(max_type)s type."
                "\n`step` has %(step_type)s type."
                % {
                    "value_type": type(value).__name__,
                    "min_type": type(min_value).__name__,
                    "max_type": type(max_value).__name__,
                    "step_type": type(step).__name__,
                }
            )

        # Compare against None explicitly so bounds of 0 are still enforced.
        if (min_value is not None and min_value > value) or (
            max_value is not None and max_value < value
        ):
            raise StreamlitAPIException(
                "The default `value` of %(value)s "
                "must lie between the `min_value` of %(min)s "
                "and the `max_value` of %(max)s, inclusively."
                % {"value": value, "min": min_value, "max": max_value}
            )

        # Bounds checks. JSNumber produces human-readable exceptions that
        # we simply re-package as StreamlitAPIExceptions.
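        # (The bounds exist because widget values cross into the browser as
        # JavaScript numbers, which can only represent integers exactly up to
        # Number.MAX_SAFE_INTEGER, i.e. 2**53 - 1.)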
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.

                - "type" : str
                  One of the Deck.GL layer types that are currently supported
                  by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                  PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                  TextLayer.

                - Plus anything accepted by that layer type. The exact keys
                  that are accepted depend on the "type" field, above. For
                  example, for ScatterplotLayer you can set fields like
                  "opacity", "filled", "stroked", and so on.

                  In addition, Deck.GL's documentation for ScatterplotLayer
                  shows you can use a "getRadius" field to individually set
                  the radius of each circle in the plot. So here you would
                  set "getRadius": "my_column" where "my_column" is the name
                  of the column containing the radius data.

                  For things like "getPosition", which expect an array rather
                  than a scalar value, we provide alternates that make the
                  API simpler to use with dataframes:

                  - Instead of "getPosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getSourcePosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getTargetPosition" : use "getTargetLatitude"
                    and "getTargetLongitude".
                  - Instead of "getColor" : use "getColorR", "getColorG",
                    "getColorB", and (optionally) "getColorA", for red, green,
                    blue and alpha.
                  - Instead of "getSourceColor" : use the same as above.
                  - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...     }, {
        ...         'type': 'ScatterplotLayer',
        ...         'data': df,
        ...     }])
        ...

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i
           height: 530px

        """
        suppress_deprecation_warning = config.get_option(
            "global.suppressDeprecationWarnings"
        )
        if not suppress_deprecation_warning:
            import streamlit as st

            st.warning(
                """
                The `deck_gl_chart` widget is deprecated and will be removed on
                2020-05-01. To render a map, you should use the `st.pydeck_chart`
                widget.
            """
            )

        import streamlit.elements.deck_gl as deck_gl

        deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs)

    @_with_element
    def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False):
        """Draw a chart using the PyDeck library.

        This supports 3D maps, point clouds, and more! More info about PyDeck
        at https://deckgl.readthedocs.io/en/latest/.

        These docs are also quite useful:

        - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs

        - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        pydeck_obj : pydeck.Deck or None
            Object specifying the PyDeck chart to draw.

        Example
        -------
        Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
        the light map style:

        >>> df = pd.DataFrame(
        ...    np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...    columns=['lat', 'lon'])
        >>>
        >>> st.pydeck_chart(pdk.Deck(
        ...     map_style='mapbox://styles/mapbox/light-v9',
        ...     initial_view_state=pdk.ViewState(
        ...         latitude=37.76,
        ...         longitude=-122.4,
        ...         zoom=11,
        ...         pitch=50,
        ...     ),
        ...     layers=[
        ...         pdk.Layer(
        ...            'HexagonLayer',
        ...            data=df,
        ...            get_position='[lon, lat]',
        ...            radius=200,
        ...            elevation_scale=4,
        ...            elevation_range=[0, 1000],
        ...            pickable=True,
        ...            extruded=True,
        ...         ),
        ...         pdk.Layer(
        ...             'ScatterplotLayer',
        ...             data=df,
        ...             get_position='[lon, lat]',
        ...             get_color='[200, 30, 0, 160]',
        ...             get_radius=200,
        ...         ),
        ...     ],
        ... ))

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
           height: 530px

        """
        import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart

        deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width)

    @_with_element
    def table(self, element, data=None):
        """Display a static table.

        This differs from `st.dataframe` in that the table in this case is
        static: its entire contents are just laid out directly on the page.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            The table data.

        Example
        -------
        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 5),
        ...    columns=('col %d' % i for i in range(5)))
        ...
        >>> st.table(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq
           height: 480px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, element.table)

    def add_rows(self, data=None, **kwargs):
        """Concatenate a dataframe to the bottom of the current one.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            Table to concat. Optional.
        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st.table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table.add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st.line_chart(df1)
        >>> my_chart.add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st.vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...       'some_fancy_name': df1,  # <-- named dataset
        ...      },
        ...     'data': {'name': 'some_fancy_name'},
        ... }),
        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException("Only existing elements can `add_rows`.")

        # Accept syntax st.add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st.add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()
        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows(). "
                "Command requires exactly one dataset"
            )

        # When doing add_rows on an element that does not already have data
        # (for example, st.line_chart() without any args), call the original
        # st.foo() element with new data instead of doing an add_rows().
        if (
            self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES
            and self._cursor.props["last_index"] is None
        ):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            st_method_name = self._cursor.props["delta_type"]
            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)
            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"], self._cursor.props["last_index"]
        )

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.parent_block.container = self._container
        msg.metadata.parent_block.path[:] = self._cursor.path
        msg.metadata.delta_id = self._cursor.index

        import streamlit.elements.data_frame_proto as data_frame_proto

        data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self


def _maybe_melt_data_for_add_rows(data, delta_type, last_index):
    import pandas as pd
    import streamlit.elements.data_frame_proto as data_frame_proto

    # For some delta types we have to reshape the data structure
    # otherwise the input data and the actual data used
    # by vega_lite will be different and it will throw an error.
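    # Concretely: the user's "wide" frame (one column per series) is melted
    # into "long" index/variable/value rows, and the appended rows' RangeIndex
    # is shifted to continue from last_index rather than restarting at 0.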
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
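Below is a minimal, self-contained sketch (not part of the module above) of the wide-to-long reshape that `_maybe_melt_data_for_add_rows` applies before appended rows reach a melted chart such as `st.line_chart`; the frame and column names are arbitrary example data:

```python
import pandas as pd

# Wide input, as a user would pass it: one column per series.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Long output: one (index, variable, value) row per data point, which is
# the shape the generated Vega-Lite spec consumes.
melted = pd.melt(df.reset_index(), id_vars=["index"])
print(melted)
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
```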
time_input
Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t)
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Allows us to create and absorb changes (aka Deltas) to elements."""

import functools
import json
import random
import textwrap
import numbers
import re
from datetime import datetime
from datetime import date
from datetime import time

from streamlit import caching
from streamlit import config
from streamlit import cursor
from streamlit import type_util
from streamlit.ReportThread import get_report_ctx
from streamlit.errors import DuplicateWidgetID
from streamlit.errors import StreamlitAPIException
from streamlit.errors import NoSessionContext
from streamlit.file_util import get_encoded_file_data
from streamlit.js_number import JSNumber
from streamlit.js_number import JSNumberBoundsException
from streamlit.proto import Alert_pb2
from streamlit.proto import Balloons_pb2
from streamlit.proto import BlockPath_pb2
from streamlit.proto import ForwardMsg_pb2
from streamlit.proto.NumberInput_pb2 import NumberInput
from streamlit.proto.TextInput_pb2 import TextInput
from streamlit.logger import get_logger
from streamlit.type_util import is_type

LOGGER = get_logger(__name__)

# Save the type built-in for when we override the name "type".
_type = type

MAX_DELTA_BYTES = 14 * 1024 * 1024  # 14MB

# List of Streamlit commands that perform a Pandas "melt" operation on
# input dataframes.
DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart")


def _wraps_with_cleaned_sig(wrapped, num_args_to_remove):
    """Simplify the function signature by removing arguments from it.

    Removes the first N arguments from function signature (where N is
    num_args_to_remove). This is useful since function signatures are visible
    in our user-facing docs, and many methods in DeltaGenerator have arguments
    that users have no access to.

    Note that "self" is ignored by default. So to remove both "self" and the
    next argument you'd pass num_args_to_remove=1.
    """
    # By passing (None, ...), we're removing (arg1, ...) from *args
    args_to_remove = (None,) * num_args_to_remove
    fake_wrapped = functools.partial(wrapped, *args_to_remove)

    fake_wrapped.__doc__ = wrapped.__doc__
    fake_wrapped.__name__ = wrapped.__name__  # type: ignore[attr-defined]
    fake_wrapped.__module__ = wrapped.__module__

    return functools.wraps(fake_wrapped)


def _with_element(method):
    """Wrap function and pass a NewElement proto to be filled.

    This is a function decorator.

    Converts a method with arguments (self, element, ...) into a method
    with arguments (self, ...). Thus, the instantiation of the element proto
    object and creation of the element are handled automatically.

    Parameters
    ----------
    method : callable
        A DeltaGenerator method with arguments (self, element, ...)

    Returns
    -------
    callable
        A new DeltaGenerator method with arguments (self, ...)

    """

    @_wraps_with_cleaned_sig(method, 1)  # Remove self and element from sig.
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
        output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.
        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphviz graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
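        # (As the docstring above notes: with sharing='streamlit' the figure
        # and its dependencies are serialized into the app's own message, so
        # the chart works offline; any other sharing mode sends the figure to
        # Plotly's servers and embeds the result.)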
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
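# (Why JSNumber: widget values round-trip through the browser, and JavaScript numbers are IEEE-754 doubles, so integers of magnitude above 2**53 - 1 lose precision on the frontend; the checks below reject such bounds early.)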
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None List of allowed extensions, e.g. ['png', 'jpg']. By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. 
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text area widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) # MASKED: time_input function (lines 2341-2392) @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date or tuple of datetime.date The current value of the date input widget; a tuple of dates when a range is selected. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. 
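# (None means "today": the current local date at the moment the widget first renders.)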
if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. 
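# (min_value, max_value and step may each be None at this point, so the int/float checks below deliberately accept NoneType as compatible with either numeric kind.)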
args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. 
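# A float is read as a fraction and scaled to a whole-number percentage below (0.42 -> 42); an int is taken as a percentage directly and must already lie in [0, 100].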
if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. 
- "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. 
Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). 
elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
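# To make the reshaping above concrete: a minimal sketch (plain pandas,
# outside Streamlit; the two-column frame is illustrative) of the
# wide-to-long melt that _maybe_melt_data_for_add_rows applies for the
# commands in DELTAS_TYPES_THAT_MELT_DATAFRAMES.
import pandas as pd

wide = pd.DataFrame({"a": [1, 2], "b": [3, 4]})  # wide: one column per series
# The index becomes an explicit column and the remaining columns are
# stacked into (variable, value) pairs, the long form the charts consume.
long_form = pd.melt(wide.reset_index(), id_vars=["index"])
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4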
@_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time or datetime.datetime The value of this widget when it first renders. A datetime is truncated to its time component. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either a datetime or a time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." ) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value
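# Note on precision: the default above is serialized with strftime("%H:%M") and the frontend value is parsed back with the same "%H:%M" format, so seconds and microseconds on the supplied value are dropped; this version of st.time_input works at minute granularity.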
2341
2392
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
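        delta_type : str
            Name of the DeltaGenerator method creating this element
            (e.g. "text"). Recorded on the returned element's locked cursor.
        last_index : int or None
            For the commands listed in DELTAS_TYPES_THAT_MELT_DATAFRAMES, the
            last index of the input data; None otherwise. Also recorded on
            the returned element's locked cursor.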
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. 
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just plot
        this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed in
            the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be easily
        translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from a string in GraphViz's Dot language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
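        # A quick illustration of the `sharing` parameter documented above
        # (`fig` is a hypothetical Plotly figure): st.plotly_chart(fig) embeds
        # everything locally, while st.plotly_chart(fig, sharing="private")
        # uploads the figure to Plotly's servers (this requires a Plotly
        # account) and embeds the hosted result.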
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
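            For example, `st.video("https://www.youtube.com/watch?v=<video-id>")`
            (with a real video id substituted) renders the embedded YouTube
            player for that video.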
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
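            For example, `format_func=str.upper` would display the option
            "Green" as "GREEN" in the dropdown, while the call would still
            return "Green".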
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
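        # (Illustrative note: the frontend stores numbers as IEEE-754 doubles,
        # so e.g. an integer bound beyond +/-(2**53 - 1) cannot be represented
        # exactly there, and the validators below will reject it.)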
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. 
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
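        # Rationale (an aside, not from the original source): these values are
        # ultimately handled by the browser as JavaScript numbers, which can
        # only represent integers exactly up to roughly 2**53 - 1
        # (Number.MAX_SAFE_INTEGER), so out-of-range values are rejected here
        # rather than silently losing precision on the frontend.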
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.

                - "type" : str
                  One of the Deck.GL layer types that are currently supported
                  by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                  PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                  TextLayer.

                - Plus anything accepted by that layer type. The exact keys
                  that are accepted depend on the "type" field, above. For
                  example, for ScatterplotLayer you can set fields like
                  "opacity", "filled", "stroked", and so on.

                  In addition, Deck.GL's documentation for ScatterplotLayer
                  shows you can use a "getRadius" field to individually set
                  the radius of each circle in the plot. So here you would
                  set "getRadius": "my_column" where "my_column" is the name
                  of the column containing the radius data.

                  For things like "getPosition", which expect an array rather
                  than a scalar value, we provide alternates that make the
                  API simpler to use with dataframes:

                  - Instead of "getPosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getSourcePosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getTargetPosition" : use "getTargetLatitude"
                    and "getTargetLongitude".
                  - Instead of "getColor" : use "getColorR", "getColorG",
                    "getColorB", and (optionally) "getColorA", for red, green,
                    blue and alpha.
                  - Instead of "getSourceColor" : use the same as above.
                  - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. 
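    # Illustration (the column names are hypothetical): a wide frame with
    # columns ['a', 'b'] is melted into a long frame with columns
    # ['index', 'variable', 'value'], one row per (index, column) pair, which
    # is the layout the generated Vega-Lite specs consume.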
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
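For reference, a minimal self-contained sketch (not part of the library code
above) of the reshape that `_maybe_melt_data_for_add_rows` performs for the
melting chart types; the frame and its column names are illustrative only:

```python
import pandas as pd

# Wide frame: one column per series, as passed to st.line_chart and friends.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Melt into long form: one (index, variable, value) row per cell.
long_df = pd.melt(df.reset_index(), id_vars=["index"])

print(long_df)
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
```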
map
Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px
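If the auto-zoom picks a level you don't want, pass `zoom` explicitly (the
value below is illustrative):

>>> st.map(df, zoom=12)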
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
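    # The wrapper below becomes the public method: it records the last index
    # of any incoming dataframe (used later by add_rows for the chart types
    # that melt their data), then hands a `marshall_element` closure to
    # `_enqueue_new_element_delta`, which creates the NewElement proto and
    # passes it to the wrapped method as its `element` argument.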
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)

    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )

    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
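        # Note (approximate behavior, not from the original source): with
        # sharing="streamlit" the figure is serialized into the app's
        # protobuf and rendered client-side; the other modes upload the
        # figure to Plotly's servers first, which is why they need Plotly
        # credentials.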
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
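# For example, JavaScript represents all numbers as 64-bit floats, so
# integers are only exact up to Number.MAX_SAFE_INTEGER (2**53 - 1); an
# int bound beyond that would be rounded by the frontend, hence the
# explicit validation below.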
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. 
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
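# Unlike in st.slider above, `value` and `step` are validated below as
# well: `min_value` and `max_value` may be None here, so the other
# arguments are not already known to lie inside a checked range.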
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True # MASKED: map function (lines 2743-2788) @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". 
- "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... }, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! 
(for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... 
}), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
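To make the melt step concrete: `_maybe_melt_data_for_add_rows` converts the wide frame a user passes to `st.line_chart` and friends into the long `variable`/`value` form that the generated Vega-Lite spec reads from. A minimal sketch of that reshaping, using an illustrative frame (the column names are not taken from this module):

```python
import pandas as pd

# Wide form, one column per series: what a user hands to st.line_chart.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# The same call the helper above makes: keep the index as an "index"
# column and stack the remaining columns into variable/value pairs.
melted = pd.melt(df.reset_index(), id_vars=["index"])
print(melted)
#    index variable  value
# 0      0        a      1
# 1      1        a      2
# 2      0        b      3
# 3      1        b      4
```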
@_with_element
def map(self, element, data=None, zoom=None, use_container_width=True):
    """Display a map with points on it.

    This is a wrapper around st.pydeck_chart to quickly create scatterplot
    charts on top of a map, with auto-centering and auto-zoom.

    When using this command, we advise all users to use a personal Mapbox
    token. This ensures the map tiles used in this chart are more robust.
    You can do this with the mapbox.token config option.

    To get a token for yourself, create an account at https://mapbox.com.
    It's free! (for moderate usage levels) See
    https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
    for more info on how to set config options.

    Parameters
    ----------
    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
        The data to be plotted. Must have columns called 'lat', 'lon',
        'latitude', or 'longitude'.
    zoom : int
        Zoom level as specified in
        https://wiki.openstreetmap.org/wiki/Zoom_levels
    use_container_width : bool
        If True, set the chart width to the column width.

    Example
    -------
    >>> import pandas as pd
    >>> import numpy as np
    >>>
    >>> df = pd.DataFrame(
    ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
    ...     columns=['lat', 'lon'])
    >>>
    >>> st.map(df)

    .. output::
       https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
       height: 600px

    """
    import streamlit.elements.map as streamlit_map

    element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
    element.deck_gl_json_chart.use_container_width = use_container_width
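The actual marshalling is done by `streamlit.elements.map.to_deckgl_json`, which is not part of this file. As a rough sketch of the auto-centering behavior the docstring promises (an assumption about the approach, not the real implementation), a viewport could be derived from the data roughly like this:

```python
import pandas as pd

def guess_viewport(df: pd.DataFrame, zoom: int = 11) -> dict:
    """Hypothetical helper: center a deck.gl viewport on the mean coordinate.

    The real logic lives in streamlit/elements/map.py and may differ in
    detail; this only illustrates what auto-centering means for st.map.
    """
    return {
        "latitude": float(df["lat"].mean()),
        "longitude": float(df["lon"].mean()),
        "zoom": zoom,
        "pitch": 0,
    }
```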
2743
2788
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
           height: 100px

        """
        element.markdown.body = "# %s" % _clean_text(body)

    @_with_element
    def header(self, element, body):
        """Display text in header formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.header('This is a header')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
           height: 100px

        """
        element.markdown.body = "## %s" % _clean_text(body)

    @_with_element
    def subheader(self, element, body):
        """Display text in subheader formatting.

        Parameters
        ----------
        body : str
            The text to display.

        Example
        -------
        >>> st.subheader('This is a subheader')

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
           height: 100px

        """
        element.markdown.body = "### %s" % _clean_text(body)

    @_with_element
    def error(self, element, body):
        """Display error message.

        Parameters
        ----------
        body : str
            The error text to display.

        Example
        -------
        >>> st.error('This is an error')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.ERROR

    @_with_element
    def warning(self, element, body):
        """Display warning message.

        Parameters
        ----------
        body : str
            The warning text to display.

        Example
        -------
        >>> st.warning('This is a warning')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.WARNING

    @_with_element
    def info(self, element, body):
        """Display an informational message.

        Parameters
        ----------
        body : str
            The info text to display.

        Example
        -------
        >>> st.info('This is a purely informational message')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.INFO

    @_with_element
    def success(self, element, body):
        """Display a success message.

        Parameters
        ----------
        body : str
            The success text to display.

        Example
        -------
        >>> st.success('This is a success message!')

        """
        element.alert.body = _clean_text(body)
        element.alert.format = Alert_pb2.Alert.SUCCESS

    @_with_element
    def help(self, element, obj):
        """Display object's doc string, nicely formatted.

        Displays the doc string for this object.

        Parameters
        ----------
        obj : Object
            The object whose docstring should be displayed.

        Example
        -------

        Don't remember how to initialize a dataframe? Try this:

        >>> st.help(pandas.DataFrame)

        Want to quickly check what datatype is output by a certain function?
        Try:

        >>> x = my_poorly_documented_function()
        >>> st.help(x)

        """
        import streamlit.elements.doc_string as doc_string

        doc_string.marshall(element, obj)

    @_with_element
    def exception(self, element, exception):
        """Display an exception.

        Parameters
        ----------
        exception : Exception
            The exception to display.

        Example
        -------
        >>> e = RuntimeError('This is an exception of type RuntimeError')
        >>> st.exception(e)

        """
        import streamlit.elements.exception_proto as exception_proto

        exception_proto.marshall(element.exception, exception)
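    # Unlike most elements above, `dataframe` (below) does not use the
    # `@_with_element` decorator: it calls `_enqueue_new_element_delta`
    # directly so it can forward the `element_width`/`element_height`
    # metadata. A minimal sketch of that pattern (the "my_text" name and the
    # proto field chosen are illustrative, not part of this module):
    #
    #     def my_text(self, body, width=None):
    #         def marshall(element):
    #             element.text.body = str(body)  # fill the NewElement proto
    #
    #         return self._enqueue_new_element_delta(
    #             marshall, "text", element_width=width
    #         )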
    def dataframe(self, data=None, width=None, height=None):
        """Display a dataframe as an interactive table.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            The data to display.

            If 'data' is a pandas.Styler, it will be used to style its
            underlying DataFrame. Streamlit supports custom cell values and
            colors. (It does not support some of the more exotic pandas
            styling features, like bar charts, hovering, and captions.)
            Styler support is experimental!
        width : int or None
            Desired width of the UI element expressed in pixels. If None, a
            default width based on the page width is used.
        height : int or None
            Desired height of the UI element expressed in pixels. If None, a
            default height is used.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df)  # Same as st.write(df)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ
           height: 330px

        >>> st.dataframe(df, 200, 100)

        You can also pass a Pandas Styler object to change the style of
        the rendered DataFrame:

        >>> df = pd.DataFrame(
        ...    np.random.randn(10, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> st.dataframe(df.style.highlight_max(axis=0))

        .. output::
           https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby
           height: 285px

        """
        import streamlit.elements.data_frame_proto as data_frame_proto

        def set_data_frame(delta):
            data_frame_proto.marshall_data_frame(data, delta.data_frame)

        return self._enqueue_new_element_delta(
            set_data_frame, "dataframe", element_width=width, element_height=height
        )

    @_with_element
    def line_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a line chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict
            or None
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.line_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("line", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def area_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display an area chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(20, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> st.area_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("area", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)
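    # line_chart, area_chart, and bar_chart are all listed in
    # DELTAS_TYPES_THAT_MELT_DATAFRAMES, so `_with_element` records the input
    # data's last index for them. A minimal usage sketch (assuming the public
    # `add_rows` API, which is defined outside this excerpt): the recorded
    # last_index is what lets appended rows continue the x-axis instead of
    # restarting at 0.
    #
    #     chart = st.line_chart(pd.DataFrame({"a": [1.0, 2.0, 3.0]}))
    #     chart.add_rows(pd.DataFrame({"a": [4.0, 5.0]}))  # x continues past 2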
    @_with_element
    def bar_chart(
        self, element, data=None, width=0, height=0, use_container_width=True
    ):
        """Display a bar chart.

        This is just syntax-sugar around st.altair_chart. The main difference
        is this command uses the data's own column and indices to figure out
        the chart's spec. As a result this is easier to use for many "just
        plot this" scenarios, while being less customizable.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict
            Data to be plotted.

        width : int
            The chart width in pixels. If 0, selects the width automatically.

        height : int
            The chart height in pixels. If 0, selects the height
            automatically.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the width argument.

        Example
        -------
        >>> chart_data = pd.DataFrame(
        ...     np.random.randn(50, 3),
        ...     columns=["a", "b", "c"])
        ...
        >>> st.bar_chart(chart_data)

        .. output::
           https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk
           height: 220px

        """
        import streamlit.elements.altair as altair

        chart = altair.generate_chart("bar", data, width, height)
        altair.marshall(element.vega_lite_chart, chart, use_container_width)

    @_with_element
    def vega_lite_chart(
        self,
        element,
        data=None,
        spec=None,
        width=0,
        use_container_width=False,
        **kwargs,
    ):
        """Display a chart using the Vega-Lite library.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
            or None
            Either the data to be plotted or a Vega-Lite spec containing the
            data (which more closely follows the Vega-Lite API).

        spec : dict or None
            The Vega-Lite spec for the chart. If the spec was already passed
            in the previous argument, this must be set to None. See
            https://vega.github.io/vega-lite/docs/ for more info.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Vega-Lite
            spec. Please refer to the Vega-Lite documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Vega-Lite's native `width` value.

        **kwargs : any
            Same as spec, but as keywords.

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        >>>
        >>> st.vega_lite_chart(df, {
        ...     'mark': {'type': 'circle', 'tooltip': True},
        ...     'encoding': {
        ...         'x': {'field': 'a', 'type': 'quantitative'},
        ...         'y': {'field': 'b', 'type': 'quantitative'},
        ...         'size': {'field': 'c', 'type': 'quantitative'},
        ...         'color': {'field': 'c', 'type': 'quantitative'},
        ...     },
        ... })

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Vega-Lite usage without Streamlit can be found at
        https://vega.github.io/vega-lite/examples/. Most of those can be
        easily translated to the syntax shown above.

        """
        import streamlit.elements.vega_lite as vega_lite

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html"
            )

        vega_lite.marshall(
            element.vega_lite_chart,
            data,
            spec,
            use_container_width=use_container_width,
            **kwargs,
        )
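    # Note that `altair_chart` (below) marshals into the same
    # `element.vega_lite_chart` proto as `vega_lite_chart` (above): an Altair
    # Chart is essentially a Python builder for a Vega-Lite spec. A rough
    # equivalence sketch (spec abbreviated, field types elided):
    #
    #     c = alt.Chart(df).mark_circle().encode(x='a', y='b')
    #     st.altair_chart(c)
    #     # ~= st.vega_lite_chart(df, {
    #     #        'mark': 'circle',
    #     #        'encoding': {'x': {'field': 'a', ...},
    #     #                     'y': {'field': 'b', ...}},
    #     #    })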
    @_with_element
    def altair_chart(self, element, altair_chart, width=0, use_container_width=False):
        """Display a chart using the Altair library.

        Parameters
        ----------
        altair_chart : altair.vegalite.v2.api.Chart
            The Altair chart object to display.

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Altair
            spec. Please refer to the Altair documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Altair's native `width` value.

        Example
        -------

        >>> import pandas as pd
        >>> import numpy as np
        >>> import altair as alt
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(200, 3),
        ...     columns=['a', 'b', 'c'])
        ...
        >>> c = alt.Chart(df).mark_circle().encode(
        ...     x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
        >>>
        >>> st.altair_chart(c, use_container_width=True)

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
           height: 200px

        Examples of Altair charts can be found at
        https://altair-viz.github.io/gallery/.

        """
        import streamlit.elements.altair as altair

        if width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.altair_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html"
            )

        altair.marshall(
            element.vega_lite_chart,
            altair_chart,
            use_container_width=use_container_width,
        )

    @_with_element
    def graphviz_chart(
        self, element, figure_or_dot, width=0, height=0, use_container_width=False
    ):
        """Display a graph using the dagre-d3 library.

        Parameters
        ----------
        figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str
            The Graphlib graph object or dot string to display

        width : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the width directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        height : number
            Deprecated. If != 0 (default), will show an alert.
            From now on you should set the height directly in the Graphviz
            spec. Please refer to the Graphviz documentation for details.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        Example
        -------

        >>> import streamlit as st
        >>> import graphviz as graphviz
        >>>
        >>> # Create a graphlib graph object
        >>> graph = graphviz.Digraph()
        >>> graph.edge('run', 'intr')
        >>> graph.edge('intr', 'runbl')
        >>> graph.edge('runbl', 'run')
        >>> graph.edge('run', 'kernel')
        >>> graph.edge('kernel', 'zombie')
        >>> graph.edge('kernel', 'sleep')
        >>> graph.edge('kernel', 'runmem')
        >>> graph.edge('sleep', 'swap')
        >>> graph.edge('swap', 'runswap')
        >>> graph.edge('runswap', 'new')
        >>> graph.edge('runswap', 'runmem')
        >>> graph.edge('new', 'runmem')
        >>> graph.edge('sleep', 'runmem')
        >>>
        >>> st.graphviz_chart(graph)

        Or you can render the chart from the graph using GraphViz's Dot
        language:

        >>> st.graphviz_chart('''
            digraph {
                run -> intr
                intr -> runbl
                runbl -> run
                run -> kernel
                kernel -> zombie
                kernel -> sleep
                kernel -> runmem
                sleep -> swap
                swap -> runswap
                runswap -> new
                runswap -> runmem
                new -> runmem
                sleep -> runmem
            }
        ''')

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
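        # As the docstring above notes, a matplotlib.figure.Figure is also
        # accepted and converted to a Plotly figure before marshalling. A
        # minimal usage sketch (assuming matplotlib and numpy are imported as
        # plt and np):
        #
        #     fig, ax = plt.subplots()
        #     ax.hist(np.random.randn(100), bins=20)
        #     st.plotly_chart(fig)  # rendered via Plotly, not st.pyplot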
        import streamlit.elements.plotly_chart as plotly_chart

        if width != 0 and height != 0:
            import streamlit as st

            st.warning(
                "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/"
            )
        elif width != 0:
            import streamlit as st

            st.warning(
                "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/"
            )
        elif height != 0:
            import streamlit as st

            st.warning(
                "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/"
            )

        plotly_chart.marshall(
            element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs
        )

    @_with_element
    def pyplot(self, element, fig=None, clear_figure=None, **kwargs):
        """Display a matplotlib.pyplot figure.

        Parameters
        ----------
        fig : Matplotlib Figure
            The figure to plot. When this argument isn't specified, which is
            the usual case, this function will render the global plot.

        clear_figure : bool
            If True, the figure will be cleared after being rendered.
            If False, the figure will not be cleared after being rendered.
            If left unspecified, we pick a default based on the value of
            `fig`.

            * If `fig` is set, defaults to `False`.

            * If `fig` is not set, defaults to `True`. This simulates
              Jupyter's approach to matplotlib rendering.

        **kwargs : any
            Arguments to pass to Matplotlib's savefig function.

        Example
        -------
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>>
        >>> arr = np.random.normal(1, 1, size=100)
        >>> plt.hist(arr, bins=20)
        >>>
        >>> st.pyplot()

        .. output::
           https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB
           height: 530px

        Notes
        -----
        Matplotlib supports several different types of "backends". If you're
        getting an error using Matplotlib with Streamlit, try setting your
        backend to "TkAgg"::

            echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc

        For more information, see https://matplotlib.org/faq/usage_faq.html.

        """
        import streamlit.elements.pyplot as pyplot

        pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs)

    @_with_element
    def bokeh_chart(self, element, figure, use_container_width=False):
        """Display an interactive Bokeh chart.

        Bokeh is a charting library for Python. The arguments to this
        function closely follow the ones for Bokeh's `show` function. You can
        find more about Bokeh at https://bokeh.pydata.org.

        Parameters
        ----------
        figure : bokeh.plotting.figure.Figure
            A Bokeh figure to plot.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over Bokeh's native `width` value.

        To show Bokeh charts in Streamlit, just call `st.bokeh_chart`
        wherever you would call Bokeh's `show`.

        Example
        -------
        >>> import streamlit as st
        >>> from bokeh.plotting import figure
        >>>
        >>> x = [1, 2, 3, 4, 5]
        >>> y = [6, 7, 2, 4, 5]
        >>>
        >>> p = figure(
        ...     title='simple line example',
        ...     x_axis_label='x',
        ...     y_axis_label='y')
        ...
        >>> p.line(x, y, legend='Trend', line_width=2)
        >>>
        >>> st.bokeh_chart(p, use_container_width=True)

        ..
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        [str]
            A list with the selected options

        Example
        -------
        >>> options = st.multiselect(
        ...     'What are your favorite colors',
        ...     ['Green', 'Yellow', 'Red', 'Blue'],
        ...     ['Yellow', 'Red'])
        >>>
        >>> st.write('You selected:', options)

        .. note::
           User experience can be degraded for large lists of `options`
           (100+), as this widget is not designed to handle arbitrary text
           search efficiently. See this
           `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_
           on the Streamlit community forum for more information and
           `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_
           for updates on the issue.

        """

        # Perform validation checks and return indices based on the default
        # values.
        def _check_and_convert_to_indices(options, default_values):
            if default_values is None and None not in options:
                return None

            if not isinstance(default_values, list):
                # This check must come before the `elif not default_values`
                # branch below, because calling `if not x` when x is a
                # pd.Series or np.array raises a ValueError.
                if is_type(default_values, "numpy.ndarray") or is_type(
                    default_values, "pandas.core.series.Series"
                ):
                    default_values = list(default_values)
                elif not default_values:
                    default_values = [default_values]
                else:
                    default_values = list(default_values)

            for value in default_values:
                if value not in options:
                    raise StreamlitAPIException(
                        "Every Multiselect default value must exist in options"
                    )

            return [options.index(value) for value in default_values]

        indices = _check_and_convert_to_indices(options, default)
        element.multiselect.label = label
        default_value = [] if indices is None else indices
        element.multiselect.default[:] = default_value
        element.multiselect.options[:] = [
            str(format_func(option)) for option in options
        ]

        ui_value = _get_widget_ui_value("multiselect", element, user_key=key)
        current_value = ui_value.value if ui_value is not None else default_value
        return [options[i] for i in current_value]

    @_with_element
    def radio(self, element, label, options, index=0, format_func=str, key=None):
        """Display a radio button widget.

        Parameters
        ----------
        label : str
            A short label explaining to the user what this radio group
            is for.
        options : list, tuple, numpy.ndarray, or pandas.Series
            Labels for the radio options. This will be cast to str internally
            by default.
        index : int
            The index of the preselected option on first render.
        format_func : function
            Function to modify the display of radio options. It receives
            the raw option as an argument and should output the label to be
            shown for that option. This has no impact on the return value of
            the radio.
        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        any
            The selected option.

        Example
        -------
        >>> genre = st.radio(
        ...     "What\'s your favorite movie genre",
        ...     ('Comedy', 'Drama', 'Documentary'))
        >>>
        >>> if genre == 'Comedy':
        ...     st.write('You selected comedy.')
        ... else:
        ...
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
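        # (The practical effect: bounds that the browser's JS number type
        # cannot represent exactly are rejected up front. For example, an
        # integer bound beyond roughly ±2**53 would presumably fail here with
        # a StreamlitAPIException instead of silently losing precision on the
        # frontend:
        #
        #     st.slider("big", min_value=0, max_value=2 ** 60)  # rejected
        # )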
        try:
            if all_ints:
                JSNumber.validate_int_bounds(min_value, "`min_value`")
                JSNumber.validate_int_bounds(max_value, "`max_value`")
            else:
                JSNumber.validate_float_bounds(min_value, "`min_value`")
                JSNumber.validate_float_bounds(max_value, "`max_value`")
        except JSNumberBoundsException as e:
            raise StreamlitAPIException(str(e))

        # Set format default.
        if format is None:
            if all_ints:
                format = "%d"
            else:
                format = "%0.2f"

        # It would be great if we could guess the number of decimal places
        # from the `step` argument, but this would only be meaningful if step
        # were a decimal. As a possible improvement we could make this
        # function accept decimals and/or use some heuristics for floats.

        element.slider.label = label
        element.slider.format = format
        element.slider.default[:] = [value] if single_value else value
        element.slider.min = min_value
        element.slider.max = max_value
        element.slider.step = step

        ui_value = _get_widget_ui_value("slider", element, user_key=key)

        # Convert the current value to the appropriate type.
        current_value = ui_value if ui_value is not None else value

        # Cast ui_value to the same type as the input arguments.
        if ui_value is not None:
            current_value = getattr(ui_value, "value")

            # Convert float array into int array if the rest of the arguments
            # are ints.
            if all_ints:
                current_value = list(map(int, current_value))

            # If there is only one value in the array, destructure it into a
            # single variable.
            current_value = current_value[0] if single_value else current_value

        return current_value if single_value else tuple(current_value)

    @_with_element
    def file_uploader(
        self, element, label, type=None, encoding="auto", key=None,
    ):
        """Display a file uploader widget.

        By default, uploaded files are limited to 200MB. You can configure
        this using the `server.maxUploadSize` config option.

        Parameters
        ----------
        label : str or None
            A short label explaining to the user what this file uploader
            is for.

        type : str or list of str or None
            Array of allowed extensions. ['png', 'jpg']
            By default, all extensions are allowed.

        encoding : str or None
            The encoding to use when opening textual files (i.e. non-binary).
            For example: 'utf-8'. If set to 'auto', will try to guess the
            encoding. If None, will assume the file is binary.

        key : str
            An optional string to use as the unique key for the widget.
            If this is omitted, a key will be generated for the widget
            based on its content. Multiple widgets of the same type may
            not share the same key.

        Returns
        -------
        BytesIO or StringIO or list of BytesIO/StringIO or None
            If no file has been uploaded, returns None. Otherwise, returns
            the data for the uploaded file(s):
            - If the file is in a well-known textual format (or if the
              encoding parameter is set), the file data is a StringIO.
            - Otherwise the file data is BytesIO.
            - If multiple_files is True, a list of file data will be returned.

            Note that BytesIO/StringIO are "file-like", which means you can
            pass them anywhere where a file is expected!

        Examples
        --------
        >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
        >>> if uploaded_file is not None:
        ...     data = pd.read_csv(uploaded_file)
        ...     st.write(data)

        """
        # Don't release this just yet.
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
        zoom : int
            Zoom level as specified in
            https://wiki.openstreetmap.org/wiki/Zoom_levels

        Example
        -------
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> df = pd.DataFrame(
        ...     np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
        ...     columns=['lat', 'lon'])
        >>>
        >>> st.map(df)

        .. output::
           https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
           height: 600px

        """
        import streamlit.elements.map as streamlit_map

        element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom)
        element.deck_gl_json_chart.use_container_width = use_container_width

    @_with_element
    def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs):
        """Draw a map chart using the Deck.GL library.

        This API closely follows Deck.GL's JavaScript API
        (https://deck.gl/#/documentation), with a few small adaptations and
        some syntax sugar.

        When using this command, we advise all users to use a personal Mapbox
        token. This ensures the map tiles used in this chart are more robust.
        You can do this with the mapbox.token config option.

        To get a token for yourself, create an account at
        https://mapbox.com. It's free! (for moderate usage levels) See
        https://docs.streamlit.io/en/latest/cli.html#view-all-config-options
        for more info on how to set config options.

        Parameters
        ----------
        spec : dict
            Keys in this dict can be:

            - Anything accepted by Deck.GL's top level element, such as
              "viewport", "height", "width".

            - "layers": a list of dicts containing information to build a new
              Deck.GL layer in the map. Each layer accepts the following keys:

                - "data" : DataFrame
                  The data for the current layer.

                - "type" : str
                  One of the Deck.GL layer types that are currently supported
                  by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer,
                  PointCloudLayer, ScatterplotLayer, ScreenGridLayer,
                  TextLayer.

                - Plus anything accepted by that layer type. The exact keys
                  that are accepted depend on the "type" field, above. For
                  example, for ScatterplotLayer you can set fields like
                  "opacity", "filled", "stroked", and so on.

                  In addition, Deck.GL's documentation for ScatterplotLayer
                  shows you can use a "getRadius" field to individually set
                  the radius of each circle in the plot. So here you would
                  set "getRadius": "my_column" where "my_column" is the name
                  of the column containing the radius data.

                  For things like "getPosition", which expect an array rather
                  than a scalar value, we provide alternates that make the
                  API simpler to use with dataframes:

                  - Instead of "getPosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getSourcePosition" : use "getLatitude" and
                    "getLongitude".
                  - Instead of "getTargetPosition" : use "getTargetLatitude"
                    and "getTargetLongitude".
                  - Instead of "getColor" : use "getColorR", "getColorG",
                    "getColorB", and (optionally) "getColorA", for red, green,
                    blue and alpha.
                  - Instead of "getSourceColor" : use the same as above.
                  - Instead of "getTargetColor" : use "getTargetColorR", etc.

        use_container_width : bool
            If True, set the chart width to the column width. This takes
            precedence over the figure's native `width` value.

        **kwargs : any
            Same as spec, but as keywords. Keys are "unflattened" at the
            underscore characters. For example, foo_bar_baz=123 becomes
            foo={'bar': {'baz': 123}}.

        Example
        -------
        >>> st.deck_gl_chart(
        ...     viewport={
        ...         'latitude': 37.76,
        ...         'longitude': -122.4,
        ...         'zoom': 11,
        ...         'pitch': 50,
        ...     },
        ...     layers=[{
        ...         'type': 'HexagonLayer',
        ...         'data': df,
        ...         'radius': 200,
        ...         'elevationScale': 4,
        ...         'elevationRange': [0, 1000],
        ...         'pickable': True,
        ...         'extruded': True,
        ...
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. 
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
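The melt step above is the subtle part of `add_rows` for chart-type deltas: the new rows are given a `RangeIndex` that continues from `last_index`, and the wide frame is then melted into long `(index, variable, value)` form for Vega-Lite. A minimal standalone sketch of that reshaping (not Streamlit's actual helper; `melt_for_add_rows` is a hypothetical name):

```python
import pandas as pd

def melt_for_add_rows(data, last_index):
    # Continue the RangeIndex from where the existing chart data left off.
    if isinstance(data.index, pd.RangeIndex):
        step = data.index.step
        data = data.reset_index(drop=True)
        start = last_index + step
        stop = last_index + step + data.index.stop
        data.index = pd.RangeIndex(start=start, stop=stop, step=step)
        last_index = stop - 1
    # Melt wide columns into (index, variable, value) rows for Vega-Lite.
    index_name = data.index.name if data.index.name is not None else "index"
    data = pd.melt(data.reset_index(), id_vars=[index_name])
    return data, last_index

df_new = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
melted, last = melt_for_add_rows(df_new, last_index=9)
# melted has one (index, variable, value) row per cell, indexed 10..11,
# and last == 11, ready to seed the next add_rows call.
```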
ra_dec_format
Ra/Dec string formatting Converts the input string format of a right ascension/ declination coordinate to one recognizable by astroquery Args: val (str): string; an ra/dec expression formatted as "005313.81 +130955.0". Returns: string: the ra/dec coordinates re-formatted as "00h53m13.81s +13d09m55.0s"
#define functions that will extract the data from SDSS based on an input RA/DEC
from astroquery.sdss import SDSS
from astropy import coordinates as coords
import pandas as pd
from astroquery.ned import Ned
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Box1DKernel
import numpy as np
from astropy import units as u

# MASKED: ra_dec_format function (lines 13-35)

def extractor(position):
    """
    This function extracts the information from the SDSS database and returns
    a pandas dataframe with the query region.
    Please ensure that the 'position' input is formatted as '005313.81 +130955.0'
    extractor(str) --> pd.DataFrame
    """
    # convert the input position argument to the format recognized by astroquery.SDSS
    # position=ra_dec_format(position)
    # query the region and get the data
    position = ra_dec_format(position)
    pos = coords.SkyCoord(position, frame='icrs')
    data = SDSS.query_region(pos, spectro=True)
    return data.to_pandas()

def downloader(data):
    """
    This function uses the extracted information in order to download spectra,
    separating the data from the SDSS and BOSS.
    downloader(pd.DataFrame) --> [list(fits)]
    """
    # create an empty list
    spec_list=[]
    # iterate over the pandas dataframe
    for i in range(len(data)):
        results = SDSS.query_specobj(plate = data['plate'][i], mjd = data['mjd'][i], fiberID = data['fiberID'][i])
        # try if it can download the data (SDSS)
        try:
            spec = SDSS.get_spectra(matches=results)[0]
            spec_list.append(spec)
        # if it can't download, it's because it's from BOSS
        except:
            results.remove_column("instrument")
            results.add_column(name="instrument", col="eboss") # replace the instrument column
            spec = SDSS.get_spectra(matches=results)[0]
            spec_list.append(spec)
    return spec_list

# test=downloader(result)
# print(test)

# define a function which grabs the object's redshift from the Ned database (better calibration)- needed for plotting in the object's rest-frame
def redshift(position):
    # make sure to format the input position argument such that it is recognizable by astroquery.Ned
    # position=ra_dec_format(position)
    position = ra_dec_format(position)
    pos=coords.SkyCoord(position, frame='icrs') # create a position object
    ned_results=Ned.query_region(pos,equinox="J2000", radius=2*u.arcsecond) # query the database
    z=ned_results[0][6] # grab the redshift value from the query results
    return z

# define a function that transforms an object's wavelength array into the object's rest-frame
def redshift_correct(z, wavelengths):
    # takes as input the redshift and the array of wavelengths
    wavelengths_corrected = wavelengths/(z+1)
    return wavelengths_corrected

# define a function that transforms the results of downloader() into an array of data which will be plotted
def transform_data(spec_list, z):
    # takes as input a list of fits file results and the redshift of the object
    # iterate over each file and grab the important data
    #fluxes={} # containers for each of the data arrays to be plotted (will be lists of lists/arrays)
    #wavelengths={}
    #inverse_variances={} # <- dictionaries!
    dict={}
    for spec in spec_list:
        flux_array=[]
        wavelength_array=[]
        sigma_array=[]
        data=spec[1].data # this is the data part of the file
        #print(data.shape[0])
        #print(data)
        # store the appropriate columns in the designated containers - each row is a single spectrum
        for j in range(data.shape[0]):
            #print(data[j][0])
            #smoothedFlux=convolve(data[0],Box1DKernel(9)) # smooth the fluxes using a boxcar
            #print(smoothedFlux)
            flux_data = data[j][0]
            flux_array.append(flux_data)
            wavelengths_uncorrected=10**data[j][1] # the wavelengths (transformed from the log scale)
            #print(wavelengths_uncorrected)
            wavelengths_corrected=redshift_correct(z, wavelengths_uncorrected) # save the wavelengths after they have been scaled to the rest-frame
            #print(wavelengths_corrected)
            wavelength_array.append(wavelengths_corrected)
            inverse_variance=data[j][2] # the inverse variance of the flux
            one_over_sigma=inverse_variance**0.5
            sigma=1/one_over_sigma # the one-sigma uncertainty associated with the flux array
            sigma_array.append(sigma)
        smoothedFlux = convolve(flux_array,Box1DKernel(9))
        if 'flux' in dict:
            dict['flux'].append(smoothedFlux)
        else:
            dict['flux'] = [smoothedFlux]
        if 'wavelength' in dict:
            dict['wavelength'].append(wavelength_array)
        else:
            dict['wavelength'] = [wavelength_array]
        if '1sigma' in dict:
            dict['1sigma'].append(sigma_array)
        else:
            dict['1sigma'] = [sigma_array]
    # now return the nested dictionary with three keys: (flux, wavelength and sigma)
    # each key holds one entry per spec in spec_list, with all fluxes, wavelengths and sigmas
    return dict

def plot_spec(dict, radec, z):
    # takes as input the dictionary holding the data, the radec, and the redshift
    for i in range(len(dict['wavelength'])):
        # extract data
        wavelength = dict['wavelength'][i]
        sigma = dict['1sigma'][i]
        flux = dict['flux'][i]
        # instantiate a figure object
        fig=plt.figure()
        plt.title(str(radec)+str('; ')+'z={}'.format(z))
        plt.xlabel(r"Rest-frame Wavelength [$\AA$]")
        plt.ylabel(r"Flux [$10^{-17}$ erg$^{-1}$s$^{-1}$cm$^{-2}$$\AA^{-1}$]")
        plt.plot(wavelength, flux) # plot the actual data
        # now create upper and lower bounds on the uncertainty regions
        sigmaUpper=np.add(flux,sigma)
        sigmaLower=np.subtract(flux,sigma)
        plt.fill_between(wavelength, sigmaLower, sigmaUpper, color='grey', alpha=0.5)
        plt.show()

#TEST
radec='223812.39 +213203.4'
z=redshift(radec)
data=extractor(radec)
spec_list=downloader(data)
dic = transform_data(spec_list,z)
plot_spec(dic, radec, z)
def ra_dec_format(val): """ Ra/Dec string formatting Converts the input string format of a right ascension/ declination coordinate to one recognizable by astroquery Args: val (str): string; an ra/dec expression formatted as "005313.81 +130955.0". Returns: string: the ra/dec coordinates re-formatted as "00h53m13.81s +13d09m55.0s" """ #ra hour = val[0:2] min_ = val[2:4] sec = val[4:9] ra = hour+'h'+min_+'m'+sec+'s' #dec deg = val[9:13] min_d = val[13:15] sec_d = val[15:] dec = deg+'d'+min_d+'m'+sec_d+'s' return ra+" "+dec
13
35
#define functions that will extract the data from SDSS based on an input RA/DEC
from astroquery.sdss import SDSS
from astropy import coordinates as coords
import pandas as pd
from astroquery.ned import Ned
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Box1DKernel
import numpy as np
from astropy import units as u

def ra_dec_format(val):
    """
    Ra/Dec string formatting

    Converts the input string format of a right ascension/
    declination coordinate to one recognizable by astroquery

    Args:
        val (str): string; an ra/dec expression formatted as
        "005313.81 +130955.0".

    Returns:
        string: the ra/dec coordinates re-formatted as
        "00h53m13.81s +13d09m55.0s"
    """
    #ra
    hour = val[0:2]
    min_ = val[2:4]
    sec = val[4:9]
    ra = hour+'h'+min_+'m'+sec+'s'

    #dec
    deg = val[9:13]
    min_d = val[13:15]
    sec_d = val[15:]
    dec = deg+'d'+min_d+'m'+sec_d+'s'

    return ra+" "+dec

def extractor(position):
    """
    This function extracts the information from the SDSS database and returns
    a pandas dataframe with the query region.
    Please ensure that the 'position' input is formatted as '005313.81 +130955.0'
    extractor(str) --> pd.DataFrame
    """
    # convert the input position argument to the format recognized by astroquery.SDSS
    # position=ra_dec_format(position)
    # query the region and get the data
    position = ra_dec_format(position)
    pos = coords.SkyCoord(position, frame='icrs')
    data = SDSS.query_region(pos, spectro=True)
    return data.to_pandas()

def downloader(data):
    """
    This function uses the extracted information in order to download spectra,
    separating the data from the SDSS and BOSS.
    downloader(pd.DataFrame) --> [list(fits)]
    """
    # create an empty list
    spec_list=[]
    # iterate over the pandas dataframe
    for i in range(len(data)):
        results = SDSS.query_specobj(plate = data['plate'][i], mjd = data['mjd'][i], fiberID = data['fiberID'][i])
        # try if it can download the data (SDSS)
        try:
            spec = SDSS.get_spectra(matches=results)[0]
            spec_list.append(spec)
        # if it can't download, it's because it's from BOSS
        except:
            results.remove_column("instrument")
            results.add_column(name="instrument", col="eboss") # replace the instrument column
            spec = SDSS.get_spectra(matches=results)[0]
            spec_list.append(spec)
    return spec_list

# test=downloader(result)
# print(test)

# define a function which grabs the object's redshift from the Ned database (better calibration)- needed for plotting in the object's rest-frame
def redshift(position):
    # make sure to format the input position argument such that it is recognizable by astroquery.Ned
    # position=ra_dec_format(position)
    position = ra_dec_format(position)
    pos=coords.SkyCoord(position, frame='icrs') # create a position object
    ned_results=Ned.query_region(pos,equinox="J2000", radius=2*u.arcsecond) # query the database
    z=ned_results[0][6] # grab the redshift value from the query results
    return z

# define a function that transforms an object's wavelength array into the object's rest-frame
def redshift_correct(z, wavelengths):
    # takes as input the redshift and the array of wavelengths
    wavelengths_corrected = wavelengths/(z+1)
    return wavelengths_corrected

# define a function that transforms the results of downloader() into an array of data which will be plotted
def transform_data(spec_list, z):
    # takes as input a list of fits file results and the redshift of the object
    # iterate over each file and grab the important data
    #fluxes={} # containers for each of the data arrays to be plotted (will be lists of lists/arrays)
    #wavelengths={}
    #inverse_variances={} # <- dictionaries!
    dict={}
    for spec in spec_list:
        flux_array=[]
        wavelength_array=[]
        sigma_array=[]
        data=spec[1].data # this is the data part of the file
        #print(data.shape[0])
        #print(data)
        # store the appropriate columns in the designated containers - each row is a single spectrum
        for j in range(data.shape[0]):
            #print(data[j][0])
            #smoothedFlux=convolve(data[0],Box1DKernel(9)) # smooth the fluxes using a boxcar
            #print(smoothedFlux)
            flux_data = data[j][0]
            flux_array.append(flux_data)
            wavelengths_uncorrected=10**data[j][1] # the wavelengths (transformed from the log scale)
            #print(wavelengths_uncorrected)
            wavelengths_corrected=redshift_correct(z, wavelengths_uncorrected) # save the wavelengths after they have been scaled to the rest-frame
            #print(wavelengths_corrected)
            wavelength_array.append(wavelengths_corrected)
            inverse_variance=data[j][2] # the inverse variance of the flux
            one_over_sigma=inverse_variance**0.5
            sigma=1/one_over_sigma # the one-sigma uncertainty associated with the flux array
            sigma_array.append(sigma)
        smoothedFlux = convolve(flux_array,Box1DKernel(9))
        if 'flux' in dict:
            dict['flux'].append(smoothedFlux)
        else:
            dict['flux'] = [smoothedFlux]
        if 'wavelength' in dict:
            dict['wavelength'].append(wavelength_array)
        else:
            dict['wavelength'] = [wavelength_array]
        if '1sigma' in dict:
            dict['1sigma'].append(sigma_array)
        else:
            dict['1sigma'] = [sigma_array]
    # now return the nested dictionary with three keys: (flux, wavelength and sigma)
    # each key holds one entry per spec in spec_list, with all fluxes, wavelengths and sigmas
    return dict

def plot_spec(dict, radec, z):
    # takes as input the dictionary holding the data, the radec, and the redshift
    for i in range(len(dict['wavelength'])):
        # extract data
        wavelength = dict['wavelength'][i]
        sigma = dict['1sigma'][i]
        flux = dict['flux'][i]
        # instantiate a figure object
        fig=plt.figure()
        plt.title(str(radec)+str('; ')+'z={}'.format(z))
        plt.xlabel(r"Rest-frame Wavelength [$\AA$]")
        plt.ylabel(r"Flux [$10^{-17}$ erg$^{-1}$s$^{-1}$cm$^{-2}$$\AA^{-1}$]")
        plt.plot(wavelength, flux) # plot the actual data
        # now create upper and lower bounds on the uncertainty regions
        sigmaUpper=np.add(flux,sigma)
        sigmaLower=np.subtract(flux,sigma)
        plt.fill_between(wavelength, sigmaLower, sigmaUpper, color='grey', alpha=0.5)
        plt.show()

#TEST
radec='223812.39 +213203.4'
z=redshift(radec)
data=extractor(radec)
spec_list=downloader(data)
dic = transform_data(spec_list,z)
plot_spec(dic, radec, z)
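A quick hedged usage check for `ra_dec_format`: tracing the slicing, the space before the sign in the input lands in the degrees field, so the output carries an extra space, which `astropy.coordinates.SkyCoord` appears to tolerate when parsing.

```python
from astropy import coordinates as coords

formatted = ra_dec_format("005313.81 +130955.0")
print(formatted)  # "00h53m13.81s  +13d09m55.0s" (extra space carried from the input)
pos = coords.SkyCoord(formatted, frame="icrs")  # parses as an ICRS position
print(pos.ra.deg, pos.dec.deg)
```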
evaluate
A parser which recursively evaluates the custom_variables passed against the expression tree represented by segments, and returns the result. Args: segments(dict): The segments representing the expression tree custom_variables(dict): Key/value pair of variables Returns: bool(result): True or False
# Copyright 2019-2021 Wingify Software Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...enums.file_name_enum import FileNameEnum from .operand_evaluator import OperandEvaluator from ...enums.segments import OperandTypes, OperatorTypes from ...helpers.generic_util import get_key_value FILE = FileNameEnum.Services.SegmentEvaluator class SegmentEvaluator: """ Class to evaluate segments defined in VWO app """ def __init__(self): """ Initializes this class with VWOLogger and OperandEvaluator """ self.operand_evaluator = OperandEvaluator() # MASKED: evaluate function (lines 31-53)
def evaluate(self, segments, custom_variables): """A parser which recursively evaluates the custom_variables passed against the expression tree represented by segments, and returns the result. Args: segments(dict): The segments representing the expression tree custom_variables(dict): Key/value pair of variables Returns: bool(result): True or False """ operator, sub_segments = get_key_value(segments) if operator == OperatorTypes.NOT: return not self.evaluate(sub_segments, custom_variables) elif operator == OperatorTypes.AND: return all(self.evaluate(y, custom_variables) for y in sub_segments) elif operator == OperatorTypes.OR: return any(self.evaluate(y, custom_variables) for y in sub_segments) elif operator == OperandTypes.CUSTOM_VARIABLE: return self.operand_evaluator.evaluate_custom_variable(sub_segments, custom_variables) elif operator == OperandTypes.USER: return self.operand_evaluator.evaluate_user(sub_segments, custom_variables)
31
53
# Copyright 2019-2021 Wingify Software Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...enums.file_name_enum import FileNameEnum from .operand_evaluator import OperandEvaluator from ...enums.segments import OperandTypes, OperatorTypes from ...helpers.generic_util import get_key_value FILE = FileNameEnum.Services.SegmentEvaluator class SegmentEvaluator: """ Class to evaluate segments defined in VWO app """ def __init__(self): """ Initializes this class with VWOLogger and OperandEvaluator """ self.operand_evaluator = OperandEvaluator() def evaluate(self, segments, custom_variables): """A parser which recursively evaluates the custom_variables passed against the expression tree represented by segments, and returns the result. Args: segments(dict): The segments representing the expression tree custom_variables(dict): Key/value pair of variables Returns: bool(result): True or False """ operator, sub_segments = get_key_value(segments) if operator == OperatorTypes.NOT: return not self.evaluate(sub_segments, custom_variables) elif operator == OperatorTypes.AND: return all(self.evaluate(y, custom_variables) for y in sub_segments) elif operator == OperatorTypes.OR: return any(self.evaluate(y, custom_variables) for y in sub_segments) elif operator == OperandTypes.CUSTOM_VARIABLE: return self.operand_evaluator.evaluate_custom_variable(sub_segments, custom_variables) elif operator == OperandTypes.USER: return self.operand_evaluator.evaluate_user(sub_segments, custom_variables)
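To see the recursion in `evaluate` at work without the SDK, here is a self-contained sketch. The literal operator keys ("not"/"and"/"or"/"custom_variable") are assumptions standing in for the `OperatorTypes`/`OperandTypes` enums, and the leaf check is a simplified equality test rather than the real `OperandEvaluator`:

```python
def eval_segments(segments, custom_variables):
    # Each node of the expression tree is a single-key dict: {operator: operand}.
    (operator, sub), = segments.items()
    if operator == "not":
        return not eval_segments(sub, custom_variables)
    if operator == "and":
        return all(eval_segments(s, custom_variables) for s in sub)
    if operator == "or":
        return any(eval_segments(s, custom_variables) for s in sub)
    if operator == "custom_variable":
        # Simplified leaf: exact string match on one variable.
        key, expected = next(iter(sub.items()))
        return str(custom_variables.get(key)) == str(expected)
    return False

tree = {"and": [{"custom_variable": {"plan": "premium"}},
                {"not": {"custom_variable": {"country": "US"}}}]}
print(eval_segments(tree, {"plan": "premium", "country": "IN"}))  # True
print(eval_segments(tree, {"plan": "premium", "country": "US"}))  # False
```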
create_view
Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() # MASKED: create_view function (lines 45-55) @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == 
get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() def create_view(self, es_filter_fields): """Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_filter_fields = es_filter_fields return view def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() def create_view(self, es_range_filter_fields): """Create and return test view class instance Args: es_range_filter_fields ([ESFieldFilter]): filtering range fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_range_filter_fields = es_range_filter_fields return view def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() def create_view(self, es_search_fields): """Create and return test view class instance Args: es_search_fields ([ESFieldFilter]): search fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_search_fields = es_search_fields return view @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 
'test' ), ( 'search', {'q': 'test'}, '' ), ]) def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view
45
55
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') 
request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() def create_view(self, es_filter_fields): """Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_filter_fields = es_filter_fields return view def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() def create_view(self, es_range_filter_fields): """Create and return test view class instance Args: es_range_filter_fields ([ESFieldFilter]): filtering range fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_range_filter_fields = es_range_filter_fields return view def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() def create_view(self, es_search_fields): """Create and return test view class instance Args: es_search_fields ([ESFieldFilter]): search fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = 
DataDocType view.es_search_fields = es_search_fields return view @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 'test' ), ( 'search', {'q': 'test'}, '' ), ]) def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
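One detail the parametrized cases above rely on is `ESFieldFilter`'s label-to-name mapping: the query parameter is matched by `label`, while the Elasticsearch document field is addressed by `name` (falling back to `label` when `name` is None, per `test_es_field_filters`). A minimal illustration of wiring that into a throwaway view, in the style of the `create_view` helpers:

```python
view = ElasticAPIView()
view.es_model = DataDocType
view.es_filter_fields = [ESFieldFilter('active', name='is_active')]

request = rf.get('/test/')
request.query_params = {'active': 'False'}  # the param uses the label...

# ...but ElasticFieldsFilter().filter_search(request, search, view) filters
# on the 'is_active' document field, as the [3, 6, 8, 10] cases above show.
```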
create_view
Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') 
request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() # MASKED: create_view function (lines 169-180) def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() def create_view(self, es_range_filter_fields): """Create and return test view class instance Args: es_range_filter_fields ([ESFieldFilter]): filtering range fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_range_filter_fields = es_range_filter_fields return view def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() def create_view(self, es_search_fields): """Create and return test view class instance Args: es_search_fields ([ESFieldFilter]): search fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_search_fields = es_search_fields return view @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 'test' ), ( 
'search', {'q': 'test'}, '' ), ]) def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
    def create_view(self, es_filter_fields):
        """Create and return test view class instance

        Args:
            es_filter_fields ([ESFieldFilter]): filtering fields

        Returns:
            ElasticAPIView: test view instance
        """
        view = ElasticAPIView()
        view.es_model = DataDocType
        view.es_filter_fields = es_filter_fields
        return view
169
180
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') 
request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() def create_view(self, es_filter_fields): """Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_filter_fields = es_filter_fields return view def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() def create_view(self, es_range_filter_fields): """Create and return test view class instance Args: es_range_filter_fields ([ESFieldFilter]): filtering range fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_range_filter_fields = es_range_filter_fields return view def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() def create_view(self, es_search_fields): """Create and return test view class instance Args: es_search_fields ([ESFieldFilter]): search fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = 
DataDocType view.es_search_fields = es_search_fields return view @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 'test' ), ( 'search', {'q': 'test'}, '' ), ]) def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
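The `create_view` helpers in this record follow a simple test-double pattern: configuration lives as plain attributes on a bare `ElasticAPIView`, and the filter backend reads it back off the view. A minimal self-contained sketch of that pattern, with `SimpleView`, `make_view`, and `get_filter_fields` as illustrative stand-ins rather than the real rest_framework_elasticsearch API:

```python
class SimpleView(object):
    """Bare view carrying only the attributes a backend inspects."""
    es_filter_fields = ()


def make_view(filter_fields):
    # Mirrors create_view: attach configuration to a fresh view instance.
    view = SimpleView()
    view.es_filter_fields = tuple(filter_fields)
    return view


def get_filter_fields(view):
    # Mirrors backend.get_es_filter_fields(view): read config off the view.
    return getattr(view, 'es_filter_fields', ())


view = make_view([('active', 'is_active')])
assert get_filter_fields(view) == (('active', 'is_active'),)
```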
create_view
Create and return test view class instance

Args:
    es_range_filter_fields ([ESFieldFilter]): filtering range fields

Returns:
    ElasticAPIView: test view instance
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') 
request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() def create_view(self, es_filter_fields): """Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_filter_fields = es_filter_fields return view def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() # MASKED: create_view function (lines 245-256) def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() def create_view(self, es_search_fields): """Create and return test view class instance Args: es_search_fields ([ESFieldFilter]): search fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_search_fields = es_search_fields return view @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 'test' ), ( 'search', {'q': 'test'}, '' ), ]) 
def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
    def create_view(self, es_range_filter_fields):
        """Create and return test view class instance

        Args:
            es_range_filter_fields ([ESFieldFilter]): filtering range fields

        Returns:
            ElasticAPIView: test view instance
        """
        view = ElasticAPIView()
        view.es_model = DataDocType
        view.es_range_filter_fields = es_range_filter_fields
        return view
245
256
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') 
request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() def create_view(self, es_filter_fields): """Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_filter_fields = es_filter_fields return view def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() def create_view(self, es_range_filter_fields): """Create and return test view class instance Args: es_range_filter_fields ([ESFieldFilter]): filtering range fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_range_filter_fields = es_range_filter_fields return view def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() def create_view(self, es_search_fields): """Create and return test view class instance Args: es_search_fields ([ESFieldFilter]): search fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = 
DataDocType view.es_search_fields = es_search_fields return view @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 'test' ), ( 'search', {'q': 'test'}, '' ), ]) def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
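The range-filter cases in this record pair `from_<label>` and `to_<label>` query parameters with expected document ids. A hedged sketch of the translation those cases imply, using the same `elasticsearch_dsl` `Q` the test file already imports (`build_range_query` is a hypothetical helper; the real `ElasticFieldsRangeFilter` may assemble its query differently):

```python
from elasticsearch_dsl import Q


def build_range_query(label, params):
    """Translate from_<label>/to_<label> params into a range query."""
    bounds = {}
    if 'from_' + label in params:
        bounds['gte'] = params['from_' + label]
    if 'to_' + label in params:
        bounds['lte'] = params['to_' + label]
    # No bounds given: no filtering, matching the empty-params case above.
    return Q('range', **{label: bounds}) if bounds else None


assert build_range_query('score', {'from_score': '500', 'to_score': '600'}) == \
    Q('range', score={'gte': '500', 'lte': '600'})
assert build_range_query('score', {}) is None
```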
create_view
Create and return test view class instance

Args:
    es_search_fields ([ESFieldFilter]): search fields

Returns:
    ElasticAPIView: test view instance
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') 
request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() def create_view(self, es_filter_fields): """Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_filter_fields = es_filter_fields return view def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() def create_view(self, es_range_filter_fields): """Create and return test view class instance Args: es_range_filter_fields ([ESFieldFilter]): filtering range fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_range_filter_fields = es_range_filter_fields return view def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() # MASKED: create_view function (lines 302-313) @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 'test' ), ( 
'search', {'q': 'test'}, '' ), ]) def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
    def create_view(self, es_search_fields):
        """Create and return test view class instance

        Args:
            es_search_fields ([ESFieldFilter]): search fields

        Returns:
            ElasticAPIView: test view instance
        """
        view = ElasticAPIView()
        view.es_model = DataDocType
        view.es_search_fields = es_search_fields
        return view
302
313
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rest_framework.test import APIRequestFactory from elasticsearch_dsl import Q from rest_framework_elasticsearch.es_filters import ( ESFieldFilter, ElasticOrderingFilter, ElasticFieldsFilter, ElasticFieldsRangeFilter, ElasticSearchFilter) from rest_framework_elasticsearch.es_views import ElasticAPIView from .test_data import DataDocType, DATA from .utils import get_search_ids rf = APIRequestFactory() @pytest.mark.parametrize('dataset, expected', [ ( ('label', 'name', 'description'), ('label', 'name', 'description') ), ( ('label', None, 'description'), ('label', 'label', 'description') ), ( ('label', None, None), ('label', 'label', None) ) ]) def test_es_field_filters(dataset, expected): ffilter = ESFieldFilter(dataset[0], name=dataset[1], description=dataset[2]) assert expected == (ffilter.label, ffilter.name, ffilter.description) class TestElasticOrderingFilter: def setup_method(self): self.backend = ElasticOrderingFilter() def create_view(self, es_ordering_fields): """Create and return test view class instance Args: es_ordering_fields (tuple): ordering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_ordering_fields = es_ordering_fields return view @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("first_name", ("is_active", "active")), ("first_name", ("is_active", "active")), ), ( "first_name", ("first_name",) ) ]) def test_get_es_ordering_fields(self, es_ordering_fields, expected): view = self.create_view(es_ordering_fields) result = self.backend.get_es_ordering_fields(view) assert expected == result @pytest.mark.parametrize('es_ordering_fields, expected', [ ( ("is_active", "active"), ("is_active", "active") ), ( "first_name", ("first_name", "first_name") ) ]) def test_validation(self, es_ordering_fields, expected): result = ElasticOrderingFilter.validation(es_ordering_fields) assert expected == result def test_get_valid_fields_with_es_ordering_fields(self): es_ordering_fields = ( "first_name", "last_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) expected = [ ("first_name", "first_name"), ("last_name", "last_name"), ("is_active", "active") ] result = self.backend.get_valid_fields([], view) assert result == expected def test_get_valid_fields_without_es_ordering_fields(self): view = self.create_view(None) valid_fields = [] self.backend.get_default_valid_fields = lambda q, v: valid_fields result = self.backend.get_valid_fields([], view) assert result == valid_fields @pytest.mark.parametrize('fields, expected', [ ( ['first_name', 'last_name', '-active'], ['first_name', '-is_active'] ), ( ['+first_name', 'last_name', '#active'], ['+first_name'] ), ( ['+first_name', '-active'], ['+first_name', '-is_active'] ) ]) def test_remove_invalid_fields(self, fields, expected): es_ordering_fields = ( "first_name", ("is_active", "active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') result = self.backend.remove_invalid_fields([], fields, view, request) assert result == expected def test_filter_search(self, search): def get_expected(): """Return expected data items sorted by id""" items = [ ( item['_id'], item['_source']['first_name'], item['_source']['is_active'] ) for item in DATA ] items = sorted(items, key=lambda tup: (tup[1], not tup[2])) return items es_ordering_fields = ( ("first_name", "first_name"), ("is_active", "-active") ) view = self.create_view(es_ordering_fields) request = rf.get('/test/') 
request.query_params = {'ordering': 'first_name,active'} search = self.backend.filter_search(request, search, view) result = [ (item.meta.id, item.first_name, item.is_active) for item in search[:len(DATA)].execute() ] assert result == get_expected() class TestElasticFieldsFilter: def setup_method(self): self.backend = ElasticFieldsFilter() def create_view(self, es_filter_fields): """Create and return test view class instance Args: es_filter_fields ([ESFieldFilter]): filtering fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_filter_fields = es_filter_fields return view def test_get_es_filter_fields(self): es_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_filter_fields) result = self.backend.get_es_filter_fields(view) assert result == es_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('active', 'is_active')], {'active': 'False'}, [3, 6, 8, 10] ), ( [ ESFieldFilter('birthday') ], {'birthday': '1985-03-17T12:20:09'}, [1] ), ( [ESFieldFilter('skills')], {'skills': 'python'}, [1, 4, 5, 10] ), ( [ESFieldFilter('skills')], {'skills': 'python,ruby'}, [1, 4, 5, 6, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('active', 'is_active')], {'active': 'False', 'skills': 'python'}, [3, 6, 8, 10] ), ( [ESFieldFilter('score')], {'score': '200'}, [2, 13] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticFieldsRangeFilter: def setup_method(self): self.backend = ElasticFieldsRangeFilter() def create_view(self, es_range_filter_fields): """Create and return test view class instance Args: es_range_filter_fields ([ESFieldFilter]): filtering range fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = DataDocType view.es_range_filter_fields = es_range_filter_fields return view def test_get_es_filter_fields(self): es_range_filter_fields = ( ESFieldFilter('skills'), ESFieldFilter('active', 'is_active') ) view = self.create_view(es_range_filter_fields) result = self.backend.get_es_range_filter_fields(view) assert result == es_range_filter_fields @pytest.mark.parametrize('es_filter_fields, query_params, expected', [ ( [ESFieldFilter('score')], {'from_score': '500'}, [6, 7, 8, 10, 11] ), ( [ESFieldFilter('score')], {'to_score': '100'}, [1, 3, 5, 9, 12, 14] ), ( [ESFieldFilter('score')], {'from_score': '500', 'to_score': '600'}, [7, 8, 10] ), ( [ESFieldFilter('score')], {}, [int(item['_id']) for item in DATA] ), ]) def test_filter_search(self, search, es_filter_fields, query_params, expected): view = self.create_view(es_filter_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert sorted(result) == sorted(expected) class TestElasticSearchFilter: def setup_method(self): self.backend = ElasticSearchFilter() def create_view(self, es_search_fields): """Create and return test view class instance Args: es_search_fields ([ESFieldFilter]): search fields Returns: ElasticAPIView: test view instance """ view = ElasticAPIView() view.es_model = 
DataDocType view.es_search_fields = es_search_fields return view @pytest.mark.parametrize('search_param, query_params, expected', [ ( None, {'search': 'test'}, 'test' ), ( 'search', {'search': 'test'}, 'test' ), ( 'q', {'q': 'test'}, 'test' ), ( 'search', {'q': 'test'}, '' ), ]) def test_get_search_query(self, search_param, query_params, expected): request = rf.get('/test/') request.query_params = query_params if search_param: self.backend.search_param = search_param result = self.backend.get_search_query(request) assert result == expected def test_get_es_query(self): class TestElasticSearchFilter(ElasticSearchFilter): def get_es_query(self, s_query, s_fields, **kwargs): return Q("match", query=s_query, field=s_fields) s_query = "test" s_fields = "first_name" backend = TestElasticSearchFilter() result = backend.get_es_query(s_query, s_fields) expected = Q("match", query=s_query, field=s_fields) assert result == expected @pytest.mark.parametrize('es_search_fields, query_params, expected', [ ( ('first_name',), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia'}, [1] ), ( ('first_name', 'last_name', 'city'), {'search': 'Zofia Rome'}, [4, 7] ), ( ('description'), {'search': 'information'}, [2] ), ( ('description'), {'search': 'Ford Prefect'}, [2, 8, 10, 5, 6, 12] ), ( ('description'), {'search': 'Earth'}, [5, 3, 14] ), ( ('description'), {'search': 'The Hitchhiker’s Guide'}, [8] ), ]) def test_filter_search(self, search, es_search_fields, query_params, expected): view = self.create_view(es_search_fields) request = rf.get('/test/') request.query_params = query_params search = self.backend.filter_search(request, search, view) result = get_search_ids(search) assert result == expected
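`test_get_es_query` in this record shows the intended extension point: subclass `ElasticSearchFilter` and override `get_es_query(s_query, s_fields, **kwargs)` to control the generated query. One plausible override body, sketched here with a standard `multi_match` query (the library's actual default may differ, so treat this as an assumption):

```python
from elasticsearch_dsl import Q


def build_search_query(s_query, s_fields):
    # Candidate body for a get_es_query override: match the search string
    # against every configured field at once.
    return Q('multi_match', query=s_query, fields=list(s_fields))


q = build_search_query('Zofia', ('first_name', 'last_name', 'city'))
assert q == Q('multi_match', query='Zofia',
              fields=['first_name', 'last_name', 'city'])
```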
initialize
Initialize inference algorithm.

Args:
  step_size: float, optional.
    Constant scale factor of learning rate.
  friction: float, optional.
    Constant scale on the friction term in the Hamiltonian system.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import tensorflow as tf

from edward.inferences.monte_carlo import MonteCarlo
from edward.models import RandomVariable, Empirical
from edward.util import copy

try:
  from edward.models import Normal
except Exception as e:
  raise ImportError("{0}. Your TensorFlow version is not supported.".format(e))


class SGHMC(MonteCarlo):
  """Stochastic gradient Hamiltonian Monte Carlo (Chen et al., 2014).

  #### Notes

  In conditional inference, we infer $z$ in $p(z, \\beta \mid x)$ while fixing
  inference over $\\beta$ using another distribution $q(\\beta)$.
  `SGHMC` substitutes the model's log marginal density

  $\log p(x, z) = \log \mathbb{E}_{q(\\beta)} [ p(x, z, \\beta) ]
                \\approx \log p(x, z, \\beta^*)$

  leveraging a single Monte Carlo sample, where $\\beta^* \sim q(\\beta)$.
  This is unbiased (and therefore asymptotically exact as a pseudo-marginal
  method) if $q(\\beta) = p(\\beta \mid x)$.

  #### Examples

  ```python
  mu = Normal(loc=0.0, scale=1.0)
  x = Normal(loc=mu, scale=1.0, sample_shape=10)

  qmu = Empirical(tf.Variable(tf.zeros(500)))
  inference = ed.SGHMC({mu: qmu}, {x: np.zeros(10, dtype=np.float32)})
  ```
  """
  def __init__(self, *args, **kwargs):
    super(SGHMC, self).__init__(*args, **kwargs)

  # MASKED: initialize function (lines 48-61)

  def build_update(self):
    """Simulate Hamiltonian dynamics with friction using a discretized
    integrator. Its discretization error goes to zero as the learning
    rate decreases.

    Implements the update equations from (15) of Chen et al. (2014).
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}
    old_v_sample = {z: v for z, v in six.iteritems(self.v)}

    # Simulate Hamiltonian dynamics with friction.
    friction = tf.constant(self.friction, dtype=tf.float32)
    learning_rate = tf.constant(self.step_size * 0.01, dtype=tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))

    # v_sample is so named b/c it represents a velocity rather than momentum.
    sample = {}
    v_sample = {}
    for z, grad_log_p in zip(six.iterkeys(old_sample), grad_log_joint):
      qz = self.latent_vars[z]
      event_shape = qz.event_shape
      normal = Normal(loc=tf.zeros(event_shape),
                      scale=(tf.sqrt(learning_rate * friction) *
                             tf.ones(event_shape)))
      sample[z] = old_sample[z] + old_v_sample[z]
      v_sample[z] = ((1. - 0.5 * friction) * old_v_sample[z] +
                     learning_rate * tf.convert_to_tensor(grad_log_p) +
                     normal.sample())

    # Update Empirical random variables.
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      variable = qz.get_variables()[0]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))
      assign_ops.append(tf.assign(self.v[z], v_sample[z]).op)

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)

  def _log_joint(self, z_sample):
    """Utility function to calculate model's log joint density,
    log p(x, z), for inputs z (and fixed data x).

    Args:
      z_sample: dict.
        Latent variable keys to samples.
    """
    scope = tf.get_default_graph().unique_name("inference")
    # Form dictionary in order to replace conditioning on prior or
    # observed variable with conditioning on a specific value.
    dict_swap = z_sample.copy()
    for x, qx in six.iteritems(self.data):
      if isinstance(x, RandomVariable):
        if isinstance(qx, RandomVariable):
          qx_copy = copy(qx, scope=scope)
          dict_swap[x] = qx_copy.value()
        else:
          dict_swap[x] = qx

    log_joint = 0.0
    for z in six.iterkeys(self.latent_vars):
      z_copy = copy(z, dict_swap, scope=scope)
      log_joint += tf.reduce_sum(
          self.scale.get(z, 1.0) * z_copy.log_prob(dict_swap[z]))

    for x in six.iterkeys(self.data):
      if isinstance(x, RandomVariable):
        x_copy = copy(x, dict_swap, scope=scope)
        log_joint += tf.reduce_sum(
            self.scale.get(x, 1.0) * x_copy.log_prob(dict_swap[x]))

    return log_joint
  def initialize(self, step_size=0.25, friction=0.1, *args, **kwargs):
    """Initialize inference algorithm.

    Args:
      step_size: float, optional.
        Constant scale factor of learning rate.
      friction: float, optional.
        Constant scale on the friction term in the Hamiltonian system.
    """
    self.step_size = step_size
    self.friction = friction
    self.v = {z: tf.Variable(tf.zeros(qz.params.shape[1:]))
              for z, qz in six.iteritems(self.latent_vars)}
    return super(SGHMC, self).initialize(*args, **kwargs)
48
61
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import tensorflow as tf

from edward.inferences.monte_carlo import MonteCarlo
from edward.models import RandomVariable, Empirical
from edward.util import copy

try:
  from edward.models import Normal
except Exception as e:
  raise ImportError("{0}. Your TensorFlow version is not supported.".format(e))


class SGHMC(MonteCarlo):
  """Stochastic gradient Hamiltonian Monte Carlo (Chen et al., 2014).

  #### Notes

  In conditional inference, we infer $z$ in $p(z, \\beta \mid x)$ while fixing
  inference over $\\beta$ using another distribution $q(\\beta)$.
  `SGHMC` substitutes the model's log marginal density

  $\log p(x, z) = \log \mathbb{E}_{q(\\beta)} [ p(x, z, \\beta) ]
                \\approx \log p(x, z, \\beta^*)$

  leveraging a single Monte Carlo sample, where $\\beta^* \sim q(\\beta)$.
  This is unbiased (and therefore asymptotically exact as a pseudo-marginal
  method) if $q(\\beta) = p(\\beta \mid x)$.

  #### Examples

  ```python
  mu = Normal(loc=0.0, scale=1.0)
  x = Normal(loc=mu, scale=1.0, sample_shape=10)

  qmu = Empirical(tf.Variable(tf.zeros(500)))
  inference = ed.SGHMC({mu: qmu}, {x: np.zeros(10, dtype=np.float32)})
  ```
  """
  def __init__(self, *args, **kwargs):
    super(SGHMC, self).__init__(*args, **kwargs)

  def initialize(self, step_size=0.25, friction=0.1, *args, **kwargs):
    """Initialize inference algorithm.

    Args:
      step_size: float, optional.
        Constant scale factor of learning rate.
      friction: float, optional.
        Constant scale on the friction term in the Hamiltonian system.
    """
    self.step_size = step_size
    self.friction = friction
    self.v = {z: tf.Variable(tf.zeros(qz.params.shape[1:]))
              for z, qz in six.iteritems(self.latent_vars)}
    return super(SGHMC, self).initialize(*args, **kwargs)

  def build_update(self):
    """Simulate Hamiltonian dynamics with friction using a discretized
    integrator. Its discretization error goes to zero as the learning
    rate decreases.

    Implements the update equations from (15) of Chen et al. (2014).
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}
    old_v_sample = {z: v for z, v in six.iteritems(self.v)}

    # Simulate Hamiltonian dynamics with friction.
    friction = tf.constant(self.friction, dtype=tf.float32)
    learning_rate = tf.constant(self.step_size * 0.01, dtype=tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))

    # v_sample is so named b/c it represents a velocity rather than momentum.
    sample = {}
    v_sample = {}
    for z, grad_log_p in zip(six.iterkeys(old_sample), grad_log_joint):
      qz = self.latent_vars[z]
      event_shape = qz.event_shape
      normal = Normal(loc=tf.zeros(event_shape),
                      scale=(tf.sqrt(learning_rate * friction) *
                             tf.ones(event_shape)))
      sample[z] = old_sample[z] + old_v_sample[z]
      v_sample[z] = ((1. - 0.5 * friction) * old_v_sample[z] +
                     learning_rate * tf.convert_to_tensor(grad_log_p) +
                     normal.sample())

    # Update Empirical random variables.
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      variable = qz.get_variables()[0]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))
      assign_ops.append(tf.assign(self.v[z], v_sample[z]).op)

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)

  def _log_joint(self, z_sample):
    """Utility function to calculate model's log joint density,
    log p(x, z), for inputs z (and fixed data x).

    Args:
      z_sample: dict.
        Latent variable keys to samples.
    """
    scope = tf.get_default_graph().unique_name("inference")
    # Form dictionary in order to replace conditioning on prior or
    # observed variable with conditioning on a specific value.
    dict_swap = z_sample.copy()
    for x, qx in six.iteritems(self.data):
      if isinstance(x, RandomVariable):
        if isinstance(qx, RandomVariable):
          qx_copy = copy(qx, scope=scope)
          dict_swap[x] = qx_copy.value()
        else:
          dict_swap[x] = qx

    log_joint = 0.0
    for z in six.iterkeys(self.latent_vars):
      z_copy = copy(z, dict_swap, scope=scope)
      log_joint += tf.reduce_sum(
          self.scale.get(z, 1.0) * z_copy.log_prob(dict_swap[z]))

    for x in six.iterkeys(self.data):
      if isinstance(x, RandomVariable):
        x_copy = copy(x, dict_swap, scope=scope)
        log_joint += tf.reduce_sum(
            self.scale.get(x, 1.0) * x_copy.log_prob(dict_swap[x]))

    return log_joint
generate
Generate the next neural architecture. Args: multiprocessing_queue: the queue used to pass return values between processes. Returns: a list of 2-element tuples (generated_graph, other_info); for the random searcher the list has length 1. generated_graph: An instance of Graph. other_info: Anything to be saved in the training queue together with the architecture.
from random import randrange from autokeras.bayesian import SearchTree, contain from autokeras.net_transformer import transform from autokeras.search import Searcher class RandomSearcher(Searcher): """ Class to search for neural architectures using Random search strategy. Attributes: search_tree: The network morphism search tree """ def __init__(self, n_output_node, input_shape, path, metric, loss, generators, verbose, trainer_args=None, default_model_len=None, default_model_width=None): super(RandomSearcher, self).__init__(n_output_node, input_shape, path, metric, loss, generators, verbose, trainer_args, default_model_len, default_model_width) self.search_tree = SearchTree() # MASKED: generate function (lines 24-52) def update(self, other_info, model_id, *args): """ Update the controller with evaluation result of a neural architecture. Args: other_info: Anything. In our case it is the father ID in the search tree. model_id: An integer. """ father_id = other_info self.search_tree.add_child(father_id, model_id)
def generate(self, multiprocessing_queue): """Generate the next neural architecture. Args: multiprocessing_queue: the Queue for multiprocessing return value. Returns: list of 2-element tuples: generated_graph and other_info, for random searcher the length of list is 1. generated_graph: An instance of Graph. other_info: Anything to be saved in the training queue together with the architecture. """ random_index = randrange(len(self.history)) model_id = self.history[random_index]['model_id'] graph = self.load_model_by_id(model_id) new_father_id = None generated_graph = None for temp_graph in transform(graph): if not contain(self.descriptors, temp_graph.extract_descriptor()): new_father_id = model_id generated_graph = temp_graph break if new_father_id is None: new_father_id = 0 generated_graph = self.generators[0](self.n_classes, self.input_shape). \ generate(self.default_model_len, self.default_model_width) return [(generated_graph, new_father_id)]
24
52
from random import randrange from autokeras.bayesian import SearchTree, contain from autokeras.net_transformer import transform from autokeras.search import Searcher class RandomSearcher(Searcher): """ Class to search for neural architectures using Random search strategy. Attributes: search_tree: The network morphism search tree """ def __init__(self, n_output_node, input_shape, path, metric, loss, generators, verbose, trainer_args=None, default_model_len=None, default_model_width=None): super(RandomSearcher, self).__init__(n_output_node, input_shape, path, metric, loss, generators, verbose, trainer_args, default_model_len, default_model_width) self.search_tree = SearchTree() def generate(self, multiprocessing_queue): """Generate the next neural architecture. Args: multiprocessing_queue: the Queue for multiprocessing return value. Returns: list of 2-element tuples: generated_graph and other_info, for random searcher the length of list is 1. generated_graph: An instance of Graph. other_info: Anything to be saved in the training queue together with the architecture. """ random_index = randrange(len(self.history)) model_id = self.history[random_index]['model_id'] graph = self.load_model_by_id(model_id) new_father_id = None generated_graph = None for temp_graph in transform(graph): if not contain(self.descriptors, temp_graph.extract_descriptor()): new_father_id = model_id generated_graph = temp_graph break if new_father_id is None: new_father_id = 0 generated_graph = self.generators[0](self.n_classes, self.input_shape). \ generate(self.default_model_len, self.default_model_width) return [(generated_graph, new_father_id)] def update(self, other_info, model_id, *args): """ Update the controller with evaluation result of a neural architecture. Args: other_info: Anything. In our case it is the father ID in the search tree. model_id: An integer. """ father_id = other_info self.search_tree.add_child(father_id, model_id)
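A minimal sketch of how `generate` and `update` cooperate in a search loop. `train_and_get_id` and the queue handling here are illustrative assumptions, not part of the autokeras code above:

```python
from multiprocessing import Queue

def random_search_step(searcher, train_and_get_id):
    """Run one generate/train/update cycle on a RandomSearcher.

    train_and_get_id is a hypothetical callable that trains a generated
    graph and returns the resulting model's integer id.
    """
    queue = Queue()  # generate() accepts the queue but does not use it here
    for generated_graph, father_id in searcher.generate(queue):
        model_id = train_and_get_id(generated_graph)
        searcher.update(father_id, model_id)  # record the parent/child edge
```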
get_transport_class
Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport # MASKED: get_transport_class function (lines 59-75) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. 
Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. 
if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. 
return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". 
For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. 
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values()))
59
75
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
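Every flattened-parameter method in the client code above opens with the same guard: the caller may supply either a prebuilt request object or the flattened keyword arguments, never both. A minimal standalone sketch of that convention, using a hypothetical `make_request` helper with a plain dict in place of the protobuf request class (both are illustrative, not library API):

```python
def make_request(request=None, *, parent=None, job=None):
    """Coerce arguments into a single request, mirroring the
    request-vs-flattened-params guard in the generated client."""
    # Reject mixing a prebuilt request with flattened keyword arguments.
    has_flattened_params = any([parent, job])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Otherwise build (or copy) the request and apply flattened fields.
    request = dict(request) if request is not None else {}
    if parent is not None:
        request["parent"] = parent
    if job is not None:
        request["job"] = job
    return request


# Either form is accepted on its own:
assert make_request(parent="projects/foo", job={"title": "Engineer"}) == {
    "parent": "projects/foo",
    "job": {"title": "Engineer"},
}
assert make_request({"parent": "projects/foo"}) == {"parent": "projects/foo"}

# Mixing both forms is rejected:
try:
    make_request({"parent": "projects/foo"}, parent="projects/bar")
except ValueError as exc:
    print("rejected as expected:", exc)
```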
_get_default_mtls_endpoint
Convert api endpoint to mTLS endpoint.

Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

Args:
    api_endpoint (Optional[str]): the api endpoint to convert.

Returns:
    str: converted mTLS api endpoint.
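The docstring above pins down the conversion contract; a short runnable check of that contract follows, assuming the released google-cloud-talent package where `JobServiceClient` is importable from `google.cloud.talent_v4beta1` (the expected values can be verified against the implementation further below):

```python
from google.cloud.talent_v4beta1 import JobServiceClient

# googleapis.com hosts gain an ".mtls" label...
assert (
    JobServiceClient._get_default_mtls_endpoint("jobs.googleapis.com")
    == "jobs.mtls.googleapis.com"
)
# ...and sandbox hosts keep their ".sandbox" label behind it.
assert (
    JobServiceClient._get_default_mtls_endpoint("jobs.sandbox.googleapis.com")
    == "jobs.mtls.sandbox.googleapis.com"
)
# Already-mTLS endpoints, non-googleapis hosts, and falsy values pass through.
assert (
    JobServiceClient._get_default_mtls_endpoint("jobs.mtls.googleapis.com")
    == "jobs.mtls.googleapis.com"
)
assert JobServiceClient._get_default_mtls_endpoint("example.com") == "example.com"
assert JobServiceClient._get_default_mtls_endpoint(None) is None
```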
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ # MASKED: _get_default_mtls_endpoint function (lines 83-110) DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. 
""" credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." 
) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. 
return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". 
For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. 
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
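One detail worth noting in the masked file above: `DEFAULT_MTLS_ENDPOINT` is computed inside the class body by calling `_get_default_mtls_endpoint.__func__(DEFAULT_ENDPOINT)`. At that point the name is still bound to a staticmethod descriptor, which is not directly callable on Python versions before 3.10, so the generated code unwraps it via `__func__` first. A minimal sketch of the same pattern (the `Endpoints` class and `_to_mtls` helper are illustrative, not part of the library):

```python
class Endpoints:
    @staticmethod
    def _to_mtls(endpoint):
        # Toy stand-in for _get_default_mtls_endpoint.
        return endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "jobs.googleapis.com"
    # Inside the class body `_to_mtls` is still a staticmethod descriptor,
    # so it is unwrapped via `__func__` before being called (required on
    # Python < 3.10, where staticmethod objects are not callable).
    DEFAULT_MTLS_ENDPOINT = _to_mtls.__func__(DEFAULT_ENDPOINT)  # type: ignore


assert Endpoints.DEFAULT_MTLS_ENDPOINT == "jobs.mtls.googleapis.com"
```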
@staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
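The implementation's branching is driven entirely by the named groups of its endpoint regex: `mtls` set means the endpoint is already mTLS and is returned unchanged, `googledomain` unset means it is not a googleapis.com host, and `sandbox` set selects the sandbox replacement branch. A small standalone probe (same pattern as in the implementation) shows how the groups populate for each endpoint shape:

```python
import re

mtls_endpoint_re = re.compile(
    r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?"
    r"(?P<googledomain>\.googleapis\.com)?"
)

for endpoint in (
    "jobs.googleapis.com",          # googledomain set -> rewritten to mTLS
    "jobs.sandbox.googleapis.com",  # sandbox set -> sandbox branch taken
    "jobs.mtls.googleapis.com",     # mtls set -> returned unchanged
    "example.com",                  # googledomain unset -> returned unchanged
):
    groups = mtls_endpoint_re.match(endpoint).groupdict()
    print(endpoint, "->", groups)
```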
83
110
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
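To ground the full client file above, a brief hedged usage sketch follows. The project, tenant, company, and job values are hypothetical placeholders; the `Job` fields used here (`company`, `requisition_id`, `title`) are assumed from the wider library rather than shown in this file, and credentials are resolved from the environment as the constructor's docstring describes.

```python
# Hedged usage sketch for JobServiceClient; resource names are placeholders.
from google.cloud.talent_v4beta1.services.job_service import JobServiceClient
from google.cloud.talent_v4beta1.types import job as gct_job

client = JobServiceClient()  # credentials picked up from the environment

parent = "projects/my-project/tenants/my-tenant"  # hypothetical tenant
job = gct_job.Job(
    company=f"{parent}/companies/my-company",  # hypothetical company
    requisition_id="req-1",
    title="Software Engineer",
)

# Per create_job's docstring, the job is typically searchable within
# 10 seconds, but it may take up to 5 minutes.
created = client.create_job(parent=parent, job=job)
print(created.name)

# list_jobs requires a filter naming at least companyName (see its docstring);
# iterating the pager resolves additional pages automatically.
pager = client.list_jobs(parent=parent, filter=f'companyName = "{job.company}"')
for item in pager:
    print(item.requisition_id)
```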
from_service_account_file
Creates an instance of this client using the provided credentials file.

Args:
    filename (str): The path to the service account private key JSON
        file.
    args: Additional arguments to pass to the constructor.
    kwargs: Additional arguments to pass to the constructor.

Returns:
    JobServiceClient: The constructed client.
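A short hedged example of the factory described above, together with `from_service_account_json`, which the code below binds to the same function; the key file path is a placeholder.

```python
# Hedged sketch: construct the client from a service account key file.
# "service-account.json" is a hypothetical path.
from google.cloud.talent_v4beta1.services.job_service import JobServiceClient

client = JobServiceClient.from_service_account_file("service-account.json")

# from_service_account_json is an alias bound to the same method.
same = JobServiceClient.from_service_account_json("service-account.json")
```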
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) # MASKED: from_service_account_file function (lines 117-133) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. 
if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. 
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. 
return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". 
For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. 
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
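The client file above implements the same flattened-versus-request calling convention in every method: pass either a fully built request object or the individual flattened fields, never both. A minimal usage sketch of that convention, assuming a hypothetical tenant `projects/foo/tenants/bar`, application-default credentials, and the import path suggested by the package layout above:

```python
from google.cloud.talent_v4beta1.services.job_service import JobServiceClient
from google.cloud.talent_v4beta1.types import job_service

PARENT = "projects/foo/tenants/bar"  # hypothetical tenant resource name

client = JobServiceClient()

# Flattened style: individual fields, `request` left unset.
for job in client.list_jobs(
    parent=PARENT,
    filter='companyName = "projects/foo/tenants/bar/companies/baz"',
):
    print(job.name)

# Request-object style: build the request yourself and pass only that.
request = job_service.ListJobsRequest(
    parent=PARENT,
    filter='companyName = "projects/foo/tenants/bar/companies/baz"',
)
for job in client.list_jobs(request=request):
    print(job.name)

# Supplying both `request` and a flattened field raises ValueError,
# enforced by the `has_flattened_params` sanity check in each method.
```

Both calls return a `ListJobsPager`, so plain iteration transparently resolves additional pages.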
@classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs)
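This `from_service_account_file` implementation (aliased as `from_service_account_json` in the full class body) simply loads credentials from the key file and forwards everything else to the constructor. A brief sketch with a hypothetical key path:

```python
# Hypothetical key file path; remaining args/kwargs flow straight
# through to JobServiceClient.__init__.
client = JobServiceClient.from_service_account_file(
    "service-account.json",
)

# Equivalent spelling via the alias:
client = JobServiceClient.from_service_account_json(
    "service-account.json",
)
```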
117
133
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
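`batch_create_jobs` and `batch_update_jobs` above both return a long-running `operation.Operation` built with `operation.from_gapic`, whose eventual result is a `JobOperationResult`. A sketch of driving such an operation end to end, with hypothetical job payloads (the `job_results` field name follows the v4beta1 `JobOperationResult` message, but treat it as an assumption):

```python
from google.cloud.talent_v4beta1.types import job as job_types

# Hypothetical jobs; company, requisition_id, title and description
# are the required Job fields.
jobs = [
    job_types.Job(
        company="projects/foo/tenants/bar/companies/baz",
        requisition_id="req-1",
        title="Software Engineer",
        description="Build and maintain backend services.",
    ),
]

op = client.batch_create_jobs(parent="projects/foo/tenants/bar", jobs=jobs)

# Block until the batch finishes; the metadata_type passed to
# operation.from_gapic means progress metadata deserializes as
# common.BatchOperationMetadata.
result = op.result()
for job_result in result.job_results:
    print(job_result.job.name)
```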
create_job
Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job.
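Per this docstring, `create_job` accepts either the flattened `parent`/`job` pair or a complete `CreateJobRequest`. A minimal sketch with hypothetical values:

```python
from google.cloud.talent_v4beta1.types import job as job_types

created = client.create_job(
    parent="projects/foo/tenants/bar",  # hypothetical tenant
    job=job_types.Job(
        company="projects/foo/tenants/bar/companies/baz",
        requisition_id="req-2",
        title="Data Engineer",
        description="Design and operate data pipelines.",
    ),
)
print(created.name)  # server-assigned job resource name
```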
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) # MASKED: create_job function (lines 243-332) def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. 
It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. 
# There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. 
return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". 
This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. 
""" # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. 
response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
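The client surface above resolves its API endpoint from the ``GOOGLE_API_USE_MTLS`` environment variable and ``client_options`` before constructing a transport. A minimal construction sketch follows; the service-account key path is a hypothetical placeholder, and passing a plain dict for ``client_options`` works because ``__init__`` routes it through ``ClientOptions.from_dict``.

```python
# Construction sketch for the client defined above; the key-file path
# is a hypothetical placeholder.
import os

from google.cloud.talent_v4beta1 import JobServiceClient

# The static helper above rewrites "*.googleapis.com" endpoints to
# their "*.mtls.googleapis.com" counterparts.
assert (
    JobServiceClient._get_default_mtls_endpoint("jobs.googleapis.com")
    == "jobs.mtls.googleapis.com"
)

# With GOOGLE_API_USE_MTLS unset or "never", __init__ selects DEFAULT_ENDPOINT.
os.environ["GOOGLE_API_USE_MTLS"] = "never"
client = JobServiceClient.from_service_account_file("service-account.json")

# An explicit api_endpoint always takes precedence over the environment
# variable; a dict is coerced via ClientOptions.from_dict.
explicit_client = JobServiceClient(
    client_options={"api_endpoint": "jobs.googleapis.com"}
)
```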
def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response
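A short call sketch for the ``create_job`` implementation above, assuming the ``client`` built earlier; the resource names and job fields are placeholders. The flattened and request-object forms are mutually exclusive, per the ``has_flattened_params`` check.

```python
# Hedged usage sketch for create_job; resource names are placeholders.
from google.cloud.talent_v4beta1 import types

parent = "projects/my-project/tenants/my-tenant"
job = types.Job(
    company="projects/my-project/tenants/my-tenant/companies/my-company",
    requisition_id="req-1",
    title="Software Engineer",
    description="Example posting.",
)

# Flattened form: the client copies the arguments onto a CreateJobRequest.
created = client.create_job(parent=parent, job=job)

# Equivalent request-object form; combining it with flattened arguments
# raises ValueError, as enforced above.
created = client.create_job(
    request=types.CreateJobRequest(parent=parent, job=job)
)
```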
243
332
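The ``start_line``/``end_line`` values (243 and 332) locate the masked span inside the file content below. A minimal sketch of splicing the implementation back over the ``# MASKED:`` marker, assuming the record fields are loaded as plain strings:

```python
def unmask(masked_code: str, implementation: str) -> str:
    """Replace the single '# MASKED:' marker line with the implementation."""
    lines = masked_code.splitlines()
    # The marker line is indented inside the class body, so match on the
    # stripped prefix rather than the full line.
    idx = next(
        i for i, line in enumerate(lines) if line.strip().startswith("# MASKED:")
    )
    return "\n".join(lines[:idx] + implementation.splitlines() + lines[idx + 1 :])
```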
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
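A minimal usage sketch of the `JobServiceClient` defined above, assuming a service-account key file and placeholder project/tenant/company IDs (none of these values come from the source; substitute real resources):

```python
from google.cloud import talent_v4beta1

# Hypothetical key file and resource IDs.
client = talent_v4beta1.JobServiceClient.from_service_account_file("key.json")

parent = "projects/my-project/tenants/my-tenant"

# list_jobs requires a filter; iterating the returned ListJobsPager
# fetches additional pages automatically via its __iter__ method.
for job in client.list_jobs(
    parent=parent,
    filter='companyName = "projects/my-project/tenants/my-tenant/companies/c1"',
):
    print(job.name)
```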
get_job
Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job.
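A short example of invoking this method through the client, using the flattened `name` argument together with the `job_path` helper defined on the class; the project, tenant, and job IDs are placeholders:

```python
from google.cloud import talent_v4beta1

client = talent_v4beta1.JobServiceClient()  # credentials resolved from the environment

# job_path builds "projects/{project}/tenants/{tenant}/jobs/{job}".
name = client.job_path(project="my-project", tenant="my-tenant", job="job-123")

job = client.get_job(name=name)
# requisition_id and title are fields of the v4beta1 Job message.
print(job.requisition_id, job.title)
```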
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response # MASKED: get_job function (lines 437-517) def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. 
return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". 
This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. 
""" # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. 
response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
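The paged `search_jobs` call above returns a `SearchJobsPager` whose `__iter__` resolves follow-up pages transparently. A sketch of driving it, assuming the v4beta1 `SearchJobsRequest`, `RequestMetadata`, and `JobQuery` message fields from the public API (those messages are not defined in this file):

```python
from google.cloud import talent_v4beta1

client = talent_v4beta1.JobServiceClient()

request = talent_v4beta1.SearchJobsRequest(
    parent="projects/my-project/tenants/my-tenant",  # placeholder IDs
    # request_metadata identifies the end user's search session.
    request_metadata=talent_v4beta1.RequestMetadata(
        domain="example.com", session_id="session-1", user_id="user-1",
    ),
    job_query=talent_v4beta1.JobQuery(query="software engineer"),
)

# Assumes the pager yields SearchJobsResponse.MatchingJob entries.
for matching_job in client.search_jobs(request=request):
    print(matching_job.job.title)
```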
def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response
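The sanity check at the top of this implementation makes the `request` object and the flattened `name` argument mutually exclusive. A quick illustration of both calling styles and of the guard (the resource name is a placeholder):

```python
from google.cloud import talent_v4beta1

client = talent_v4beta1.JobServiceClient()
name = "projects/my-project/tenants/my-tenant/jobs/job-123"  # placeholder

# Style 1: flattened argument; the client builds GetJobRequest internally.
job = client.get_job(name=name)

# Style 2: explicit request object.
request = talent_v4beta1.GetJobRequest(name=name)
job = client.get_job(request=request)

# Mixing the two styles trips the ValueError raised above.
try:
    client.get_job(request=request, name=name)
except ValueError as exc:
    print(exc)
```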
437
517
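These two values, 437 and 517, are the 1-indexed, inclusive bounds referenced by the `# MASKED: get_job function (lines 437-517)` placeholder in the file above. A minimal splice sketch, assuming the placeholder occupies line 437 of the masked text and that both the masked file and the function body are held as plain Python strings:

```python
def splice(masked: str, body: str, start: int, end: int) -> str:
    """Replace a one-line MASKED placeholder with the original body.

    start/end are 1-indexed, inclusive line numbers in the reconstructed
    file (437 and 517 for get_job); start locates the placeholder line.
    """
    lines = masked.splitlines()
    # Sanity check: the body should span exactly the recorded range.
    assert len(body.splitlines()) == end - start + 1
    head = lines[: start - 1]   # everything before the placeholder
    tail = lines[start:]        # everything after the placeholder line
    return "\n".join(head + body.splitlines() + tail)
```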
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
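The batch methods in the client above do not return the final result directly: the raw RPC response is wrapped with `operation.from_gapic`, so callers receive a long-running `Operation` future whose `result()` blocks until the backend finishes the batch. A minimal usage sketch, assuming hypothetical project, tenant, and company resource names:

```python
from google.cloud import talent_v4beta1

client = talent_v4beta1.JobServiceClient()

# All resource names below are hypothetical placeholders.
parent = "projects/my-project/tenants/my-tenant"
jobs = [
    talent_v4beta1.Job(
        company="projects/my-project/tenants/my-tenant/companies/my-company",
        requisition_id="req-1",
        title="Software Engineer",
        description="An illustrative job posting.",
    )
]

# batch_create_jobs returns a long-running operation future.
op = client.batch_create_jobs(parent=parent, jobs=jobs)

# result() polls the operations client until the batch completes and
# returns the JobOperationResult on success.
result = op.result(timeout=300)
print(result)
```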
list_jobs
Lists jobs by filter.

Args:
    request (:class:`~.job_service.ListJobsRequest`):
        The request object. List jobs request.
    parent (:class:`str`):
        Required. The resource name of the tenant under which the job
        is created.

        The format is "projects/{project_id}/tenants/{tenant_id}". For
        example, "projects/foo/tenants/bar". If tenant id is
        unspecified, a default tenant is created. For example,
        "projects/foo".
        This corresponds to the ``parent`` field
        on the ``request`` instance; if ``request`` is provided, this
        should not be set.
    filter (:class:`str`):
        Required. The filter string specifies the jobs to be
        enumerated.

        Supported operator: =, AND

        The fields eligible for filtering are:

        -  ``companyName`` (Required)
        -  ``requisitionId``
        -  ``status`` Available values: OPEN, EXPIRED, ALL. Defaults
           to OPEN if no value is specified.

        Sample Query:

        -  companyName = "projects/foo/tenants/bar/companies/baz"
        -  companyName = "projects/foo/tenants/bar/companies/baz" AND
           requisitionId = "req-1"
        -  companyName = "projects/foo/tenants/bar/companies/baz" AND
           status = "EXPIRED".
        This corresponds to the ``filter`` field
        on the ``request`` instance; if ``request`` is provided, this
        should not be set.
    retry (google.api_core.retry.Retry): Designation of what errors, if any,
        should be retried.
    timeout (float): The timeout for this request.
    metadata (Sequence[Tuple[str, str]]): Strings which should be sent along
        with the request as metadata.

Returns:
    ~.pagers.ListJobsPager:
        List jobs response.
        Iterating over this object will yield results and resolve
        additional pages automatically.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # MASKED: list_jobs function (lines 863-976) def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. 
The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
def list_jobs(
    self,
    request: job_service.ListJobsRequest = None,
    *,
    parent: str = None,
    filter: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsPager:
    r"""Lists jobs by filter.

    Args:
        request (:class:`~.job_service.ListJobsRequest`):
            The request object. List jobs request.
        parent (:class:`str`):
            Required. The resource name of the tenant under which the
            job is created.

            The format is "projects/{project_id}/tenants/{tenant_id}".
            For example, "projects/foo/tenants/bar". If tenant id is
            unspecified, a default tenant is created. For example,
            "projects/foo".
            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided,
            this should not be set.
        filter (:class:`str`):
            Required. The filter string specifies the jobs to be
            enumerated.

            Supported operator: =, AND

            The fields eligible for filtering are:

            -  ``companyName`` (Required)
            -  ``requisitionId``
            -  ``status`` Available values: OPEN, EXPIRED, ALL.
               Defaults to OPEN if no value is specified.

            Sample Query:

            -  companyName = "projects/foo/tenants/bar/companies/baz"
            -  companyName = "projects/foo/tenants/bar/companies/baz"
               AND requisitionId = "req-1"
            -  companyName = "projects/foo/tenants/bar/companies/baz"
               AND status = "EXPIRED".
            This corresponds to the ``filter`` field
            on the ``request`` instance; if ``request`` is provided,
            this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along
            with the request as metadata.

    Returns:
        ~.pagers.ListJobsPager:
            List jobs response.
            Iterating over this object will yield results and resolve
            additional pages automatically.

    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent, filter])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Minor optimization to avoid making a copy if the user passes
    # in a job_service.ListJobsRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, job_service.ListJobsRequest):
        request = job_service.ListJobsRequest(request)

    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if parent is not None:
        request.parent = parent
    if filter is not None:
        request.filter = filter

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.list_jobs]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
    )

    # Send the request.
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    # This method is paged; wrap the response in a pager, which provides
    # an `__iter__` convenience method.
    response = pagers.ListJobsPager(
        method=rpc, request=request, response=response, metadata=metadata,
    )

    # Done; return the response.
    return response
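Because the response is wrapped in `pagers.ListJobsPager`, simply iterating over the return value re-invokes the wrapped RPC for each subsequent page. A minimal usage sketch, again with hypothetical resource names:

```python
from google.cloud import talent_v4beta1

client = talent_v4beta1.JobServiceClient()

# parent and the companyName filter value are hypothetical placeholders.
pager = client.list_jobs(
    parent="projects/my-project/tenants/my-tenant",
    filter='companyName = "projects/my-project/tenants/my-tenant/companies/my-company"',
)

# The pager fetches additional pages lazily as iteration proceeds.
for job in pager:
    print(job.name, job.title)
```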
863
976
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
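To make the client surface above concrete, here is a hedged end-to-end sketch: it constructs the client (letting the transport and endpoint be chosen automatically, as in `__init__` above), creates a single job, then deletes it. The resource names and job fields are illustrative placeholders, and the Talent API may enforce additional required fields server-side:

```python
from google.cloud.talent_v4beta1 import JobServiceClient
from google.cloud.talent_v4beta1.types import Job

client = JobServiceClient()  # credentials and endpoint resolved automatically

# Hypothetical tenant and company; replace with real resource names.
parent = "projects/my-project/tenants/my-tenant"

job = Job(
    company="projects/my-project/tenants/my-tenant/companies/my-company",
    requisition_id="req-1",
    title="Software Engineer",
    description="Design, develop, test, deploy, and maintain software.",
    # The API requires a way for candidates to apply.
    application_info=Job.ApplicationInfo(uris=["https://example.com/apply"]),
)

created = client.create_job(parent=parent, job=job)
print("Created:", created.name)

# delete_job returns None; per the docstring above, the job typically
# becomes unsearchable within 10 seconds.
client.delete_job(name=created.name)
```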
search_jobs
Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for the SearchJobs method. Iterating over this object will yield results and resolve additional pages automatically.
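Since `search_jobs` exposes no flattened fields, it is called with a full request object. A hedged sketch follows; the tenant path and the domain/session/user values in `RequestMetadata` are placeholders (the API uses them for result quality), and the type-module paths are assumed to mirror the imports used by the client above:

```python
from google.cloud.talent_v4beta1 import JobServiceClient
from google.cloud.talent_v4beta1.types import common, filters, job_service

client = JobServiceClient()

request = job_service.SearchJobsRequest(
    parent="projects/my-project/tenants/my-tenant",  # placeholder tenant
    # RequestMetadata is required; it identifies the end user and session.
    request_metadata=common.RequestMetadata(
        domain="example.com", session_id="session-1", user_id="user-1",
    ),
    job_query=filters.JobQuery(query="software engineer"),
)

# search_jobs returns a SearchJobsPager over MatchingJob results;
# iterating resolves additional pages automatically.
for matching_job in client.search_jobs(request=request):
    print(matching_job.job.name, matching_job.job.title)
```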
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response # MASKED: search_jobs function (lines 978-1042) def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response
978
1042
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
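A minimal usage sketch for the `search_jobs` row above, assuming the `google-cloud-talent` package is installed and credentials are available in the environment (the client docstring in this row confirms it falls back to ambient credentials when none are passed). The tenant path is a placeholder, and the note about extra request fields is an assumption; the only `SearchJobsRequest` field this excerpt visibly reads is `parent` (for the routing header).

```python
# Hedged sketch only. "projects/foo/tenants/bar" is a placeholder tenant,
# and real SearchJobsRequest calls typically need fields this excerpt does
# not show (for example, requester metadata); treat those as assumptions.
from google.cloud.talent_v4beta1.services.job_service import JobServiceClient
from google.cloud.talent_v4beta1.types import job_service

client = JobServiceClient()  # credentials resolved from the environment

request = job_service.SearchJobsRequest(parent="projects/foo/tenants/bar")

# search_jobs returns a SearchJobsPager; iterating it yields results and
# resolves additional pages automatically, per the docstring in the row above.
for result in client.search_jobs(request=request):
    print(result)
```

The same `SearchJobsRequest` type drives `search_jobs_for_alert` (the next row's function), which differs only in its algorithmic tuning for passive job seekers and its `SearchJobsForAlertPager` return type.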
search_jobs_for_alert
Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response # MASKED: search_jobs_for_alert function (lines 1044-1114) try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
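The masked region above elides `search_jobs_for_alert` (restored in the implementation field below). As a side note on the snippet's endpoint handling: the rewrite performed by `_get_default_mtls_endpoint` near the top can be reproduced standalone. The sketch below is illustrative only, not part of the library; the helper name `to_mtls_endpoint` is invented.

```python
# Standalone sketch of the endpoint rewrite performed by
# _get_default_mtls_endpoint above; `to_mtls_endpoint` is an invented name.
import re


def to_mtls_endpoint(api_endpoint):
    """Convert "*.googleapis.com" to "*.mtls.googleapis.com" (sandbox-aware)."""
    if not api_endpoint:
        return api_endpoint
    pattern = re.compile(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
    )
    name, mtls, sandbox, googledomain = pattern.match(api_endpoint).groups()
    if mtls or not googledomain:
        # Already an mTLS endpoint, or not a googleapis.com host: unchanged.
        return api_endpoint
    if sandbox:
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )
    return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")


assert to_mtls_endpoint("jobs.googleapis.com") == "jobs.mtls.googleapis.com"
assert to_mtls_endpoint("jobs.sandbox.googleapis.com") == "jobs.mtls.sandbox.googleapis.com"
assert to_mtls_endpoint("jobs.mtls.googleapis.com") == "jobs.mtls.googleapis.com"
```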
def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response
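A hedged usage sketch for the implementation above. It assumes the package-level exports of google-cloud-talent (`JobServiceClient`, `SearchJobsRequest`, `RequestMetadata`); the resource name and metadata values are placeholders.

```python
# Hedged usage sketch; resource names and metadata values are placeholders.
from google.cloud import talent_v4beta1

client = talent_v4beta1.JobServiceClient()

request = talent_v4beta1.SearchJobsRequest(
    parent="projects/my-project/tenants/my-tenant",  # placeholder tenant path
    request_metadata=talent_v4beta1.RequestMetadata(
        domain="example.com",  # placeholder client metadata
        session_id="session-1",
        user_id="user-1",
    ),
)

# The returned pager resolves additional result pages transparently
# as it is iterated, per the method's docstring.
for matching_job in client.search_jobs_for_alert(request=request):
    print(matching_job.job.title)
```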
1044
1114
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation from google.api_core import operation_async from google.cloud.talent_v4beta1.services.job_service import pagers from google.cloud.talent_v4beta1.types import common from google.cloud.talent_v4beta1.types import job from google.cloud.talent_v4beta1.types import job as gct_job from google.cloud.talent_v4beta1.types import job_service from google.protobuf import timestamp_pb2 as timestamp # type: ignore from .transports.base import JobServiceTransport from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport class JobServiceClientMeta(type): """Metaclass for the JobService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] _transport_registry["grpc"] = JobServiceGrpcTransport _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class JobServiceClient(metaclass=JobServiceClientMeta): """A service handles job management, including job CRUD, enumeration and search. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "jobs.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: {@api.name}: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @staticmethod def job_path(project: str, tenant: str, job: str,) -> str: """Return a fully-qualified job string.""" return "projects/{project}/tenants/{tenant}/jobs/{job}".format( project=project, tenant=tenant, job=job, ) @staticmethod def parse_job_path(path: str) -> Dict[str, str]: """Parse a job path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/jobs/(?P<job>.+?)$", path, ) return m.groupdict() if m else {} def __init__( self, *, credentials: credentials.Credentials = None, transport: Union[str, JobServiceTransport] = None, client_options: ClientOptions = None, ) -> None: """Instantiate the job service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.JobServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint, this is the default value for the environment variable) and "auto" (auto switch to the default mTLS endpoint if client SSL credentials is present). However, the ``api_endpoint`` property takes precedence if provided. (2) The ``client_cert_source`` property is used to provide client SSL credentials for mutual TLS transport. If not provided, the default SSL credentials will be used if present. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" if isinstance(client_options, dict): client_options = ClientOptions.from_dict(client_options) if client_options is None: client_options = ClientOptions.ClientOptions() if client_options.api_endpoint is None: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") if use_mtls_env == "never": client_options.api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": has_client_cert_source = ( client_options.client_cert_source is not None or mtls.has_default_client_cert_source() ) client_options.api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if has_client_cert_source else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, JobServiceTransport): # transport is a JobServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, " "provide its scopes directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=client_options.api_endpoint, scopes=client_options.scopes, api_mtls_endpoint=client_options.api_endpoint, client_cert_source=client_options.client_cert_source, quota_project_id=client_options.quota_project_id, ) def create_job( self, request: job_service.CreateJobRequest = None, *, parent: str = None, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.CreateJobRequest`): The request object. Create job request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job (:class:`~.gct_job.Job`): Required. The Job to be created. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.CreateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.CreateJobRequest): request = job_service.CreateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_create_jobs( self, request: job_service.BatchCreateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch create jobs operation. Args: request (:class:`~.job_service.BatchCreateJobsRequest`): The request object. Request to create a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be created. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchCreateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
if not isinstance(request, job_service.BatchCreateJobsRequest): request = job_service.BatchCreateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_create_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def get_job( self, request: job_service.GetJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> job.Job: r"""Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days. Args: request (:class:`~.job_service.GetJobRequest`): The request object. Get job request. name (:class:`str`): Required. The resource name of the job to retrieve. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.GetJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.GetJobRequest): request = job_service.GetJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response def update_job( self, request: job_service.UpdateJobRequest = None, *, job: gct_job.Job = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gct_job.Job: r"""Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.UpdateJobRequest`): The request object. Update job request. job (:class:`~.gct_job.Job`): Required. The Job to be updated. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.gct_job.Job: A Job resource represents a job posting (also referred to as a "job listing" or "job requisition"). A job belongs to a [Company][google.cloud.talent.v4beta1.Company], which is the hiring entity responsible for the job. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([job]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.UpdateJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.UpdateJobRequest): request = job_service.UpdateJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if job is not None: request.job = job # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("job.name", request.job.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def batch_update_jobs( self, request: job_service.BatchUpdateJobsRequest = None, *, parent: str = None, jobs: Sequence[job.Job] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Begins executing a batch update jobs operation. Args: request (:class:`~.job_service.BatchUpdateJobsRequest`): The request object. Request to update a batch of jobs. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. jobs (:class:`Sequence[~.job.Job]`): Required. The jobs to be updated. This corresponds to the ``jobs`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operation.Operation: An object representing a long-running operation. The result type for the operation will be :class:``~.job_service.JobOperationResult``: The result of [JobService.BatchCreateJobs][google.cloud.talent.v4beta1.JobService.BatchCreateJobs] or [JobService.BatchUpdateJobs][google.cloud.talent.v4beta1.JobService.BatchUpdateJobs] APIs. It's used to replace [google.longrunning.Operation.response][google.longrunning.Operation.response] in case of success. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, jobs]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchUpdateJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchUpdateJobsRequest): request = job_service.BatchUpdateJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if jobs is not None: request.jobs = jobs # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_update_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Wrap the response in an operation future. response = operation.from_gapic( response, self._transport.operations_client, job_service.JobOperationResult, metadata_type=common.BatchOperationMetadata, ) # Done; return the response. return response def delete_job( self, request: job_service.DeleteJobRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes. Args: request (:class:`~.job_service.DeleteJobRequest`): The request object. Delete job request. name (:class:`str`): Required. The resource name of the job to be deleted. The format is "projects/{project_id}/tenants/{tenant_id}/jobs/{job_id}". For example, "projects/foo/tenants/bar/jobs/baz". If tenant id is unspecified, the default tenant is used. For example, "projects/foo/jobs/bar". This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.DeleteJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.DeleteJobRequest): request = job_service.DeleteJobRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_job] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def batch_delete_jobs( self, request: job_service.BatchDeleteJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by filter. Args: request (:class:`~.job_service.BatchDeleteJobsRequest`): The request object. Batch delete jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/foo/companies/bar" AND requisitionId = "req-1". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.BatchDeleteJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.BatchDeleteJobsRequest): request = job_service.BatchDeleteJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.batch_delete_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def list_jobs( self, request: job_service.ListJobsRequest = None, *, parent: str = None, filter: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists jobs by filter. Args: request (:class:`~.job_service.ListJobsRequest`): The request object. List jobs request. parent (:class:`str`): Required. The resource name of the tenant under which the job is created. The format is "projects/{project_id}/tenants/{tenant_id}". For example, "projects/foo/tenant/bar". If tenant id is unspecified, a default tenant is created. For example, "projects/foo". This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (:class:`str`): Required. The filter string specifies the jobs to be enumerated. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` - ``status`` Available values: OPEN, EXPIRED, ALL. Defaults to OPEN if no value is specified. Sample Query: - companyName = "projects/foo/tenants/bar/companies/baz" - companyName = "projects/foo/tenants/bar/companies/baz" AND requisitionId = "req-1" - companyName = "projects/foo/tenants/bar/companies/baz" AND status = "EXPIRED". This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.ListJobsPager: List jobs response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, filter]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a job_service.ListJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.ListJobsRequest): request = job_service.ListJobsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if filter is not None: request.filter = filter # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_jobs] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs that the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def search_jobs_for_alert( self, request: job_service.SearchJobsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchJobsForAlertPager: r"""Searches for jobs using the provided [SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest]. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the [visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs present in the database, and only returns jobs the caller has permission to search against. Args: request (:class:`~.job_service.SearchJobsRequest`): The request object. The Request body of the `SearchJobs` call. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.pagers.SearchJobsForAlertPager: Response for SearchJob method. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a job_service.SearchJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, job_service.SearchJobsRequest): request = job_service.SearchJobsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.search_jobs_for_alert] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchJobsForAlertPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response try: _client_info = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: _client_info = gapic_v1.client_info.ClientInfo() __all__ = ("JobServiceClient",)
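The file above is the GAPIC-generated `JobServiceClient` from `google-cloud-talent`. A minimal usage sketch, assuming the library is installed and application default credentials are configured; the project and tenant IDs below are hypothetical placeholders, not real resources:

```python
# Hedged sketch: resource names are illustrative placeholders.
from google.cloud import talent_v4beta1

client = talent_v4beta1.JobServiceClient()
parent = "projects/my-project/tenants/my-tenant"  # hypothetical tenant

# list_jobs returns a ListJobsPager; iterating it resolves pages
# automatically via the pager's __iter__.
for job in client.list_jobs(
    parent=parent,
    filter='companyName = "projects/my-project/tenants/my-tenant/companies/baz"',
):
    print(job.name, job.title)

# delete_job routes on the ("name", ...) metadata header built above.
client.delete_job(name=f"{parent}/jobs/123")
```

Note the mutual-exclusion rule enforced in the code: passing `request=` together with a flattened argument such as `parent=` or `name=` raises `ValueError`.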
replace_material_pyleecan_obj
Replace the first material by the second in the object Parameters ---------- obj: Pyleecan object mat1: Material material to replace mat2: Material new material comp_name_path: bool if True, replace only materials strictly equal to mat1 (name and path included); if False, match materials through compare_material, ignoring mat1.name and mat1.path Returns ------- is_change: bool True if a material has been replaced
from ...Classes.Material import Material from ...Functions.Material.compare_material import compare_material # MASKED: replace_material_pyleecan_obj function (lines 5-56)
def replace_material_pyleecan_obj(obj, mat1, mat2, comp_name_path=True): """ replace first material by the second in the object Parameters ---------- obj: Pyleecan object mat1: Material material to replace mat2: Material new material comp_name_path: bool replace strictly mat1 or replace materials without comparing mat1.path and mat1.name Returns ------- is_change: bool True if a material has been replaced """ is_change = False obj_dict = obj.as_dict() if comp_name_path: for key, val in obj_dict.items(): if isinstance(getattr(obj, key), Material) and getattr(obj, key) == mat1: setattr(obj, key, mat2) is_change = True # Call the function recursively to modify attributes materials elif isinstance(val, dict): is_change_recurs = replace_material_pyleecan_obj( getattr(obj, key), mat1, mat2, comp_name_path ) # update is_change if needed if not is_change: is_change = is_change_recurs else: for key, val in obj_dict.items(): # Compare materials with mat1 without name and path if isinstance(getattr(obj, key), Material) and compare_material( getattr(obj, key), mat1 ): setattr(obj, key, mat2) is_change = True # Call the function recursively to modify attributes materials elif isinstance(val, dict): is_change_recurs = replace_material_pyleecan_obj( getattr(obj, key), mat1, mat2, comp_name_path ) # update is_change if needed if not is_change: is_change = is_change_recurs return is_change
5
56
from ...Classes.Material import Material from ...Functions.Material.compare_material import compare_material def replace_material_pyleecan_obj(obj, mat1, mat2, comp_name_path=True): """ replace first material by the second in the object Parameters ---------- obj: Pyleecan object mat1: Material material to replace mat2: Material new material comp_name_path: bool replace strictly mat1 or replace materials without comparing mat1.path and mat1.name Returns ------- is_change: bool True if a material has been replaced """ is_change = False obj_dict = obj.as_dict() if comp_name_path: for key, val in obj_dict.items(): if isinstance(getattr(obj, key), Material) and getattr(obj, key) == mat1: setattr(obj, key, mat2) is_change = True # Call the function recursively to modify attributes materials elif isinstance(val, dict): is_change_recurs = replace_material_pyleecan_obj( getattr(obj, key), mat1, mat2, comp_name_path ) # update is_change if needed if not is_change: is_change = is_change_recurs else: for key, val in obj_dict.items(): # Compare materials with mat1 without name and path if isinstance(getattr(obj, key), Material) and compare_material( getattr(obj, key), mat1 ): setattr(obj, key, mat2) is_change = True # Call the function recursively to modify attributes materials elif isinstance(val, dict): is_change_recurs = replace_material_pyleecan_obj( getattr(obj, key), mat1, mat2, comp_name_path ) # update is_change if needed if not is_change: is_change = is_change_recurs return is_change
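A minimal usage sketch for `replace_material_pyleecan_obj`, assuming pyleecan is installed; the machine file path and the `stator.mat_type` attribute are assumptions for illustration and may differ between pyleecan versions:

```python
# Hedged sketch: the file path and attribute names are illustrative only.
from pyleecan.Functions.load import load
from pyleecan.Functions.Material.replace_material_pyleecan_obj import (
    replace_material_pyleecan_obj,
)
from pyleecan.Classes.Material import Material

machine = load("path/to/machine.json")  # hypothetical machine file
old_mat = machine.stator.mat_type       # assumed Material attribute
new_mat = Material(name="M400-50A_custom")

# With comp_name_path=True (default), only attributes strictly equal to
# old_mat are swapped; the function recurses through nested objects.
is_change = replace_material_pyleecan_obj(machine, old_mat, new_mat)
print(is_change)  # True if at least one material was replaced
```

With `comp_name_path=False`, candidates are matched through `compare_material`, which ignores `name` and `path`, so renamed copies of the same material are replaced as well.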
generate_anchors_for_grid_cell
Like ATSS, generate anchors based on grid size. Args: feats (List[Tensor]): shape[s, (b, c, h, w)] fpn_strides (tuple|list): shape[s], stride for each scale feature grid_cell_size (float): anchor size grid_cell_offset (float): The range is between 0 and 1. Returns: anchors (List[Tensor]): shape[s, (l, 4)] num_anchors_list (List[int]): shape[s] stride_tensor_list (List[Tensor]): shape[s, (l, 1)]
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import paddle import paddle.nn.functional as F __all__ = [ 'pad_gt', 'gather_topk_anchors', 'check_points_inside_bboxes', 'compute_max_iou_anchor', 'compute_max_iou_gt', 'generate_anchors_for_grid_cell' ] def pad_gt(gt_labels, gt_bboxes, gt_scores=None): r""" Pad 0 in gt_labels and gt_bboxes. Args: gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape is [B, n, 1] or [[n_1, 1], [n_2, 1], ...], here n = sum(n_i) gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape is [B, n, 4] or [[n_1, 4], [n_2, 4], ...], here n = sum(n_i) gt_scores (Tensor|List[Tensor]|None, float32): Score of gt_bboxes, shape is [B, n, 1] or [[n_1, 4], [n_2, 4], ...], here n = sum(n_i) Returns: pad_gt_labels (Tensor, int64): shape[B, n, 1] pad_gt_bboxes (Tensor, float32): shape[B, n, 4] pad_gt_scores (Tensor, float32): shape[B, n, 1] pad_gt_mask (Tensor, float32): shape[B, n, 1], 1 means bbox, 0 means no bbox """ if isinstance(gt_labels, paddle.Tensor) and isinstance(gt_bboxes, paddle.Tensor): assert gt_labels.ndim == gt_bboxes.ndim and \ gt_bboxes.ndim == 3 pad_gt_mask = ( gt_bboxes.sum(axis=-1, keepdim=True) > 0).astype(gt_bboxes.dtype) if gt_scores is None: gt_scores = pad_gt_mask.clone() assert gt_labels.ndim == gt_scores.ndim return gt_labels, gt_bboxes, gt_scores, pad_gt_mask elif isinstance(gt_labels, list) and isinstance(gt_bboxes, list): assert len(gt_labels) == len(gt_bboxes), \ 'The number of `gt_labels` and `gt_bboxes` is not equal. ' num_max_boxes = max([len(a) for a in gt_bboxes]) batch_size = len(gt_bboxes) # pad label and bbox pad_gt_labels = paddle.zeros( [batch_size, num_max_boxes, 1], dtype=gt_labels[0].dtype) pad_gt_bboxes = paddle.zeros( [batch_size, num_max_boxes, 4], dtype=gt_bboxes[0].dtype) pad_gt_scores = paddle.zeros( [batch_size, num_max_boxes, 1], dtype=gt_bboxes[0].dtype) pad_gt_mask = paddle.zeros( [batch_size, num_max_boxes, 1], dtype=gt_bboxes[0].dtype) for i, (label, bbox) in enumerate(zip(gt_labels, gt_bboxes)): if len(label) > 0 and len(bbox) > 0: pad_gt_labels[i, :len(label)] = label pad_gt_bboxes[i, :len(bbox)] = bbox pad_gt_mask[i, :len(bbox)] = 1. if gt_scores is not None: pad_gt_scores[i, :len(gt_scores[i])] = gt_scores[i] if gt_scores is None: pad_gt_scores = pad_gt_mask.clone() return pad_gt_labels, pad_gt_bboxes, pad_gt_scores, pad_gt_mask else: raise ValueError('The input `gt_labels` or `gt_bboxes` is invalid! ') def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9): r""" Args: metrics (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors topk (int): The number of top elements to look for along the axis. largest (bool) : largest is a flag, if set to true, algorithm will sort by descending order, otherwise sort by ascending order. 
Default: True topk_mask (Tensor, bool|None): shape[B, n, topk], ignore bbox mask, Default: None eps (float): Default: 1e-9 Returns: is_in_topk (Tensor, float32): shape[B, n, L], value=1. means selected """ num_anchors = metrics.shape[-1] topk_metrics, topk_idxs = paddle.topk( metrics, topk, axis=-1, largest=largest) if topk_mask is None: topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > eps).tile( [1, 1, topk]) topk_idxs = paddle.where(topk_mask, topk_idxs, paddle.zeros_like(topk_idxs)) is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2) is_in_topk = paddle.where(is_in_topk > 1, paddle.zeros_like(is_in_topk), is_in_topk) return is_in_topk.astype(metrics.dtype) def check_points_inside_bboxes(points, bboxes, eps=1e-9): r""" Args: points (Tensor, float32): shape[L, 2], "xy" format, L: num_anchors bboxes (Tensor, float32): shape[B, n, 4], "xmin, ymin, xmax, ymax" format eps (float): Default: 1e-9 Returns: is_in_bboxes (Tensor, float32): shape[B, n, L], value=1. means selected """ points = points.unsqueeze([0, 1]) x, y = points.chunk(2, axis=-1) xmin, ymin, xmax, ymax = bboxes.unsqueeze(2).chunk(4, axis=-1) l = x - xmin t = y - ymin r = xmax - x b = ymax - y bbox_ltrb = paddle.concat([l, t, r, b], axis=-1) return (bbox_ltrb.min(axis=-1) > eps).astype(bboxes.dtype) def compute_max_iou_anchor(ious): r""" For each anchor, find the GT with the largest IOU. Args: ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors Returns: is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected """ num_max_boxes = ious.shape[-2] max_iou_index = ious.argmax(axis=-2) is_max_iou = F.one_hot(max_iou_index, num_max_boxes).transpose([0, 2, 1]) return is_max_iou.astype(ious.dtype) def compute_max_iou_gt(ious): r""" For each GT, find the anchor with the largest IOU. Args: ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors Returns: is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected """ num_anchors = ious.shape[-1] max_iou_index = ious.argmax(axis=-1) is_max_iou = F.one_hot(max_iou_index, num_anchors) return is_max_iou.astype(ious.dtype) # MASKED: generate_anchors_for_grid_cell function (lines 159-195)
def generate_anchors_for_grid_cell(feats, fpn_strides, grid_cell_size=5.0, grid_cell_offset=0.5): r""" Like ATSS, generate anchors based on grid size. Args: feats (List[Tensor]): shape[s, (b, c, h, w)] fpn_strides (tuple|list): shape[s], stride for each scale feature grid_cell_size (float): anchor size grid_cell_offset (float): The range is between 0 and 1. Returns: anchors (List[Tensor]): shape[s, (l, 4)] num_anchors_list (List[int]): shape[s] stride_tensor_list (List[Tensor]): shape[s, (l, 1)] """ assert len(feats) == len(fpn_strides) anchors = [] num_anchors_list = [] stride_tensor_list = [] for feat, stride in zip(feats, fpn_strides): _, _, h, w = feat.shape cell_half_size = grid_cell_size * stride * 0.5 shift_x = (paddle.arange(end=w) + grid_cell_offset) * stride shift_y = (paddle.arange(end=h) + grid_cell_offset) * stride shift_y, shift_x = paddle.meshgrid(shift_y, shift_x) anchor = paddle.stack( [ shift_x - cell_half_size, shift_y - cell_half_size, shift_x + cell_half_size, shift_y + cell_half_size ], axis=-1).astype(feat.dtype) anchors.append(anchor.reshape([-1, 4])) num_anchors_list.append(len(anchors[-1])) stride_tensor_list.append( paddle.full([num_anchors_list[-1], 1], stride)) return anchors, num_anchors_list, stride_tensor_list
159
195
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import paddle import paddle.nn.functional as F __all__ = [ 'pad_gt', 'gather_topk_anchors', 'check_points_inside_bboxes', 'compute_max_iou_anchor', 'compute_max_iou_gt', 'generate_anchors_for_grid_cell' ] def pad_gt(gt_labels, gt_bboxes, gt_scores=None): r""" Pad 0 in gt_labels and gt_bboxes. Args: gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape is [B, n, 1] or [[n_1, 1], [n_2, 1], ...], here n = sum(n_i) gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape is [B, n, 4] or [[n_1, 4], [n_2, 4], ...], here n = sum(n_i) gt_scores (Tensor|List[Tensor]|None, float32): Score of gt_bboxes, shape is [B, n, 1] or [[n_1, 4], [n_2, 4], ...], here n = sum(n_i) Returns: pad_gt_labels (Tensor, int64): shape[B, n, 1] pad_gt_bboxes (Tensor, float32): shape[B, n, 4] pad_gt_scores (Tensor, float32): shape[B, n, 1] pad_gt_mask (Tensor, float32): shape[B, n, 1], 1 means bbox, 0 means no bbox """ if isinstance(gt_labels, paddle.Tensor) and isinstance(gt_bboxes, paddle.Tensor): assert gt_labels.ndim == gt_bboxes.ndim and \ gt_bboxes.ndim == 3 pad_gt_mask = ( gt_bboxes.sum(axis=-1, keepdim=True) > 0).astype(gt_bboxes.dtype) if gt_scores is None: gt_scores = pad_gt_mask.clone() assert gt_labels.ndim == gt_scores.ndim return gt_labels, gt_bboxes, gt_scores, pad_gt_mask elif isinstance(gt_labels, list) and isinstance(gt_bboxes, list): assert len(gt_labels) == len(gt_bboxes), \ 'The number of `gt_labels` and `gt_bboxes` is not equal. ' num_max_boxes = max([len(a) for a in gt_bboxes]) batch_size = len(gt_bboxes) # pad label and bbox pad_gt_labels = paddle.zeros( [batch_size, num_max_boxes, 1], dtype=gt_labels[0].dtype) pad_gt_bboxes = paddle.zeros( [batch_size, num_max_boxes, 4], dtype=gt_bboxes[0].dtype) pad_gt_scores = paddle.zeros( [batch_size, num_max_boxes, 1], dtype=gt_bboxes[0].dtype) pad_gt_mask = paddle.zeros( [batch_size, num_max_boxes, 1], dtype=gt_bboxes[0].dtype) for i, (label, bbox) in enumerate(zip(gt_labels, gt_bboxes)): if len(label) > 0 and len(bbox) > 0: pad_gt_labels[i, :len(label)] = label pad_gt_bboxes[i, :len(bbox)] = bbox pad_gt_mask[i, :len(bbox)] = 1. if gt_scores is not None: pad_gt_scores[i, :len(gt_scores[i])] = gt_scores[i] if gt_scores is None: pad_gt_scores = pad_gt_mask.clone() return pad_gt_labels, pad_gt_bboxes, pad_gt_scores, pad_gt_mask else: raise ValueError('The input `gt_labels` or `gt_bboxes` is invalid! ') def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9): r""" Args: metrics (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors topk (int): The number of top elements to look for along the axis. largest (bool) : largest is a flag, if set to true, algorithm will sort by descending order, otherwise sort by ascending order. 
Default: True topk_mask (Tensor, bool|None): shape[B, n, topk], ignore bbox mask, Default: None eps (float): Default: 1e-9 Returns: is_in_topk (Tensor, float32): shape[B, n, L], value=1. means selected """ num_anchors = metrics.shape[-1] topk_metrics, topk_idxs = paddle.topk( metrics, topk, axis=-1, largest=largest) if topk_mask is None: topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > eps).tile( [1, 1, topk]) topk_idxs = paddle.where(topk_mask, topk_idxs, paddle.zeros_like(topk_idxs)) is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2) is_in_topk = paddle.where(is_in_topk > 1, paddle.zeros_like(is_in_topk), is_in_topk) return is_in_topk.astype(metrics.dtype) def check_points_inside_bboxes(points, bboxes, eps=1e-9): r""" Args: points (Tensor, float32): shape[L, 2], "xy" format, L: num_anchors bboxes (Tensor, float32): shape[B, n, 4], "xmin, ymin, xmax, ymax" format eps (float): Default: 1e-9 Returns: is_in_bboxes (Tensor, float32): shape[B, n, L], value=1. means selected """ points = points.unsqueeze([0, 1]) x, y = points.chunk(2, axis=-1) xmin, ymin, xmax, ymax = bboxes.unsqueeze(2).chunk(4, axis=-1) l = x - xmin t = y - ymin r = xmax - x b = ymax - y bbox_ltrb = paddle.concat([l, t, r, b], axis=-1) return (bbox_ltrb.min(axis=-1) > eps).astype(bboxes.dtype) def compute_max_iou_anchor(ious): r""" For each anchor, find the GT with the largest IOU. Args: ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors Returns: is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected """ num_max_boxes = ious.shape[-2] max_iou_index = ious.argmax(axis=-2) is_max_iou = F.one_hot(max_iou_index, num_max_boxes).transpose([0, 2, 1]) return is_max_iou.astype(ious.dtype) def compute_max_iou_gt(ious): r""" For each GT, find the anchor with the largest IOU. Args: ious (Tensor, float32): shape[B, n, L], n: num_gts, L: num_anchors Returns: is_max_iou (Tensor, float32): shape[B, n, L], value=1. means selected """ num_anchors = ious.shape[-1] max_iou_index = ious.argmax(axis=-1) is_max_iou = F.one_hot(max_iou_index, num_anchors) return is_max_iou.astype(ious.dtype) def generate_anchors_for_grid_cell(feats, fpn_strides, grid_cell_size=5.0, grid_cell_offset=0.5): r""" Like ATSS, generate anchors based on grid size. Args: feats (List[Tensor]): shape[s, (b, c, h, w)] fpn_strides (tuple|list): shape[s], stride for each scale feature grid_cell_size (float): anchor size grid_cell_offset (float): The range is between 0 and 1. Returns: anchors (List[Tensor]): shape[s, (l, 4)] num_anchors_list (List[int]): shape[s] stride_tensor_list (List[Tensor]): shape[s, (l, 1)] """ assert len(feats) == len(fpn_strides) anchors = [] num_anchors_list = [] stride_tensor_list = [] for feat, stride in zip(feats, fpn_strides): _, _, h, w = feat.shape cell_half_size = grid_cell_size * stride * 0.5 shift_x = (paddle.arange(end=w) + grid_cell_offset) * stride shift_y = (paddle.arange(end=h) + grid_cell_offset) * stride shift_y, shift_x = paddle.meshgrid(shift_y, shift_x) anchor = paddle.stack( [ shift_x - cell_half_size, shift_y - cell_half_size, shift_x + cell_half_size, shift_y + cell_half_size ], axis=-1).astype(feat.dtype) anchors.append(anchor.reshape([-1, 4])) num_anchors_list.append(len(anchors[-1])) stride_tensor_list.append( paddle.full([num_anchors_list[-1], 1], stride)) return anchors, num_anchors_list, stride_tensor_list
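A minimal usage sketch for `generate_anchors_for_grid_cell`, assuming PaddlePaddle is installed and the module above is importable; the feature shapes below mimic a three-level FPN (strides 8/16/32) over a 256x256 input:

```python
import paddle

# Dummy FPN outputs: (batch, channels, h, w) with h = w = input_size / stride.
feats = [
    paddle.rand([2, 64, 32, 32]),  # stride 8
    paddle.rand([2, 64, 16, 16]),  # stride 16
    paddle.rand([2, 64, 8, 8]),    # stride 32
]
anchors, num_anchors_list, stride_tensor_list = generate_anchors_for_grid_cell(
    feats, fpn_strides=[8, 16, 32])

print([a.shape for a in anchors])  # [[1024, 4], [256, 4], [64, 4]]
print(num_anchors_list)            # [1024, 256, 64]
# Each anchor is a square of side grid_cell_size * stride, centered on its
# grid cell (offset 0.5), in xmin/ymin/xmax/ymax format.
```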
normalize
Normalize ``url`` according to policy. :param str url: The URL to be normalized. :returns: The normalized URL. :rtype: str
import logging import random import re from uuid import UUID import dateutil.parser import w3lib.url from yarl import URL from .captcha import CaptchaSolver from .starbelly_pb2 import ( PatternMatch as PbPatternMatch, PolicyRobotsTxt as PbPolicyRobotsTxt, PolicyUrlRule as PbPolicyUrlRule ) logger = logging.getLogger(__name__) ACTION_ENUM = PbPolicyUrlRule.Action MATCH_ENUM = PbPatternMatch USAGE_ENUM = PbPolicyRobotsTxt.Usage class PolicyValidationError(Exception): ''' Custom error for policy validation. ''' def _invalid(message, location=None): ''' A helper for validating policies. ''' if location is None: raise PolicyValidationError(f'{message}.') raise PolicyValidationError(f'{message} in {location}.') class Policy: ''' A container for subpolicies. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert policy from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.Policy ''' if 'id' in doc: pb.policy_id = UUID(doc['id']).bytes pb.name = doc['name'] pb.created_at = doc['created_at'].isoformat() pb.updated_at = doc['updated_at'].isoformat() # A copy of a policy is stored with each job, so we need to be able # to gracefully handle old policies that are missing expected fields. PolicyAuthentication.convert_doc_to_pb(doc.get('authentication', dict()), pb.authentication) if doc.get('captcha_solver_id') is not None: pb.captcha_solver_id = UUID(doc['captcha_solver_id']).bytes PolicyLimits.convert_doc_to_pb(doc.get('limits', dict()), pb.limits) PolicyMimeTypeRules.convert_doc_to_pb(doc.get('mime_type_rules', list()), pb.mime_type_rules) PolicyProxyRules.convert_doc_to_pb(doc.get('proxy_rules', list()), pb.proxy_rules) PolicyRobotsTxt.convert_doc_to_pb(doc.get('robots_txt', dict()), pb.robots_txt) PolicyUrlNormalization.convert_doc_to_pb(doc.get('url_normalization', dict()), pb.url_normalization) PolicyUrlRules.convert_doc_to_pb(doc.get('url_rules', list()), pb.url_rules) PolicyUserAgents.convert_doc_to_pb(doc.get('user_agents', list()), pb.user_agents) @staticmethod def convert_pb_to_doc(pb): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.Policy. :returns: Database document. 
:rtype: dict ''' doc = { 'name': pb.name, 'authentication': dict(), 'limits': dict(), 'mime_type_rules': list(), 'proxy_rules': list(), 'robots_txt': dict(), 'url_normalization': dict(), 'url_rules': list(), 'user_agents': list(), } if pb.HasField('policy_id'): doc['id'] = str(UUID(bytes=pb.policy_id)) if pb.HasField('created_at'): doc['created_at'] = dateutil.parser.parse(pb.created_at) if pb.HasField('updated_at'): doc['updated_at'] = dateutil.parser.parse(pb.updated_at) PolicyAuthentication.convert_pb_to_doc(pb.authentication, doc['authentication']) if pb.HasField('captcha_solver_id'): doc['captcha_solver_id'] = str(UUID(bytes=pb.captcha_solver_id)) else: doc['captcha_solver_id'] = None PolicyLimits.convert_pb_to_doc(pb.limits, doc['limits']) PolicyMimeTypeRules.convert_pb_to_doc(pb.mime_type_rules, doc['mime_type_rules']) PolicyProxyRules.convert_pb_to_doc(pb.proxy_rules, doc['proxy_rules']) PolicyRobotsTxt.convert_pb_to_doc(pb.robots_txt, doc['robots_txt']) PolicyUrlNormalization.convert_pb_to_doc(pb.url_normalization, doc['url_normalization']) PolicyUrlRules.convert_pb_to_doc(pb.url_rules, doc['url_rules']) PolicyUserAgents.convert_pb_to_doc(pb.user_agents, doc['user_agents']) return doc def __init__(self, doc, version, seeds): ''' Initialize a policy object from its database document. :param dict doc: A database document. :param str version: The version number of Starbelly that created the policy. :param list seeds: A list of seed URLs, used for computing costs for crawled links. ''' if doc['name'].strip() == '': _invalid('Policy name cannot be blank') self.authentication = PolicyAuthentication(doc['authentication']) if 'captcha_solver' in doc: self.captcha_solver = CaptchaSolver(doc['captcha_solver']) else: self.captcha_solver = None self.limits = PolicyLimits(doc['limits']) self.mime_type_rules = PolicyMimeTypeRules(doc['mime_type_rules']) self.proxy_rules = PolicyProxyRules(doc['proxy_rules']) self.robots_txt = PolicyRobotsTxt(doc['robots_txt']) self.url_normalization = PolicyUrlNormalization( doc['url_normalization']) self.url_rules = PolicyUrlRules(doc['url_rules'], seeds) self.user_agents = PolicyUserAgents(doc['user_agents'], version) def replace_mime_type_rules(self, rules): ''' Return a shallow copy of this policy with new MIME type rules from ``doc``. :param list rules: MIME type rules in database document form. :returns: A new policy. :rtype: Policy ''' policy = Policy.__new__(Policy) policy.authentication = self.authentication policy.captcha_solver = self.captcha_solver policy.limits = self.limits policy.mime_type_rules = PolicyMimeTypeRules(rules) policy.proxy_rules = self.proxy_rules policy.robots_txt = self.robots_txt policy.url_normalization = self.url_normalization policy.url_rules = self.url_rules policy.user_agents = self.user_agents return policy class PolicyAuthentication: ''' Policy for authenticated crawling. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyAuthentication ''' pb.enabled = doc['enabled'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyAuthentication :returns: Database document. :rtype: dict ''' doc['enabled'] = pb.enabled def __init__(self, doc): ''' Initialize from a database document. :param dict doc: A database document. 
''' self._enabled = doc.get('enabled', False) def is_enabled(self): ''' Return True if authentication is enabled. :rtype: bool ''' return self._enabled class PolicyLimits: ''' Limits on crawl size/duration. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyLimits ''' if doc.get('max_cost') is not None: pb.max_cost = doc['max_cost'] if doc.get('max_duration') is not None: pb.max_duration = doc['max_duration'] if doc.get('max_items') is not None: pb.max_items = doc['max_items'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyLimits :returns: Database document. :rtype: dict ''' doc['max_cost'] = pb.max_cost if pb.HasField('max_cost') else None doc['max_duration'] = pb.max_duration if pb.HasField('max_duration') \ else None doc['max_items'] = pb.max_items if pb.HasField('max_items') else None def __init__(self, doc): ''' Initialize from a database document. :param dict doc: A database document. ''' self._max_cost = doc.get('max_cost') self._max_duration = doc.get('max_duration') self._max_items = doc.get('max_items') if self._max_duration is not None and self._max_duration < 0: _invalid('Max duration must be ≥0') if self._max_items is not None and self._max_items < 0: _invalid('Max items must be ≥0') @property def max_duration(self): ''' The maximum duration that a crawl is allowed to run. :rtype: float or None ''' return self._max_duration def met_item_limit(self, items): ''' Return true if ``items`` is greater than or equal to the policy's max item count. :param int items: :rtype: bool ''' return self._max_items is not None and items >= self._max_items def exceeds_max_cost(self, cost): ''' Return true if ``cost`` is greater than the policy's max cost. :param float cost: :rtype: bool ''' return self._max_cost is not None and cost > self._max_cost class PolicyMimeTypeRules: ''' Filter responses by MIME type. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyMimeTypeRules ''' for doc_mime in doc: pb_mime = pb.add() if 'pattern' in doc_mime: pb_mime.pattern = doc_mime['pattern'] if 'match' in doc_mime: pb_mime.match = MATCH_ENUM.Value(doc_mime['match']) if 'save' in doc_mime: pb_mime.save = doc_mime['save'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyMimeTypeRules :returns: Database document. :rtype: dict ''' for pb_mime in pb: doc_mime = dict() if pb_mime.HasField('pattern'): doc_mime['pattern'] = pb_mime.pattern if pb_mime.HasField('match'): doc_mime['match'] = MATCH_ENUM.Name(pb_mime.match) if pb_mime.HasField('save'): doc_mime['save'] = pb_mime.save doc.append(doc_mime) def __init__(self, docs): ''' Initialize from database documents. :param docs: Database document. 
:type docs: list[dict] ''' if not docs: _invalid('At least one MIME type rule is required') # Rules are stored as list of tuples: (pattern, match, save) self._rules = list() max_index = len(docs) - 1 for index, mime_type_rule in enumerate(docs): if index < max_index: location = 'MIME type rule #{}'.format(index+1) if mime_type_rule.get('pattern', '').strip() == '': _invalid('Pattern is required', location) if 'save' not in mime_type_rule: _invalid('Save selector is required', location) if 'match' not in mime_type_rule: _invalid('Match selector is required', location) try: pattern_re = re.compile(mime_type_rule['pattern']) except: _invalid('Invalid regular expression', location) self._rules.append(( pattern_re, mime_type_rule['match'], mime_type_rule['save'], )) else: location = 'last MIME type rule' if 'save' not in mime_type_rule: _invalid('Save selector is required', location) if 'pattern' in mime_type_rule: _invalid('Pattern is not allowed', location) if 'match' in mime_type_rule: _invalid('Match selector is not allowed', location) self._rules.append((None, None, mime_type_rule['save'])) def should_save(self, mime_type): ''' Returns True if ``mime_type`` is approved by this policy. If rules are valid, this method always returns True or False. :param str mime_type: :rtype: bool ''' should_save = False for pattern, match, save in self._rules: if pattern is None: should_save = save break mimecheck = pattern.search(mime_type) is not None if match == 'DOES_NOT_MATCH': mimecheck = not mimecheck if mimecheck: should_save = save break return should_save class PolicyProxyRules: ''' Modify which proxies are used for each request. ''' PROXY_SCHEMES = ('http', 'https', 'socks4', 'socks4a', 'socks5') @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyProxyRules ''' for doc_proxy in doc: pb_proxy = pb.add() if 'pattern' in doc_proxy: pb_proxy.pattern = doc_proxy['pattern'] if 'match' in doc_proxy: pb_proxy.match = MATCH_ENUM.Value(doc_proxy['match']) if 'proxy_url' in doc_proxy: pb_proxy.proxy_url = doc_proxy['proxy_url'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyProxyRules :returns: Database document. :rtype: dict ''' for pb_proxy in pb: doc_proxy = dict() if pb_proxy.HasField('pattern'): doc_proxy['pattern'] = pb_proxy.pattern if pb_proxy.HasField('match'): doc_proxy['match'] = MATCH_ENUM.Name(pb_proxy.match) if pb_proxy.HasField('proxy_url'): doc_proxy['proxy_url'] = pb_proxy.proxy_url doc.append(doc_proxy) def __init__(self, docs): ''' Initialize from database documents. :param docs: Database document. 
:type docs: list[dict] ''' # Rules are stored as list of tuples: (pattern, match, proxy_type, # proxy_url) self._rules = list() max_index = len(docs) - 1 for index, proxy_rule in enumerate(docs): if index < max_index: location = 'proxy rule #{}'.format(index+1) if proxy_rule.get('pattern', '').strip() == '': _invalid('Pattern is required', location) try: pattern_re = re.compile(proxy_rule['pattern']) except: _invalid('Invalid regular expression', location) try: match = (proxy_rule['match'] == 'MATCHES') except KeyError: _invalid('Match selector is required', location) proxy_url = proxy_rule.get('proxy_url', '') if proxy_url == '': _invalid('Proxy URL is required', location) else: location = 'last proxy rule' if 'pattern' in proxy_rule: _invalid('Pattern is not allowed', location) if 'match' in proxy_rule: _invalid('Pattern is not allowed', location) pattern_re = None match = None proxy_type = None proxy_url = proxy_rule.get('proxy_url') if proxy_url is None: proxy_type = None else: try: parsed = URL(proxy_url) proxy_type = parsed.scheme if proxy_type not in self.PROXY_SCHEMES: raise ValueError() except: schemes = ', '.join(self.PROXY_SCHEMES) _invalid('Must have a valid URL with one of the ' f'following schemes: {schemes}', location) self._rules.append(( pattern_re, match, proxy_type, proxy_url, )) def get_proxy_url(self, target_url): ''' Return a proxy (type, URL) tuple associated with ``target_url`` or (None, None) if no such proxy is defined. :param str target_url: :rtype: tuple[proxy_type,URL] ''' proxy = None, None for pattern, needs_match, proxy_type, proxy_url in self._rules: if pattern is not None: has_match = pattern.search(target_url) is not None if has_match == needs_match: proxy = proxy_type, proxy_url break elif proxy_url is not None: proxy = proxy_type, proxy_url break return proxy class PolicyRobotsTxt: ''' Designate how robots.txt affects crawl behavior. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyRobotsTxt ''' pb.usage = USAGE_ENUM.Value(doc['usage']) @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyRobotsTxt :returns: Database document. :rtype: dict ''' if pb.HasField('usage'): doc['usage'] = USAGE_ENUM.Name(pb.usage) def __init__(self, doc): ''' Initialize from a database document. :param dict doc: A database document. ''' if 'usage' not in doc: _invalid('Robots.txt usage is required') self._usage = doc['usage'] @property def usage(self): ''' OBEY, IGNORE, or INVERT ''' return self._usage class PolicyUrlNormalization: ''' Customize URL normalization. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyUrlNormalization ''' if 'enabled' in doc: pb.enabled = doc['enabled'] if 'strip_parameters' in doc: pb.strip_parameters.extend(doc['strip_parameters']) @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyUrlNormalization :returns: Database document. :rtype: dict ''' if pb.HasField('enabled'): doc['enabled'] = pb.enabled doc['strip_parameters'] = list(pb.strip_parameters) def __init__(self, doc): ''' Initialize from a database document. 
:param dict doc: A database document. ''' self._enabled = doc.get('enabled', True) self._strip_parameters = doc.get('strip_parameters', list()) # MASKED: normalize function (lines 607-622) class PolicyUrlRules: ''' Customize link priorities based on URL. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyUrlRules ''' for doc_url in doc: pb_url = pb.add() if 'pattern' in doc_url: pb_url.pattern = doc_url['pattern'] if 'match' in doc_url: pb_url.match = MATCH_ENUM.Value(doc_url['match']) if 'action' in doc_url: pb_url.action = ACTION_ENUM.Value(doc_url['action']) if 'amount' in doc_url: pb_url.amount = doc_url['amount'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyUrlRules :returns: Database document. :rtype: dict ''' for pb_url in pb: doc_url = dict() if pb_url.HasField('pattern'): doc_url['pattern'] = pb_url.pattern if pb_url.HasField('match'): doc_url['match'] = MATCH_ENUM.Name(pb_url.match) if pb_url.HasField('action'): doc_url['action'] = ACTION_ENUM.Name(pb_url.action) if pb_url.HasField('amount'): doc_url['amount'] = pb_url.amount doc.append(doc_url) def __init__(self, docs, seeds): ''' Initialize from database documents. :param docs: Database document. :type docs: list[dict] :param seeds: Seed URLs, used for computing the costs for crawled links. :type seeds: list[str] ''' if not docs: _invalid('At least one URL rule is required') # Rules are stored as tuples: (pattern, match, action, amount) self._rules = list() max_index = len(docs) - 1 seed_domains = {URL(seed).host for seed in seeds} for index, url_rule in enumerate(docs): if index < max_index: location = 'URL rule #{}'.format(index+1) if url_rule.get('pattern', '').strip() == '': _invalid('Pattern is required', location) if 'match' not in url_rule: _invalid('Match selector is required', location) if 'action' not in url_rule: _invalid('Action selector is required', location) if 'amount' not in url_rule: _invalid('Amount is required', location) try: pattern_re = re.compile(url_rule['pattern'] .format(SEED_DOMAINS='|'.join(seed_domains))) except: _invalid('Invalid regular expression', location) self._rules.append(( pattern_re, url_rule['match'], url_rule['action'], url_rule['amount'], )) else: location = 'last URL rule' if 'pattern' in url_rule: _invalid('Pattern is not allowed', location) if 'match' in url_rule: _invalid('Match is not allowed', location) if 'action' not in url_rule: _invalid('Action is required', location) if 'amount' not in url_rule: _invalid('Amount is required', location) self._rules.append(( None, None, url_rule['action'], url_rule['amount'], )) def get_cost(self, parent_cost, url): ''' Return the cost for a URL. :param float parent_cost: The cost of the resource which yielded this URL. :param str url: The URL to compute cost for. :returns: Cost of ``url``. :rtype: float ''' # pylint: disable=undefined-loop-variable for pattern, match, action, amount in self._rules: if pattern is None: break else: result = pattern.search(url) is not None if match == 'DOES_NOT_MATCH': result = not result if result: break if action == 'ADD': return parent_cost + amount return parent_cost * amount class PolicyUserAgents: ''' Specify user agent string to send in HTTP requests. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. 
:param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyUserAgents ''' for doc_user_agent in doc: pb_user_agent = pb.add() pb_user_agent.name = doc_user_agent['name'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyUserAgents :returns: Database document. :rtype: dict ''' for user_agent in pb: doc.append({ 'name': user_agent.name, }) def __init__(self, docs, version): ''' Initialize from database documents. :param docs: Database document. :type docs: list[dict] :param str version: The version number interpolated into ``{VERSION}``. ''' if not docs: _invalid('At least one user agent is required') self._user_agents = list() for index, user_agent in enumerate(docs): location = 'User agent #{}'.format(index + 1) if user_agent.get('name', '').strip() == '': _invalid('Name is required', location) self._user_agents.append(user_agent['name'].format(VERSION=version)) def get_first_user_agent(self): ''' :returns: Return the first user agent. :rtype: str ''' return self._user_agents[0] def get_user_agent(self): ''' :returns: A randomly selected user agent string. :rtype: str ''' return random.choice(self._user_agents)
def normalize(self, url):
        ''' Normalize ``url`` according to policy.

        :param str url: The URL to be normalized.
        :returns: The normalized URL.
        :rtype: str
        '''
        if self._enabled:
            if self._strip_parameters:
                url = w3lib.url.url_query_cleaner(url, remove=True,
                    unique=False, parameterlist=self._strip_parameters)
            url = w3lib.url.canonicalize_url(url)
        return url
607
622
import logging import random import re from uuid import UUID import dateutil.parser import w3lib.url from yarl import URL from .captcha import CaptchaSolver from .starbelly_pb2 import ( PatternMatch as PbPatternMatch, PolicyRobotsTxt as PbPolicyRobotsTxt, PolicyUrlRule as PbPolicyUrlRule ) logger = logging.getLogger(__name__) ACTION_ENUM = PbPolicyUrlRule.Action MATCH_ENUM = PbPatternMatch USAGE_ENUM = PbPolicyRobotsTxt.Usage class PolicyValidationError(Exception): ''' Custom error for policy validation. ''' def _invalid(message, location=None): ''' A helper for validating policies. ''' if location is None: raise PolicyValidationError(f'{message}.') raise PolicyValidationError(f'{message} in {location}.') class Policy: ''' A container for subpolicies. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert policy from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.Policy ''' if 'id' in doc: pb.policy_id = UUID(doc['id']).bytes pb.name = doc['name'] pb.created_at = doc['created_at'].isoformat() pb.updated_at = doc['updated_at'].isoformat() # A copy of a policy is stored with each job, so we need to be able # to gracefully handle old policies that are missing expected fields. PolicyAuthentication.convert_doc_to_pb(doc.get('authentication', dict()), pb.authentication) if doc.get('captcha_solver_id') is not None: pb.captcha_solver_id = UUID(doc['captcha_solver_id']).bytes PolicyLimits.convert_doc_to_pb(doc.get('limits', dict()), pb.limits) PolicyMimeTypeRules.convert_doc_to_pb(doc.get('mime_type_rules', list()), pb.mime_type_rules) PolicyProxyRules.convert_doc_to_pb(doc.get('proxy_rules', list()), pb.proxy_rules) PolicyRobotsTxt.convert_doc_to_pb(doc.get('robots_txt', dict()), pb.robots_txt) PolicyUrlNormalization.convert_doc_to_pb(doc.get('url_normalization', dict()), pb.url_normalization) PolicyUrlRules.convert_doc_to_pb(doc.get('url_rules', list()), pb.url_rules) PolicyUserAgents.convert_doc_to_pb(doc.get('user_agents', list()), pb.user_agents) @staticmethod def convert_pb_to_doc(pb): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.Policy. :returns: Database document. 
:rtype: dict ''' doc = { 'name': pb.name, 'authentication': dict(), 'limits': dict(), 'mime_type_rules': list(), 'proxy_rules': list(), 'robots_txt': dict(), 'url_normalization': dict(), 'url_rules': list(), 'user_agents': list(), } if pb.HasField('policy_id'): doc['id'] = str(UUID(bytes=pb.policy_id)) if pb.HasField('created_at'): doc['created_at'] = dateutil.parser.parse(pb.created_at) if pb.HasField('updated_at'): doc['updated_at'] = dateutil.parser.parse(pb.updated_at) PolicyAuthentication.convert_pb_to_doc(pb.authentication, doc['authentication']) if pb.HasField('captcha_solver_id'): doc['captcha_solver_id'] = str(UUID(bytes=pb.captcha_solver_id)) else: doc['captcha_solver_id'] = None PolicyLimits.convert_pb_to_doc(pb.limits, doc['limits']) PolicyMimeTypeRules.convert_pb_to_doc(pb.mime_type_rules, doc['mime_type_rules']) PolicyProxyRules.convert_pb_to_doc(pb.proxy_rules, doc['proxy_rules']) PolicyRobotsTxt.convert_pb_to_doc(pb.robots_txt, doc['robots_txt']) PolicyUrlNormalization.convert_pb_to_doc(pb.url_normalization, doc['url_normalization']) PolicyUrlRules.convert_pb_to_doc(pb.url_rules, doc['url_rules']) PolicyUserAgents.convert_pb_to_doc(pb.user_agents, doc['user_agents']) return doc def __init__(self, doc, version, seeds): ''' Initialize a policy object from its database document. :param dict doc: A database document. :param str version: The version number of Starbelly that created the policy. :param list seeds: A list of seed URLs, used for computing costs for crawled links. ''' if doc['name'].strip() == '': _invalid('Policy name cannot be blank') self.authentication = PolicyAuthentication(doc['authentication']) if 'captcha_solver' in doc: self.captcha_solver = CaptchaSolver(doc['captcha_solver']) else: self.captcha_solver = None self.limits = PolicyLimits(doc['limits']) self.mime_type_rules = PolicyMimeTypeRules(doc['mime_type_rules']) self.proxy_rules = PolicyProxyRules(doc['proxy_rules']) self.robots_txt = PolicyRobotsTxt(doc['robots_txt']) self.url_normalization = PolicyUrlNormalization( doc['url_normalization']) self.url_rules = PolicyUrlRules(doc['url_rules'], seeds) self.user_agents = PolicyUserAgents(doc['user_agents'], version) def replace_mime_type_rules(self, rules): ''' Return a shallow copy of this policy with new MIME type rules from ``doc``. :param list rules: MIME type rules in database document form. :returns: A new policy. :rtype: Policy ''' policy = Policy.__new__(Policy) policy.authentication = self.authentication policy.captcha_solver = self.captcha_solver policy.limits = self.limits policy.mime_type_rules = PolicyMimeTypeRules(rules) policy.proxy_rules = self.proxy_rules policy.robots_txt = self.robots_txt policy.url_normalization = self.url_normalization policy.url_rules = self.url_rules policy.user_agents = self.user_agents return policy class PolicyAuthentication: ''' Policy for authenticated crawling. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyAuthentication ''' pb.enabled = doc['enabled'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyAuthentication :returns: Database document. :rtype: dict ''' doc['enabled'] = pb.enabled def __init__(self, doc): ''' Initialize from a database document. :param dict doc: A database document. 
''' self._enabled = doc.get('enabled', False) def is_enabled(self): ''' Return True if authentication is enabled. :rtype: bool ''' return self._enabled class PolicyLimits: ''' Limits on crawl size/duration. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyLimits ''' if doc.get('max_cost') is not None: pb.max_cost = doc['max_cost'] if doc.get('max_duration') is not None: pb.max_duration = doc['max_duration'] if doc.get('max_items') is not None: pb.max_items = doc['max_items'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyLimits :returns: Database document. :rtype: dict ''' doc['max_cost'] = pb.max_cost if pb.HasField('max_cost') else None doc['max_duration'] = pb.max_duration if pb.HasField('max_duration') \ else None doc['max_items'] = pb.max_items if pb.HasField('max_items') else None def __init__(self, doc): ''' Initialize from a database document. :param dict doc: A database document. ''' self._max_cost = doc.get('max_cost') self._max_duration = doc.get('max_duration') self._max_items = doc.get('max_items') if self._max_duration is not None and self._max_duration < 0: _invalid('Max duration must be ≥0') if self._max_items is not None and self._max_items < 0: _invalid('Max items must be ≥0') @property def max_duration(self): ''' The maximum duration that a crawl is allowed to run. :rtype: float or None ''' return self._max_duration def met_item_limit(self, items): ''' Return true if ``items`` is greater than or equal to the policy's max item count. :param int items: :rtype: bool ''' return self._max_items is not None and items >= self._max_items def exceeds_max_cost(self, cost): ''' Return true if ``cost`` is greater than the policy's max cost. :param float cost: :rtype: bool ''' return self._max_cost is not None and cost > self._max_cost class PolicyMimeTypeRules: ''' Filter responses by MIME type. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyMimeTypeRules ''' for doc_mime in doc: pb_mime = pb.add() if 'pattern' in doc_mime: pb_mime.pattern = doc_mime['pattern'] if 'match' in doc_mime: pb_mime.match = MATCH_ENUM.Value(doc_mime['match']) if 'save' in doc_mime: pb_mime.save = doc_mime['save'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyMimeTypeRules :returns: Database document. :rtype: dict ''' for pb_mime in pb: doc_mime = dict() if pb_mime.HasField('pattern'): doc_mime['pattern'] = pb_mime.pattern if pb_mime.HasField('match'): doc_mime['match'] = MATCH_ENUM.Name(pb_mime.match) if pb_mime.HasField('save'): doc_mime['save'] = pb_mime.save doc.append(doc_mime) def __init__(self, docs): ''' Initialize from database documents. :param docs: Database document. 
:type docs: list[dict] ''' if not docs: _invalid('At least one MIME type rule is required') # Rules are stored as list of tuples: (pattern, match, save) self._rules = list() max_index = len(docs) - 1 for index, mime_type_rule in enumerate(docs): if index < max_index: location = 'MIME type rule #{}'.format(index+1) if mime_type_rule.get('pattern', '').strip() == '': _invalid('Pattern is required', location) if 'save' not in mime_type_rule: _invalid('Save selector is required', location) if 'match' not in mime_type_rule: _invalid('Match selector is required', location) try: pattern_re = re.compile(mime_type_rule['pattern']) except: _invalid('Invalid regular expression', location) self._rules.append(( pattern_re, mime_type_rule['match'], mime_type_rule['save'], )) else: location = 'last MIME type rule' if 'save' not in mime_type_rule: _invalid('Save selector is required', location) if 'pattern' in mime_type_rule: _invalid('Pattern is not allowed', location) if 'match' in mime_type_rule: _invalid('Match selector is not allowed', location) self._rules.append((None, None, mime_type_rule['save'])) def should_save(self, mime_type): ''' Returns True if ``mime_type`` is approved by this policy. If rules are valid, this method always returns True or False. :param str mime_type: :rtype: bool ''' should_save = False for pattern, match, save in self._rules: if pattern is None: should_save = save break mimecheck = pattern.search(mime_type) is not None if match == 'DOES_NOT_MATCH': mimecheck = not mimecheck if mimecheck: should_save = save break return should_save class PolicyProxyRules: ''' Modify which proxies are used for each request. ''' PROXY_SCHEMES = ('http', 'https', 'socks4', 'socks4a', 'socks5') @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyProxyRules ''' for doc_proxy in doc: pb_proxy = pb.add() if 'pattern' in doc_proxy: pb_proxy.pattern = doc_proxy['pattern'] if 'match' in doc_proxy: pb_proxy.match = MATCH_ENUM.Value(doc_proxy['match']) if 'proxy_url' in doc_proxy: pb_proxy.proxy_url = doc_proxy['proxy_url'] @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyProxyRules :returns: Database document. :rtype: dict ''' for pb_proxy in pb: doc_proxy = dict() if pb_proxy.HasField('pattern'): doc_proxy['pattern'] = pb_proxy.pattern if pb_proxy.HasField('match'): doc_proxy['match'] = MATCH_ENUM.Name(pb_proxy.match) if pb_proxy.HasField('proxy_url'): doc_proxy['proxy_url'] = pb_proxy.proxy_url doc.append(doc_proxy) def __init__(self, docs): ''' Initialize from database documents. :param docs: Database document. 
:type docs: list[dict] ''' # Rules are stored as list of tuples: (pattern, match, proxy_type, # proxy_url) self._rules = list() max_index = len(docs) - 1 for index, proxy_rule in enumerate(docs): if index < max_index: location = 'proxy rule #{}'.format(index+1) if proxy_rule.get('pattern', '').strip() == '': _invalid('Pattern is required', location) try: pattern_re = re.compile(proxy_rule['pattern']) except: _invalid('Invalid regular expression', location) try: match = (proxy_rule['match'] == 'MATCHES') except KeyError: _invalid('Match selector is required', location) proxy_url = proxy_rule.get('proxy_url', '') if proxy_url == '': _invalid('Proxy URL is required', location) else: location = 'last proxy rule' if 'pattern' in proxy_rule: _invalid('Pattern is not allowed', location) if 'match' in proxy_rule: _invalid('Pattern is not allowed', location) pattern_re = None match = None proxy_type = None proxy_url = proxy_rule.get('proxy_url') if proxy_url is None: proxy_type = None else: try: parsed = URL(proxy_url) proxy_type = parsed.scheme if proxy_type not in self.PROXY_SCHEMES: raise ValueError() except: schemes = ', '.join(self.PROXY_SCHEMES) _invalid('Must have a valid URL with one of the ' f'following schemes: {schemes}', location) self._rules.append(( pattern_re, match, proxy_type, proxy_url, )) def get_proxy_url(self, target_url): ''' Return a proxy (type, URL) tuple associated with ``target_url`` or (None, None) if no such proxy is defined. :param str target_url: :rtype: tuple[proxy_type,URL] ''' proxy = None, None for pattern, needs_match, proxy_type, proxy_url in self._rules: if pattern is not None: has_match = pattern.search(target_url) is not None if has_match == needs_match: proxy = proxy_type, proxy_url break elif proxy_url is not None: proxy = proxy_type, proxy_url break return proxy class PolicyRobotsTxt: ''' Designate how robots.txt affects crawl behavior. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyRobotsTxt ''' pb.usage = USAGE_ENUM.Value(doc['usage']) @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyRobotsTxt :returns: Database document. :rtype: dict ''' if pb.HasField('usage'): doc['usage'] = USAGE_ENUM.Name(pb.usage) def __init__(self, doc): ''' Initialize from a database document. :param dict doc: A database document. ''' if 'usage' not in doc: _invalid('Robots.txt usage is required') self._usage = doc['usage'] @property def usage(self): ''' OBEY, IGNORE, or INVERT ''' return self._usage class PolicyUrlNormalization: ''' Customize URL normalization. ''' @staticmethod def convert_doc_to_pb(doc, pb): ''' Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyUrlNormalization ''' if 'enabled' in doc: pb.enabled = doc['enabled'] if 'strip_parameters' in doc: pb.strip_parameters.extend(doc['strip_parameters']) @staticmethod def convert_pb_to_doc(pb, doc): ''' Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyUrlNormalization :returns: Database document. :rtype: dict ''' if pb.HasField('enabled'): doc['enabled'] = pb.enabled doc['strip_parameters'] = list(pb.strip_parameters) def __init__(self, doc): ''' Initialize from a database document. 
        :param dict doc: A database document.
        '''
        self._enabled = doc.get('enabled', True)
        self._strip_parameters = doc.get('strip_parameters', list())

    def normalize(self, url):
        '''
        Normalize ``url`` according to policy.

        :param str url: The URL to be normalized.
        :returns: The normalized URL.
        :rtype: str
        '''
        if self._enabled:
            if self._strip_parameters:
                url = w3lib.url.url_query_cleaner(url,
                    remove=True, unique=False,
                    parameterlist=self._strip_parameters)
            url = w3lib.url.canonicalize_url(url)
        return url


class PolicyUrlRules:
    '''
    Customize link priorities based on URL.
    '''
    @staticmethod
    def convert_doc_to_pb(doc, pb):
        '''
        Convert from database documents to protobuf.

        :param list doc: Database documents.
        :param pb: An empty protobuf.
        :type pb: starbelly.starbelly_pb2.PolicyUrlRules
        '''
        for doc_url in doc:
            pb_url = pb.add()
            if 'pattern' in doc_url:
                pb_url.pattern = doc_url['pattern']
            if 'match' in doc_url:
                pb_url.match = MATCH_ENUM.Value(doc_url['match'])
            if 'action' in doc_url:
                pb_url.action = ACTION_ENUM.Value(doc_url['action'])
            if 'amount' in doc_url:
                pb_url.amount = doc_url['amount']

    @staticmethod
    def convert_pb_to_doc(pb, doc):
        '''
        Convert protobuf to database documents.

        :param pb: A protobuf
        :type pb: starbelly.starbelly_pb2.PolicyUrlRules
        :param list doc: An output list; one document is appended to it
            for each rule in the protobuf.
        '''
        for pb_url in pb:
            doc_url = dict()
            if pb_url.HasField('pattern'):
                doc_url['pattern'] = pb_url.pattern
            if pb_url.HasField('match'):
                doc_url['match'] = MATCH_ENUM.Name(pb_url.match)
            if pb_url.HasField('action'):
                doc_url['action'] = ACTION_ENUM.Name(pb_url.action)
            if pb_url.HasField('amount'):
                doc_url['amount'] = pb_url.amount
            doc.append(doc_url)

    def __init__(self, docs, seeds):
        '''
        Initialize from database documents.

        :param docs: Database documents.
        :type docs: list[dict]
        :param seeds: Seed URLs, used for computing the costs for crawled
            links.
        :type seeds: list[str]
        '''
        if not docs:
            _invalid('At least one URL rule is required')

        # Rules are stored as tuples: (pattern, match, action, amount)
        self._rules = list()
        max_index = len(docs) - 1
        seed_domains = {URL(seed).host for seed in seeds}

        for index, url_rule in enumerate(docs):
            if index < max_index:
                location = 'URL rule #{}'.format(index+1)
                if url_rule.get('pattern', '').strip() == '':
                    _invalid('Pattern is required', location)
                if 'match' not in url_rule:
                    _invalid('Match selector is required', location)
                if 'action' not in url_rule:
                    _invalid('Action selector is required', location)
                if 'amount' not in url_rule:
                    _invalid('Amount is required', location)
                try:
                    # The pattern may reference the seed domains through a
                    # {SEED_DOMAINS} placeholder, which is interpolated
                    # here before compiling.
                    pattern_re = re.compile(url_rule['pattern']
                        .format(SEED_DOMAINS='|'.join(seed_domains)))
                except (re.error, KeyError, IndexError, ValueError):
                    _invalid('Invalid regular expression', location)
                self._rules.append((
                    pattern_re,
                    url_rule['match'],
                    url_rule['action'],
                    url_rule['amount'],
                ))
            else:
                location = 'last URL rule'
                if 'pattern' in url_rule:
                    _invalid('Pattern is not allowed', location)
                if 'match' in url_rule:
                    _invalid('Match selector is not allowed', location)
                if 'action' not in url_rule:
                    _invalid('Action is required', location)
                if 'amount' not in url_rule:
                    _invalid('Amount is required', location)
                self._rules.append((
                    None,
                    None,
                    url_rule['action'],
                    url_rule['amount'],
                ))

    def get_cost(self, parent_cost, url):
        '''
        Return the cost for a URL.

        :param float parent_cost: The cost of the resource which yielded
            this URL.
        :param str url: The URL to compute cost for.
        :returns: Cost of ``url``.
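            For a matching ``ADD`` rule the result is ``parent_cost +
            amount``; for any other action it is ``parent_cost * amount``.
            (E.g. a parent cost of 1.0 and an ADD amount of 2.0 yield 3.0.)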
        :rtype: float
        '''
        # The constructor guarantees at least one rule and that the last
        # rule has no pattern, so the loop below always breaks and
        # ``action`` and ``amount`` are always bound.
        # pylint: disable=undefined-loop-variable
        for pattern, match, action, amount in self._rules:
            if pattern is None:
                break
            result = pattern.search(url) is not None
            if match == 'DOES_NOT_MATCH':
                result = not result
            if result:
                break

        if action == 'ADD':
            return parent_cost + amount
        return parent_cost * amount


class PolicyUserAgents:
    '''
    Specify user agent string to send in HTTP requests.
    '''
    @staticmethod
    def convert_doc_to_pb(doc, pb):
        '''
        Convert from database documents to protobuf.

        :param list doc: Database documents.
        :param pb: An empty protobuf.
        :type pb: starbelly.starbelly_pb2.PolicyUserAgents
        '''
        for doc_user_agent in doc:
            pb_user_agent = pb.add()
            pb_user_agent.name = doc_user_agent['name']

    @staticmethod
    def convert_pb_to_doc(pb, doc):
        '''
        Convert protobuf to database documents.

        :param pb: A protobuf
        :type pb: starbelly.starbelly_pb2.PolicyUserAgents
        :param list doc: An output list; one document is appended to it
            for each user agent in the protobuf.
        '''
        for user_agent in pb:
            doc.append({
                'name': user_agent.name,
            })

    def __init__(self, docs, version):
        '''
        Initialize from database documents.

        :param docs: Database documents.
        :type docs: list[dict]
        :param str version: The version number interpolated into
            ``{VERSION}``.
        '''
        if not docs:
            _invalid('At least one user agent is required')

        self._user_agents = list()
        for index, user_agent in enumerate(docs):
            location = 'User agent #{}'.format(index + 1)
            if user_agent.get('name', '').strip() == '':
                _invalid('Name is required', location)
            self._user_agents.append(
                user_agent['name'].format(VERSION=version))

    def get_first_user_agent(self):
        '''
        :returns: The first user agent string.
        :rtype: str
        '''
        return self._user_agents[0]

    def get_user_agent(self):
        '''
        :returns: A randomly selected user agent string.
        :rtype: str
        '''
        return random.choice(self._user_agents)
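

# A minimal usage sketch for PolicyUserAgents (illustrative only; the
# document below is a hypothetical example, not taken from a real crawl
# policy):
#
#     user_agents = PolicyUserAgents(
#         [{'name': 'MyCrawler/{VERSION}'}], version='1.0.0')
#     user_agents.get_first_user_agent()  # -> 'MyCrawler/1.0.0'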