content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from string import ascii_letters
from typing import Dict, Optional
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this fragment -- presumably
    # provided elsewhere in the full script; confirm before running.
    main()
| [
6738,
4731,
1330,
355,
979,
72,
62,
15653,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 3 | 37 |
from project.motorcycle import Motorcycle
| [
6738,
1628,
13,
76,
20965,
13696,
1330,
12533,
13696,
628
] | 4.3 | 10 |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import str
from builtins import range
from past.utils import old_div
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
from pylab import *
import pmagpy.pmagplotlib as pmagplotlib
# contributed by Ron Shaar 6/26/08
#
def smooth(x, window_len, window='bartlett'):
    """Smooth the data using a sliding window with the requested size.

    This method is based on the convolution of a scaled window with the
    signal.  The signal is prepared by padding both ends with the average
    of the first (last) ten values, to avoid jumps at the beginning/end.

    Args:
        x: the input signal, a 1-D numpy array, equally spaced!
        window_len: the dimension of the smoothing window; values below 3
            disable smoothing and return ``x`` unchanged
        window: type of window from the numpy library
            ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
            - a flat window will produce a moving-average smoothing
            - a Bartlett window is very similar to a triangular window,
              but always ends with zeros at points 1 and n
            - hanning, hamming, blackman are used for smoothing the
              Fourier transform
            for Curie-temperature calculation the default is Bartlett

    Returns:
        numpy array of the smoothed signal, same length as ``x``

    Raises:
        ValueError: if ``x`` is not 1-D, is shorter than ``window_len``,
            or ``window`` is not one of the supported names.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    # numpy available windows
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # pad the beginning and the end of the signal with an average value to
    # avoid edge effects in the convolution
    start = [numpy.average(x[0:10])] * window_len
    end = [numpy.average(x[-10:])] * window_len
    s = start + list(x) + end
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # look the window function up on the numpy module by name instead
        # of building a string for eval()
        w = getattr(numpy, window)(window_len)
    # normalized kernel so the smoothed signal keeps the original scale;
    # slicing off window_len samples at each end removes the padding
    y = numpy.convolve(w / w.sum(), s, mode='same')
    return numpy.array(y[window_len:-window_len])
def deriv1(x, y, i, n):
    """Estimate the slope of a noisy signal at position ``i`` by least squares.

    Alternative way to smooth the derivative of a noisy signal: the slope
    at position ``i`` is the least-squares line fit through the ``n``
    points ``x[i:i+n]`` / ``y[i:i+n]``.

    Args:
        x: array (or sequence) of x-axis values
        y: array (or sequence) of y-axis values
        i: starting position of the fit window
        n: smoothing factor, the number of points in the fit window

    Returns:
        float slope of the least-squares line through the window
    """
    x_sum, y_sum, xy_sum, x2_sum = 0., 0., 0., 0.
    for ix in range(i, i + n):
        x_sum += x[ix]
        y_sum += y[ix]
        xy_sum += x[ix] * y[ix]
        x2_sum += x[ix] ** 2
    # standard closed-form least-squares slope
    return (n * xy_sum - x_sum * y_sum) / (n * x2_sum - x_sum ** 2)
def main():
    """
    NAME
        curie.py

    DESCRIPTION
        plots and interprets curie temperature data.
        the 1st derivative is calculated from smoothed M-T curve
        (convolution with triangular window with width= <-w> degrees)
        the 2nd derivative is calculated from smoothed 1st derivative curve
        ( using the same sliding window width)
        the estimated curie temp. is the maximum of the 2nd derivative
        - the temperature steps should be in multiples of 1.0 degrees

    INPUT
        T,M

    SYNTAX
        curie.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f FILE, sets M,T input file (required)
        -w size of sliding window in degrees (default - 3 degrees)
        -t <min> <max> temperature range (optional)
        -sav save figures and quit
        -fmt [svg,jpg,eps,png,pdf] set format for figure output [default: svg]

    example:
        curie.py -f ex2.1 -w 30 -t 300 700
    """
    # parse command-line options; plot==1 means "save figures and quit"
    plot,fmt=0,'svg'
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        meas_file=sys.argv[ind+1]
    else:
        print("missing -f\n")
        sys.exit()
    if '-w' in sys.argv:
        ind=sys.argv.index('-w')
        window_len=int(sys.argv[ind+1])
    else:
        window_len=3
    if '-t' in sys.argv:
        ind=sys.argv.index('-t')
        t_begin=int(sys.argv[ind+1])
        t_end=int(sys.argv[ind+2])
    else:
        # empty strings double as "no range given" flags below
        t_begin=''
        t_end=''
    if '-sav' in sys.argv:plot=1
    if '-fmt' in sys.argv:
        ind=sys.argv.index('-fmt')
        fmt=sys.argv[ind+1]
    # read data from file
    # NOTE(review): numpy.float was removed in NumPy 1.24 -- plain float
    # (or numpy.float64) is the modern spelling; confirm target NumPy.
    Data=numpy.loadtxt(meas_file,dtype=numpy.float)
    T=Data.transpose()[0]
    M=Data.transpose()[1]
    T=list(T)
    M=list(M)
    # cut the data if -t is one of the flags
    if t_begin:
        while T[0]<t_begin:
            M.pop(0);T.pop(0)
        while T[-1]>t_end:
            M.pop(-1);T.pop(-1)
    # prepare the signal:
    # from M(T) array with unequal deltaT
    # to M(T) array with deltaT=(1 degree).
    # if deltaT is larger, then points are added using linear fit between
    # consecutive data points.
    # exit if deltaT is not integer
    i=0
    while i<(len(T)-1):
        if (T[i+1]-T[i])%1>0.001:
            print("delta T should be integer, this program will not work!")
            print("temperature range:",T[i],T[i+1])
            sys.exit()
        if (T[i+1]-T[i])==0.:
            # duplicate temperature: merge the two readings by averaging
            M[i]=average([M[i],M[i+1]])
            M.pop(i+1);T.pop(i+1)
        elif (T[i+1]-T[i])<0.:
            # temperature went backwards: drop the offending reading
            M.pop(i+1);T.pop(i+1)
            print("check data in T=%.0f ,M[T] is ignored"%(T[i]))
        elif (T[i+1]-T[i])>1.:
            # gap larger than 1 degree: fill with linearly interpolated
            # points; i advances inside the loop so T[i]+1 steps forward
            slope,b=polyfit([T[i],T[i+1]],[M[i],M[i+1]],1)
            for j in range(int(T[i+1])-int(T[i])-1):
                M.insert(i+1,slope*(T[i]+1.)+b)
                T.insert(i+1,(T[i]+1.))
                i=i+1
        i=i+1
    # calculate the smoothed signal
    M=array(M,'f')
    T=array(T,'f')
    M_smooth=[]
    M_smooth=smooth(M,window_len)
    #plot the original data and the smooth data
    PLT={'M_T':1,'der1':2,'der2':3,'Curie':4}
    pmagplotlib.plot_init(PLT['M_T'],5,5)
    string='M-T (sliding window=%i)'%int(window_len)
    pmagplotlib.plot_xy(PLT['M_T'],T,M_smooth,sym='-')
    pmagplotlib.plot_xy(PLT['M_T'],T,M,sym='--',xlab='Temperature C',ylab='Magnetization',title=string)
    #calculate first derivative (central difference over 2 degrees)
    # NOTE(review): at i=0 this reads M_smooth[-1] (wrap-around); the loop
    # probably intended range(1, len(M_smooth)-1) -- confirm.
    d1,T_d1=[],[]
    for i in range(len(M_smooth)-1):
        Dy=M_smooth[i-1]-M_smooth[i+1]
        Dx=T[i-1]-T[i+1]
        d1.append(old_div(Dy,Dx))
    # NOTE(review): len(T-1) == len(T) (elementwise subtraction), so this
    # is T[1:len(T)]; len(T)-1 may have been intended -- confirm.
    T_d1=T[1:len(T-1)]
    d1=array(d1,'f')
    d1_smooth=smooth(d1,window_len)
    #plot the first derivative
    pmagplotlib.plot_init(PLT['der1'],5,5)
    string='1st derivative (sliding window=%i)'%int(window_len)
    pmagplotlib.plot_xy(PLT['der1'],T_d1,d1_smooth,sym='-',xlab='Temperature C',title=string)
    pmagplotlib.plot_xy(PLT['der1'],T_d1,d1,sym='b--')
    #calculate second derivative (same scheme applied to d1_smooth)
    d2,T_d2=[],[]
    for i in range(len(d1_smooth)-1):
        Dy=d1_smooth[i-1]-d1_smooth[i+1]
        Dx=T[i-1]-T[i+1]
        #print Dy/Dx
        d2.append(old_div(Dy,Dx))
    T_d2=T[2:len(T-2)]
    d2=array(d2,'f')
    d2_smooth=smooth(d2,window_len)
    #plot the second derivative
    pmagplotlib.plot_init(PLT['der2'],5,5)
    string='2nd derivative (sliding window=%i)'%int(window_len)
    pmagplotlib.plot_xy(PLT['der2'],T_d2,d2,sym='-',xlab='Temperature C',title=string)
    d2=list(d2)
    # the Curie temperature estimate: maximum of the 2nd derivative
    print('second derivative maximum is at T=%i'%int(T_d2[d2.index(max(d2))]))
    # calculate Curie temperature for different width of sliding windows
    curie,curie_1=[],[]
    wn=list(range(5,50,1))
    for win in wn:
        # calculate the smoothed signal
        M_smooth=[]
        M_smooth=smooth(M,win)
        #calculate first derivative
        d1,T_d1=[],[]
        for i in range(len(M_smooth)-1):
            Dy=M_smooth[i-1]-M_smooth[i+1]
            Dx=T[i-1]-T[i+1]
            d1.append(old_div(Dy,Dx))
        T_d1=T[1:len(T-1)]
        d1=array(d1,'f')
        d1_smooth=smooth(d1,win)
        #calculate second derivative
        d2,T_d2=[],[]
        for i in range(len(d1_smooth)-1):
            Dy=d1_smooth[i-1]-d1_smooth[i+1]
            Dx=T[i-1]-T[i+1]
            d2.append(old_div(Dy,Dx))
        T_d2=T[2:len(T-2)]
        d2=array(d2,'f')
        d2_smooth=smooth(d2,win)
        d2=list(d2)
        d2_smooth=list(d2_smooth)
        # Curie estimate from raw and from smoothed 2nd derivative
        curie.append(T_d2[d2.index(max(d2))])
        curie_1.append(T_d2[d2_smooth.index(max(d2_smooth))])
    #plot Curie temp for different sliding window length
    pmagplotlib.plot_init(PLT['Curie'],5,5)
    pmagplotlib.plot_xy(PLT['Curie'],wn,curie,sym='.',xlab='Sliding Window Width (degrees)',ylab='Curie Temp',title='Curie Statistics')
    files = {}
    for key in list(PLT.keys()): files[key]=str(key) + "." +fmt
    if plot==0:
        # interactive mode: show figures and let the user save or quit
        pmagplotlib.draw_figs(PLT)
        ans=input(" S[a]ve to save plot, [q]uit, Return to continue: ")
        if ans=="q": sys.exit()
        if ans=="a": pmagplotlib.save_plots(PLT,files)
    else: pmagplotlib.save_plots(PLT,files)
    sys.exit()
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
3170,
1040,
1330,
5128,
198,
6738,
3170,
1040,
1330,
965,
198,
6738,
3170,
1040,
1330,... | 2.034567 | 4,484 |
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms
from ltr import actors
from ltr.trainers import LTRTrainer
from ltr.dataset import Lasot, TrackingNet, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.data.transforms as dltransforms
import ltr.models.SBDT.network as SBDT_models
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
10178,
13,
7645,
23914,
198,
198,
6738,
300,
2213,
1330,
10544,
198,
6738,
300,
2213,
13,
27432,
364,
1330,
406,
5446,
2898,
10613,
198,
... | 3.132075 | 106 |
import datetime
from unittest import mock
from dateutil import parser as dateparser
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import QueryDict
from django.test import TestCase
from django.urls import reverse
from rest_framework.authtoken.models import Token
from rest_framework.test import APIRequestFactory, force_authenticate
import mediaplatform_jwp.api.delivery as api
import mediaplatform.models as mpmodels
from . import create_stats_table, delete_stats_table, add_stat
from .. import views
# Canned video resource used as a mocked delivery-API response in the tests
# below; presumably mirrors the payload shape returned by
# mediaplatform_jwp.api.delivery -- confirm against that module.
DELIVERY_VIDEO_FIXTURE = {
    'key': 'mock1',
    'title': 'Mock 1',
    'description': 'Description for mock 1',
    'date': 1234567,
    'duration': 54,
    'sms_acl': 'acl:WORLD:',
    'sms_media_id': 'media:1234:',
    # two renditions of the same video at different resolutions
    'sources': [
        {
            'type': 'video/mp4', 'width': 1920, 'height': 1080,
            'file': 'http://cdn.invalid/vid1.mp4',
        },
        {
            'type': 'video/mp4', 'width': 720, 'height': 406,
            'file': 'http://cdn.invalid/vid2.mp4',
        },
    ],
}
| [
11748,
4818,
8079,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
3128,
22602,
1330,
30751,
355,
3128,
48610,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
3642,
... | 2.498871 | 443 |
#!/usr/bin/env python
from distutils.core import setup
# Package metadata for distribution via distutils; long_description and
# license text are read from the adjacent README.rst / LICENSE files, so
# this module performs file I/O when executed.
setup(
    name='pubnub_curses',
    version='1.5.2',
    description='PubNub Curses Terminal Client',
    long_description=open('README.rst').read(),
    license=open('LICENSE').read(),
    author='Dan Ristic',
    author_email='danr@pubnub.com',
    url='http://pubnub.com',
    packages=['pubnub_curses'],
    scripts=['bin/pubnub-curses']
)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
1438,
11639,
12984,
77,
549,
62,
66,
46998,
3256,
198,
220,
2196,
11639,
16,
13,
20,
13,
17,
3256,
198,
... | 2.609589 | 146 |
from __future__ import annotations
from ..repositories.base import HacsRepository
from .base import ActionValidationBase, ValidationException
async def async_setup_validator(repository: HacsRepository) -> Validator:
    """Set up this validator."""
    # Factory entry point; presumably discovered by the HACS validation
    # loader by name -- confirm against the framework's conventions.
    return Validator(repository=repository)
class Validator(ActionValidationBase):
    """Validate that the repository declares topics."""

    # Documentation link surfaced to the user alongside the failure
    more_info = "https://hacs.xyz/docs/publish/include#check-repository"
    # NOTE(review): presumably tells the base class not to run this check
    # on forked repositories -- confirm against ActionValidationBase.
    allow_fork = False

    async def async_validate(self):
        """Validate the repository.

        Raises:
            ValidationException: if the repository has no topics set.
        """
        if not self.repository.data.topics:
            raise ValidationException("The repository has no valid topics")
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
11485,
260,
1930,
270,
1749,
13,
8692,
1330,
367,
16436,
6207,
13264,
198,
6738,
764,
8692,
1330,
7561,
7762,
24765,
14881,
11,
3254,
24765,
16922,
628,
198,
292,
13361,
825,
30351,
... | 2.995495 | 222 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
jumps = [1, 1, 1, 1, 0, 0, 0, -4, -1, 0, -3, -4, 0, -9, -3, 2, -14, 0, -17, -12, -15, -7, 0, -7, -12, -3, -17, -11, -24, -10, -16, -15, -28, -13, -28, -15, -28, -29, -20, 0, -10, -30, -13, -24, -34, -42, -25, -36, -38, -35, -23, -11, -4, -16, -15, -10, -31, 0, -16, -21, -50, -26, -31, -36, -53, -54, -12, -28, 1, -16, -65, -69, -4, -47, 1, -42, -33, -55, -72, -29, -2, -62, -40, -28, 0, -42, -78, 2, -23, -86, -75, -17, -15, -9, 0, -24, -36, -91, -64, -65, -98, -30, -21, -80, 0, -88, -105, -103, -32, -54, -62, -105, -68, -101, -73, -26, -112, -96, -66, -115, -53, -69, -99, -84, -46, -105, -16, -18, -104, -19, -16, -9, -45, -40, -40, -11, -105, -105, -72, -89, -3, -119, -74, -124, -111, -128, -79, -145, -138, -147, -92, -44, -115, -51, -139, -15, -72, -116, -149, -38, -55, -63, -62, -3, -48, -115, -33, -56, -51, -28, -8, -15, -162, -7, -24, -72, -104, -7, -23, -16, -25, -169, -157, -53, -123, -183, -127, -98, -133, -180, -96, -56, -57, -123, -123, 0, -35, -174, -91, -167, -121, -67, -47, -201, 0, -111, -158, -36, -62, -111, -114, -183, -139, -108, -74, -154, -12, -18, -182, -217, -199, -68, -212, -183, -126, -56, -112, -211, -203, -223, -40, 0, -225, -101, -24, -91, -94, -80, -190, -6, -234, -2, -222, -208, -46, -163, -136, -45, -17, -141, -18, -67, -224, -39, -135, -91, -91, -146, -158, -70, -33, -232, -54, -45, -80, -124, -221, -130, -236, -112, -238, -11, -34, -110, -198, -15, -252, -230, -118, -230, -193, -119, -162, -214, -206, -158, -199, -141, -167, -9, -140, -185, -126, -106, -293, -142, -290, -78, -137, -274, -186, -88, -167, -287, -218, -300, -5, -81, -108, -287, -276, -235, -189, -116, -16, -232, -32, -189, -78, -8, -72, -219, -12, -63, -7, -114, -170, -125, -162, -19, -140, -152, -3, -127, -314, -158, -301, -153, -62, -7, -94, -182, -61, -6, -285, -260, -123, -298, -131, -66, -155, -347, -181, -71, -143, -232, -146, -100, 0, -101, -315, -53, -348, -209, -320, -212, -358, -108, -101, -188, -218, -309, -290, -245, -253, -111, -188, -104, -296, -14, -306, 
-335, -87, -284, -14, -117, -143, -386, -367, -292, -251, -289, -340, -41, -85, -52, -236, -265, -265, -341, -395, -110, -311, -391, -79, -262, -214, -395, -205, -50, -318, -198, -199, -44, -153, -403, -261, -290, -55, -321, -407, -17, -30, -342, -321, -37, -197, -5, -305, -394, -373, -297, -40, -114, -240, -218, -164, -334, -337, -38, -124, -362, -209, -79, -208, -277, -341, -345, -112, -137, -306, -90, -10, -50, -447, -445, -50, -327, -374, -441, -197, -231, -31, -361, -444, -109, -294, -452, -327, -411, -137, -326, -201, -217, -277, -245, -263, -111, -286, -265, -298, -107, -204, -395, -299, -175, -158, -94, -34, 2, -55, -113, -278, -74, -380, -167, -429, -261, -57, -95, -215, -392, -121, -460, -250, -393, -41, -183, -123, -367, -387, -66, -431, -399, -295, -449, -10, -461, -392, -277, -302, -460, -197, -307, -229, -296, -415, -313, -334, -172, -303, -439, -479, -364, -156, -287, -315, -265, -153, -134, -238, -88, 1, -306, -399, -197, -363, -156, -370, -313, -365, -510, -91, -464, -177, -550, -95, -49, -108, -24, -289, -229, -547, 0, -538, -164, -202, -190, -92, -302, -416, -42, -148, -192, -246, -118, -144, -264, -497, -276, -350, -318, -219, -301, -398, -12, -292, -395, -565, -102, -118, -424, -3, -473, -94, -413, -145, -38, -97, -485, -363, -309, -250, -506, -345, -346, -447, -319, -330, -198, -255, -353, -260, -370, -22, -91, -345, -333, -315, -593, -450, -37, -380, -543, -5, -556, -164, -135, -513, -56, -166, -474, -14, -84, -561, -596, -454, -429, -457, -69, -59, -597, -598, -391, -260, -596, -384, -267, -34, -158, -531, -243, -495, -165, -190, -466, -574, -344, -365, -277, -329, -64, -616, -123, -551, -537, -412, -333, -589, -212, -376, -290, -366, -363, -477, -39, -37, -495, -317, -554, -675, -442, -427, -407, -515, -169, -113, -395, -561, -358, -214, -20, -424, -74, -311, -110, -353, -112, -217, -181, -496, -336, -311, -585, -407, -383, -663, -266, -591, -235, -266, -406, -347, -268, -281, -449, -569, -8, -178, -62, -139, -89, -72, -487, -352, -164, 
-244, -640, -139, -639, -330, -348, -390, -260, -632, -171, -343, -700, -21, -653, -250, -20, -587, -357, -151, -536, -287, -614, -582, -564, -136, -613, -130, -717, -54, -35, -205, -49, -711, -538, -342, -222, -579, -300, -641, -240, -198, -76, -550, -73, -528, -465, -485, -327, -433, -325, -441, -575, -661, -126, -588, -315, -651, -692, -189, -656, -533, -627, -459, -244, -737, -422, -647, -324, -759, -592, -305, -281, -360, -79, -271, -52, -129, -416, -39, -497, -147, -755, -398, -382, -217, -301, -581, -345, -310, -68, -90, -128, -303, -416, -348, -745, -204, -795, -482, -537, -315, -662, -432, -464, -239, -19, -216, -230, -240, -612, -129, -655, -197, -369, -89, -573, -180, -229, -264, -268, -401, -820, -412, -99, -666, -360, -814, -348, -755, -772, -296, -851, -818, -394, -161, -77, -109, -362, -273, -688, -574, -50, -137, -550, -380, -462, -851, -611, -237, -853, -11, -383, -767, -349, -170, -389, -747, -247, -462, -839, -87, -852, -672, -796, -839, -788, -78, -151, -507, -414, -363, -750, -521, -468, -418, -251, -803, -802, -269, -766, -520, -301, -156, -488, -130, -100, -191, -45, -352, -774, -506, -306, -517, -220, -62, -523, -111, -157, -516, -541, -888, -514, -223, -902, -159, -255, -699, -901, -893, -273, -602, -850, -382, -207, -528, -566, -834, -695, -25, -166, -650, -569, -667, -771, -809, -922, -858, -53, -703, -552, -584, -190, -193, -146, -218, -503, -252, -432, -93, -180, -277, -250, -610, -194, -415, -67, -793, -413, -930, -785, -890, -417, -501, -109, -839, -916, -860, -467, -741, -645, -795, -769, -665, -974, -318, -334, -963, -674, -432, -402, -702, -724, -524, -753, -146, -719, -953]
if __name__ == "__main__":
    # NOTE(review): puzzle1()/puzzle2() are not defined in this fragment --
    # presumably they consume the ``jumps`` list above; confirm in the
    # full script.
    print("1: {}".format(puzzle1()))
    print("2: {}".format(puzzle2()))
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
73,
8142,
796,
685,
16,
11,
352,
11,
352,
11,
352,
11,
657,
11,
657,
11,
657,
11,
532,
19,
11,
532,
16,
11,
657... | 1.867443 | 3,093 |
# NOTE(review): Controller is not defined in this fragment -- presumably
# imported/defined elsewhere. Instantiates and presents it at import time.
controller = Controller()
controller.present()
| [
198,
198,
36500,
796,
22741,
3419,
198,
36500,
13,
25579,
3419,
628,
198
] | 3.923077 | 13 |
import math
from typing import Tuple
# Neil Bartlett
# neilbartlett.com
# 2015-01-22
#
# Copyright [2015] [Neil Bartlett] for Javascript source
# Copyright Eric Semeniuc
#
# Color Temperature is the color due to black body radiation at a given
# temperature. The temperature is given in Kelvin. The concept is widely used
# in photography and in tools such as f.lux.
#
# The function here converts a given color temperature into a near equivalent
# in the RGB colorspace. The function is based on a curve fit on standard sparse
# set of Kelvin to RGB mappings.
#
# NOTE The approximations used are suitable for photo-manipulation and other
# non-critical uses. They are not suitable for medical or other high accuracy
# use cases.
#
# Accuracy is best between 1000K and 40000K.
#
# See http://github.com/neilbartlett/color-temperature for further details.
'''
A more accurate version algorithm based on a different curve fit to the
original RGB to Kelvin data.
Input: color temperature in degrees Kelvin
Output: tuple of red, green and blue components of the Kelvin temperature
'''
# see http://www.zombieprototypes.com/?p=210 for plot and calculation of coefficients
| [
11748,
10688,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
2,
220,
15929,
13167,
15503,
198,
2,
220,
497,
346,
16575,
15503,
13,
785,
198,
2,
220,
1853,
12,
486,
12,
1828,
198,
2,
198,
2,
220,
15069,
685,
4626,
60,
685,
29354,
13... | 3.85342 | 307 |
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
1398,
373,
8295,
12,
27568,
422,
262,
7824,
10288,
1043,
379,
198,
2,
3740,
1378,
538,
323,
902,
12,
15042,
13,
16244,
263,
12,
36795,
3713,
13,
785,... | 2.810811 | 74 |
import factory
from models.A11yReport import A11yReport
from models.A11yViolation import A11yViolation
from models.Organisation import Organisation
from models.SecurityReport import SecurityReport
from models.SecurityViolation import SecurityViolation
from models.Scan import Scan
from models.ScanIgnore import ScanIgnore
from models.ScanType import ScanType
from models.Template import Template
from models.TemplateScan import TemplateScan
from models.User import User
# When adding new factories ensure you add the factory to the conftest session fixture so that they can be linked to the test db session
| [
11748,
8860,
198,
198,
6738,
4981,
13,
32,
1157,
88,
19100,
1330,
317,
1157,
88,
19100,
198,
6738,
4981,
13,
32,
1157,
88,
33894,
341,
1330,
317,
1157,
88,
33894,
341,
198,
6738,
4981,
13,
26121,
5612,
1330,
30801,
198,
6738,
4981,
... | 4.275862 | 145 |
"""
Copyright (C) 2019-2021, Monash University, Geoscience Australia
Copyright (C) 2018, Stuart Walsh
Bluecap is released under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The project uses third party components which may have different licenses.
Please refer to individual components for more details.
"""
import numpy as np
from scipy import interpolate
from scipy.special import gamma, betainc
# Functions for acting on numpy arrays
# 1d interpolation function
| [
37811,
198,
15269,
357,
34,
8,
13130,
12,
1238,
2481,
11,
2892,
1077,
2059,
11,
2269,
418,
4234,
4505,
198,
15269,
357,
34,
8,
2864,
11,
22559,
24104,
220,
198,
198,
14573,
11128,
318,
2716,
739,
262,
24843,
13789,
11,
10628,
362,
1... | 3.933333 | 240 |
# Copyright (c) 2020 The Foundry Visionmongers Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import logging
import time
from Katana import QtWidgets, QtCore, QtGui
from Katana import NodegraphAPI, Utils
import LookFileBakeAPI
import UI4
from .VariantsWidget import VariantsWidget
log = logging.getLogger("UsdMaterialBake.Editor")
NodegraphAPI.AddNodeFlavor("UsdMaterialBake", "3d")
NodegraphAPI.AddNodeFlavor("UsdMaterialBake", "lookfile")
class UsdMaterialBakeEditor(QtWidgets.QFrame):
""" The editor used for the UsdMaterialBake node parameters tab.
"""
| [
2,
15069,
357,
66,
8,
12131,
383,
4062,
563,
19009,
31059,
364,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
25189,
4891,
13789,
4943,
198,
2,
351,
262,
1708,
17613,
26,
345,
74... | 3.67696 | 421 |
import numpy as np
import pandas
if __name__ == "__main__":
    # NOTE(review): CalendarReader is not defined in this fragment --
    # presumably defined/imported elsewhere in the full module.
    cr = CalendarReader()
    # NOTE(review): hard-coded absolute user path; reads a pre-built numpy
    # archive of trading dates from a local rqalpha bundle.
    data = cr.read("/Users/zhifeng/.rqalpha/bundle/trading_dates.npy")
    print(data)
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1067,
796,
26506,
33634,
3419,
198,
220,
220,
220,
1366,
796,
1067,
13,
961,
7203,
14,
... | 2.464789 | 71 |
import math # used in threader and checker functions for math.comb
import time # used for timing information
import numpy as np # used for array math in loop
from multiprocessing import Pool # used to run thread values in thread_list asych
from multiprocessing import cpu_count # used to detect the machines core count (virtual)
# input values
N = 100 # how many ratings
C = 5 # categories 1 to 5
inczero = False # include 0 category - True or False - like allowing missing values as these will not be evaluted in target
target = 4.91
# threads is calculated but can be overwritten
threads = 16 #cpu_count() # desired number of parallel processes to split work over, be careful and know how many computing cores you have as you can slow this down!
# function that splits all possible combination of N ratings in C categories across threads with approximate balance - this is hard!
# function to run threader, append needed columns, and print diagnostic info about the threading process
# loop through the combos from start to end specified in a thread alloation, then evaluate each row for match to the target value
# function to iterate on combinations by adding n more ratings
# setup multiprocessing:
# function that takes a single thread and runs it through loop to get matches
# function to allocate threads to a pool of processes
# the actions!
if __name__ == '__main__':
    # create thread_list
    # NOTE(review): create_threads() and main() are described in the
    # comments above but not defined in this fragment -- confirm they
    # exist in the full script.
    thread_list = create_threads(N,C,inczero,threads,target)
    # run the threads asynch and time start to end
    begin = time.time()
    matches = main(thread_list)
    end = time.time()
    # report useful information
    #print("The thread list:",*thread_list,sep="\n")
    if len(matches): print("Here are the matches:",*matches,sep="\n")
    print("This took {} seconds".format(end - begin))
    # total search space: stars-and-bars count of N ratings over C bins
    print(len(matches),"matches from",math.comb(N+C-1,C-1),"possible combinations.")
    # local imports: pandas/plotly are only needed for this reporting step
    from pandas import DataFrame
    df = DataFrame(matches,columns=['0','1','2','3','4','5'])
    import plotly.graph_objects as go
    fig = go.Figure()
    # one stacked horizontal bar per rating category (1..5)
    for col in ['1','2','3','4','5']:
        fig.add_trace(go.Bar(y=df.index, x=df[col],orientation='h',name=col,hovertemplate="%{x}"))
    fig.update_layout(barmode='stack',xaxis_title="# of Ratings",yaxis_title="Combinations")
    fig.update_layout(title=dict(
        text=str(len(matches))+" Matching Combinations Visualized<br>For Target = "+str(target)+", "+str(N)+" Ratings, in "+str(C)+" categories , and missing ratings = "+str(inczero),
        xanchor="center",
        x=0.5,
        font_size=14
        ))
    fig.update_layout(legend=dict(
        orientation="h",
        title="Rating",
        yanchor="bottom",
        y=1.0,
        xanchor="center",
        x=0.5,
        traceorder="normal"
        ))
    fig.update_yaxes(showticklabels=False)
    fig.update_xaxes(range=[0,N])
    #fig.show()
    fig.write_image("combos.svg")
| [
11748,
10688,
1303,
973,
287,
4704,
263,
290,
2198,
263,
5499,
329,
10688,
13,
24011,
198,
11748,
640,
1303,
973,
329,
10576,
1321,
198,
11748,
299,
32152,
355,
45941,
1303,
973,
329,
7177,
10688,
287,
9052,
198,
6738,
18540,
305,
919,
... | 2.947528 | 991 |
"""
Testing all methods from oap.utils
"""
import unittest
import oap
from tests.data import (
array01_barycenter_coordinates,
array01_original,
array01_adjust01,
array01_adjust02,
array01_toclip01,
array01_move_x01,
array01_move_x02,
array01_move_y01,
array01_move_y02
)
| [
37811,
198,
44154,
477,
5050,
422,
267,
499,
13,
26791,
198,
37811,
198,
198,
11748,
555,
715,
395,
198,
11748,
267,
499,
198,
198,
6738,
5254,
13,
7890,
1330,
357,
198,
220,
220,
220,
7177,
486,
62,
65,
560,
16159,
62,
37652,
17540... | 2.303704 | 135 |
from modron.dice import roll_die, DiceRoll
from math import isclose
from random import seed
seed(1)
_default_rolls = 2 * 10 ** 6
def _measure_probability(sides: int, target_val: int, n_trials: int = _default_rolls, **kwargs) -> float:
    """Measure the probability of a certain dice roll

    Args:
        sides (int): Number of sides on the die
        n_trials (int): Number of times to simulate the roll
        target_val (int): Target value of the dice
        **kwargs: Any modifiers to the dice roll
    Returns:
        (float) Fraction of rolls that were the target value
    """
    # A generator expression keeps memory flat: each boolean outcome is
    # folded into the running count instead of being stored in an array
    hits = sum(
        roll_die(sides, **kwargs)[0] == target_val for _ in range(n_trials)
    )
    return hits / n_trials
def test_d20():
    """Players doubted that natural 1s were just as common as 20s or 10s"""
    # each face of a fair d20 should appear with probability 1/20 = 0.05;
    # rel_tol=1e-2 tolerates Monte-Carlo noise over ~2e6 simulated rolls
    assert isclose(_measure_probability(20, 1), 0.05, rel_tol=1e-2)
    assert isclose(_measure_probability(20, 20), 0.05, rel_tol=1e-2)
    assert isclose(_measure_probability(20, 10), 0.05, rel_tol=1e-2)
| [
6738,
953,
1313,
13,
67,
501,
1330,
4836,
62,
11979,
11,
34381,
26869,
198,
6738,
10688,
1330,
318,
19836,
198,
6738,
4738,
1330,
9403,
198,
198,
28826,
7,
16,
8,
198,
62,
12286,
62,
2487,
82,
796,
362,
1635,
838,
12429,
718,
628,
... | 2.657702 | 409 |
from shorty.shortenusecase.validators.provider_validator \
import ProviderValidator
| [
6738,
1790,
88,
13,
19509,
268,
1904,
7442,
13,
12102,
2024,
13,
15234,
1304,
62,
12102,
1352,
3467,
198,
220,
220,
220,
1330,
32549,
47139,
1352,
628,
198
] | 3.214286 | 28 |
from secrets import randbelow
from sqlalchemy import MetaData
metadata = MetaData()
MAX_ID = 2 ** 31
| [
6738,
13141,
1330,
43720,
35993,
198,
198,
6738,
44161,
282,
26599,
1330,
30277,
6601,
198,
198,
38993,
796,
30277,
6601,
3419,
198,
198,
22921,
62,
2389,
796,
362,
12429,
3261,
628
] | 3.387097 | 31 |
# -*- coding: utf-8 -*-
"""Main Controller"""
from tg import expose, flash, require, url, lurl
from tg import request, redirect, tmpl_context
from tg.i18n import ugettext as _, lazy_ugettext as l_
from tg.exceptions import HTTPFound
from tg import predicates
from inviteexportmail import model
from inviteexportmail.controllers.secure import SecureController
from inviteexportmail.model import DBSession
from tgext.admin.tgadminconfig import BootstrapTGAdminConfig as TGAdminConfig
from tgext.admin.controller import AdminController
from inviteexportmail.lib.base import BaseController
from inviteexportmail.controllers.error import ErrorController
__all__ = ['ExportDataMailJMController']
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
13383,
22741,
37811,
198,
198,
6738,
256,
70,
1330,
15651,
11,
7644,
11,
2421,
11,
19016,
11,
300,
6371,
198,
6738,
256,
70,
1330,
2581,
11,
18941,
11,
256,
76,... | 3.561224 | 196 |
from panda3d.core import PandaNode
import DNAUtil | [
6738,
279,
5282,
18,
67,
13,
7295,
1330,
41112,
19667,
198,
11748,
7446,
18274,
346
] | 3.266667 | 15 |
# Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# limitations under the License.
"""Utilities for type conversion, type checking, type inference, etc."""
import collections
from typing import Any, Callable, Dict, Type, TypeVar
import attr
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import typed_object
from tensorflow_federated.python.core.impl.compiler import placement_literals
TF_DATASET_REPRESENTATION_TYPES = (tf.data.Dataset, tf.compat.v1.data.Dataset,
tf.compat.v2.data.Dataset)
def infer_type(arg):
  """Infers the TFF type of the argument (a `computation_types.Type` instance).

  WARNING: This function is only partially implemented.

  The kinds of arguments that are currently correctly recognized:
  - tensors, variables, and data sets,
  - things that are convertible to tensors (including numpy arrays, builtin
    types, as well as lists and tuples of any of the above, etc.),
  - nested lists, tuples, namedtuples, anonymous tuples, dict, and OrderedDicts.

  Args:
    arg: The argument, the TFF type of which to infer.

  Returns:
    Either an instance of `computation_types.Type`, or `None` if the argument is
    `None`.
  """
  # TODO(b/113112885): Implement the remaining cases here on the need basis.
  if arg is None:
    return None
  elif isinstance(arg, typed_object.TypedObject):
    # TFF values carry their own type signature; trust it directly.
    return arg.type_signature
  elif tf.is_tensor(arg):
    return computation_types.TensorType(arg.dtype.base_dtype, arg.shape)
  elif isinstance(arg, TF_DATASET_REPRESENTATION_TYPES):
    return computation_types.SequenceType(
        computation_types.to_type(tf.data.experimental.get_structure(arg)))
  elif isinstance(arg, anonymous_tuple.AnonymousTuple):
    # Unnamed elements (k is falsy) contribute a bare type rather than a
    # (name, type) pair.
    return computation_types.NamedTupleType([
        (k, infer_type(v)) if k else infer_type(v)
        for k, v in anonymous_tuple.iter_elements(arg)
    ])
  elif py_typecheck.is_attrs(arg):
    # `recurse=False` so nested containers are typed by the recursive
    # `infer_type` calls below instead of being flattened by `attr.asdict`.
    items = attr.asdict(
        arg, dict_factory=collections.OrderedDict, recurse=False)
    return computation_types.NamedTupleTypeWithPyContainerType(
        [(k, infer_type(v)) for k, v in items.items()], type(arg))
  elif py_typecheck.is_named_tuple(arg):
    # Namedtuples are tuple subclasses, so this check must precede the
    # generic (tuple, list) branch below.
    items = arg._asdict()
    return computation_types.NamedTupleTypeWithPyContainerType(
        [(k, infer_type(v)) for k, v in items.items()], type(arg))
  elif isinstance(arg, dict):
    if isinstance(arg, collections.OrderedDict):
      items = arg.items()
    else:
      # Plain dicts are traversed in sorted key order for a deterministic type.
      items = sorted(arg.items())
    return computation_types.NamedTupleTypeWithPyContainerType(
        [(k, infer_type(v)) for k, v in items], type(arg))
  elif isinstance(arg, (tuple, list)):
    elements = []
    all_elements_named = True
    for element in arg:
      all_elements_named &= py_typecheck.is_name_value_pair(element)
      elements.append(infer_type(element))
    # If this is a tuple of (name, value) pairs, the caller most likely intended
    # this to be a NamedTupleType, so we avoid storing the Python container.
    if all_elements_named:
      return computation_types.NamedTupleType(elements)
    else:
      return computation_types.NamedTupleTypeWithPyContainerType(
          elements, type(arg))
  elif isinstance(arg, str):
    return computation_types.TensorType(tf.string)
  elif isinstance(arg, (np.generic, np.ndarray)):
    return computation_types.TensorType(
        tf.dtypes.as_dtype(arg.dtype), arg.shape)
  else:
    # Fast path for plain Python scalars.
    dtype = {bool: tf.bool, int: tf.int32, float: tf.float32}.get(type(arg))
    if dtype:
      return computation_types.TensorType(dtype)
    else:
      # Now fall back onto the heavier-weight processing, as all else failed.
      # Use make_tensor_proto() to make sure to handle it consistently with
      # how TensorFlow is handling values (e.g., recognizing int as int32, as
      # opposed to int64 as in NumPy).
      try:
        # TODO(b/113112885): Find something more lightweight we could use here.
        tensor_proto = tf.make_tensor_proto(arg)
        return computation_types.TensorType(
            tf.dtypes.as_dtype(tensor_proto.dtype),
            tf.TensorShape(tensor_proto.tensor_shape))
      except TypeError as err:
        raise TypeError('Could not infer the TFF type of {}: {}'.format(
            py_typecheck.type_string(type(arg)), err))
def to_canonical_value(value):
  """Converts a Python object to a canonical TFF value for a given type.

  Args:
    value: The object to convert.

  Returns:
    The canonical TFF representation of `value` for a given type.
  """
  if value is None:
    return None
  if isinstance(value, dict):
    # OrderedDicts keep their insertion order; plain dicts are canonicalized
    # in sorted key order.
    if isinstance(value, collections.OrderedDict):
      pairs = list(value.items())
    else:
      pairs = sorted(value.items())
    return anonymous_tuple.AnonymousTuple(
        (name, to_canonical_value(element)) for name, element in pairs)
  if isinstance(value, (tuple, list)):
    # Sequences are canonicalized to lists, recursing into each element.
    return [to_canonical_value(element) for element in value]
  return value
def check_type(val, type_spec):
  """Checks whether `val` is of TFF type `type_spec`.

  Args:
    val: The object to check.
    type_spec: An instance of `tff.Type` or something convertible to it that the
      `val` is checked against.

  Raises:
    TypeError: If the inferred type of `val` is not `type_spec`.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.Type)
  val_type = infer_type(val)
  # One-directional check: the inferred type only needs to be assignable to
  # `type_spec` (i.e., a specialization), not exactly equal to it.
  if not is_assignable_from(type_spec, val_type):
    raise TypeError(
        'Expected TFF type {}, which is not assignable from {}.'.format(
            type_spec, val_type))
def tf_dtypes_and_shapes_to_type(dtypes, shapes):
  """Returns computation_types.Type for the given TF (dtypes, shapes) tuple.

  The returned dtypes and shapes match those used by `tf.data.Dataset`s to
  indicate the type and shape of their elements. They can be used, e.g., as
  arguments in constructing an iterator over a string handle. Note that the
  nested structure of dtypes and shapes must be identical.

  Args:
    dtypes: A nested structure of dtypes, such as what is returned by Dataset's
      output_dtypes property.
    shapes: A nested structure of shapes, such as what is returned by Dataset's
      output_shapes property.

  Returns:
    The corresponding instance of computation_types.Type.

  Raises:
    TypeError: if the arguments are of types that weren't recognized.
  """
  # The two nests must mirror each other exactly; fail fast otherwise.
  tf.nest.assert_same_structure(dtypes, shapes)
  if isinstance(dtypes, tf.DType):
    return computation_types.TensorType(dtypes, shapes)
  elif py_typecheck.is_named_tuple(dtypes):
    # Special handling needed for collections.namedtuple due to the lack of
    # a base class. Note this must precede the test for being a list.
    # `_parallel_dict_to_element_list` is presumably a module-level helper
    # defined elsewhere in this module (not visible in this excerpt).
    dtype_dict = dtypes._asdict()
    shape_dict = shapes._asdict()
    return computation_types.NamedTupleTypeWithPyContainerType(
        _parallel_dict_to_element_list(dtype_dict, shape_dict), type(dtypes))
  elif py_typecheck.is_attrs(dtypes):
    # `recurse=False` keeps nested containers intact for recursive handling.
    dtype_dict = attr.asdict(
        dtypes, dict_factory=collections.OrderedDict, recurse=False)
    shapes_dict = attr.asdict(
        shapes, dict_factory=collections.OrderedDict, recurse=False)
    return computation_types.NamedTupleTypeWithPyContainerType(
        _parallel_dict_to_element_list(dtype_dict, shapes_dict), type(dtypes))
  elif isinstance(dtypes, dict):
    if isinstance(dtypes, collections.OrderedDict):
      items = dtypes.items()
    else:
      # Plain dicts are traversed in sorted key order for determinism.
      items = sorted(dtypes.items())
    elements = [(name, tf_dtypes_and_shapes_to_type(dtypes_elem, shapes[name]))
                for name, dtypes_elem in items]
    return computation_types.NamedTupleTypeWithPyContainerType(
        elements, type(dtypes))
  elif isinstance(dtypes, (list, tuple)):
    return computation_types.NamedTupleTypeWithPyContainerType([
        tf_dtypes_and_shapes_to_type(dtypes_elem, shapes[idx])
        for idx, dtypes_elem in enumerate(dtypes)
    ], type(dtypes))
  else:
    raise TypeError('Unrecognized: dtypes {}, shapes {}.'.format(
        dtypes, shapes))
def type_to_tf_dtypes_and_shapes(type_spec):
  """Returns nested structures of tensor dtypes and shapes for a given TFF type.

  The returned dtypes and shapes match those used by `tf.data.Dataset`s to
  indicate the type and shape of their elements. They can be used, e.g., as
  arguments in constructing an iterator over a string handle.

  Args:
    type_spec: Type specification, either an instance of
      `computation_types.Type`, or something convertible to it. The type
      specification must be composed of only named tuples and tensors. In all
      named tuples that appear in the type spec, all the elements must be named.

  Returns:
    A pair of parallel nested structures with the dtypes and shapes of tensors
    defined in `type_spec`. The layout of the two structures returned is the
    same as the layout of the nested type defined by `type_spec`. Named tuples
    are represented as dictionaries.

  Raises:
    ValueError: if the `type_spec` is composed of something other than named
      tuples and tensors, or if any of the elements in named tuples are unnamed.
  """
  type_spec = computation_types.to_type(type_spec)
  if isinstance(type_spec, computation_types.TensorType):
    return (type_spec.dtype, type_spec.shape)
  elif isinstance(type_spec, computation_types.NamedTupleType):
    elements = anonymous_tuple.to_elements(type_spec)
    if not elements:
      output_dtypes = []
      output_shapes = []
    elif elements[0][0] is not None:
      # First element is named: require ALL elements to be named and build
      # parallel OrderedDicts keyed by element name.
      output_dtypes = collections.OrderedDict()
      output_shapes = collections.OrderedDict()
      for e in elements:
        element_name = e[0]
        element_spec = e[1]
        if element_name is None:
          raise ValueError(
              'When a sequence appears as a part of a parameter to a section '
              'of TensorFlow code, in the type signature of elements of that '
              'sequence all named tuples must have their elements explicitly '
              'named, and this does not appear to be the case in {}.'.format(
                  type_spec))
        element_output = type_to_tf_dtypes_and_shapes(element_spec)
        output_dtypes[element_name] = element_output[0]
        output_shapes[element_name] = element_output[1]
    else:
      # First element is unnamed: require ALL elements to be unnamed and build
      # parallel positional lists.
      output_dtypes = []
      output_shapes = []
      for e in elements:
        element_name = e[0]
        element_spec = e[1]
        if element_name is not None:
          raise ValueError(
              'When a sequence appears as a part of a parameter to a section '
              'of TensorFlow code, in the type signature of elements of that '
              'sequence all named tuples must have their elements explicitly '
              'named, and this does not appear to be the case in {}.'.format(
                  type_spec))
        element_output = type_to_tf_dtypes_and_shapes(element_spec)
        output_dtypes.append(element_output[0])
        output_shapes.append(element_output[1])
    if isinstance(type_spec,
                  computation_types.NamedTupleTypeWithPyContainerType):
      container_type = computation_types.NamedTupleTypeWithPyContainerType.get_container_type(
          type_spec)
      # NOTE(review): `build_py_container` is not defined in this excerpt and
      # `container_type` is otherwise unused here; presumably a nested helper
      # that rebuilds `container_type` instances was stripped from this view
      # -- confirm against the full module.
      output_dtypes = build_py_container(output_dtypes)
      output_shapes = build_py_container(output_shapes)
    else:
      output_dtypes = tuple(output_dtypes)
      output_shapes = tuple(output_shapes)
    return (output_dtypes, output_shapes)
  else:
    raise ValueError('Unsupported type {}.'.format(
        py_typecheck.type_string(type(type_spec))))
def type_to_tf_tensor_specs(type_spec):
  """Returns nested structure of `tf.TensorSpec`s for a given TFF type.

  The dtypes and shapes of the returned `tf.TensorSpec`s match those used by
  `tf.data.Dataset`s to indicate the type and shape of their elements. They can
  be used, e.g., as arguments in constructing an iterator over a string handle.

  Args:
    type_spec: Type specification, either an instance of
      `computation_types.Type`, or something convertible to it. The type
      specification must be composed of only named tuples and tensors. In all
      named tuples that appear in the type spec, all the elements must be named.

  Returns:
    A nested structure of `tf.TensorSpec`s with the dtypes and shapes of tensors
    defined in `type_spec`. The layout of the structure returned is the same as
    the layout of the nested type defined by `type_spec`. Named tuples are
    represented as dictionaries.
  """
  dtypes, shapes = type_to_tf_dtypes_and_shapes(type_spec)

  def _make_spec(dtype, shape):
    # Note `tf.TensorSpec` takes (shape, dtype) in that order.
    return tf.TensorSpec(shape, dtype)

  return tf.nest.map_structure(_make_spec, dtypes, shapes)
def type_to_tf_structure(type_spec):
  """Returns nested `tf.data.experimental.Structure` for a given TFF type.

  Args:
    type_spec: Type specification, either an instance of
      `computation_types.Type`, or something convertible to it. The type
      specification must be composed of only named tuples and tensors. In all
      named tuples that appear in the type spec, all the elements must be named.

  Returns:
    An instance of `tf.data.experimental.Structure`, possibly nested, that
    corresponds to `type_spec`.

  Raises:
    ValueError: if the `type_spec` is composed of something other than named
      tuples and tensors, or if any of the elements in named tuples are unnamed.
  """
  type_spec = computation_types.to_type(type_spec)
  if isinstance(type_spec, computation_types.TensorType):
    return tf.TensorSpec(type_spec.shape, type_spec.dtype)
  elif isinstance(type_spec, computation_types.NamedTupleType):
    elements = anonymous_tuple.to_elements(type_spec)
    if not elements:
      raise ValueError('Empty tuples are unsupported.')
    element_outputs = [(k, type_to_tf_structure(v)) for k, v in elements]
    # Elements must be uniformly named or uniformly unnamed.
    named = element_outputs[0][0] is not None
    if not all((e[0] is not None) == named for e in element_outputs):
      raise ValueError('Tuple elements inconsistently named.')
    if not isinstance(type_spec,
                      computation_types.NamedTupleTypeWithPyContainerType):
      # No Python container recorded: named tuples become OrderedDicts,
      # unnamed ones become plain tuples.
      if named:
        output = collections.OrderedDict(element_outputs)
      else:
        output = tuple(v for _, v in element_outputs)
    else:
      container_type = computation_types.NamedTupleTypeWithPyContainerType.get_container_type(
          type_spec)
      if (py_typecheck.is_named_tuple(container_type) or
          py_typecheck.is_attrs(container_type)):
        # namedtuple/attrs classes are constructed via keyword arguments.
        output = container_type(**dict(element_outputs))
      elif named:
        output = container_type(element_outputs)
      else:
        # `named` is False here, so every e[0] is None and e[1] is kept.
        output = container_type(
            e if e[0] is not None else e[1] for e in element_outputs)
    return output
  else:
    raise ValueError('Unsupported type {}.'.format(
        py_typecheck.type_string(type(type_spec))))
def type_from_tensors(tensors):
  """Builds a `tff.Type` from supplied tensors.

  Args:
    tensors: A nested structure of tensors.

  Returns:
    The nested TensorType structure.
  """
  # NOTE(review): `_mapping_fn` is not defined in this excerpt; presumably it
  # is a helper (possibly a nested def stripped from this view) that maps each
  # tensor to its `computation_types.TensorType` -- confirm against the full
  # module.
  if isinstance(tensors, anonymous_tuple.AnonymousTuple):
    # AnonymousTuples are not traversable by tf.nest; use the TFF-native
    # structure mapper instead.
    return computation_types.to_type(
        anonymous_tuple.map_structure(_mapping_fn, tensors))
  else:
    return computation_types.to_type(
        tf.nest.map_structure(_mapping_fn, tensors))
def get_named_tuple_element_type(type_spec, name):
  """Returns the type of a named tuple member.

  Args:
    type_spec: Type specification, either an instance of computation_types.Type
      or something convertible to it by computation_types.to_type().
    name: The string name of the named tuple member.

  Returns:
    The TFF type of the element.

  Raises:
    TypeError: if arguments are of the wrong computation_types.
    ValueError: if the tuple does not have an element with the given name.
  """
  py_typecheck.check_type(name, str)
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.NamedTupleType)
  elements = anonymous_tuple.to_elements(type_spec)
  for element_name, element_type in elements:
    if element_name == name:
      return element_type
  known_names = [e[0] for e in elements if e[0]]
  raise ValueError('The name \'{}\' of the element does not correspond to any '
                   'of the names {} in the named tuple type.'.format(
                       name, known_names))
# Type variable for the accumulator value threaded through `preorder_call`.
T = TypeVar('T')
def preorder_call(given_type: Any, fn: Callable[[Any, T], T], arg: T):
  """Recursively calls `fn` on the possibly nested structure `given_type`.

  Walks the tree in a preorder manner. Updates `arg` on the way down with
  the appropriate information, as defined in `fn`.

  Args:
    given_type: Possibly nested `computation_types.Type` or object convertible
      to it by `computation_types.to_type`.
    fn: Function to apply to each of the constituent elements of `given_type`
      with the argument `arg`. Must return an updated version of `arg` which
      incorporated the information we'd like to track as we move down the
      nested type tree.
    arg: Initial state of information to be passed down the tree.
  """
  type_signature = computation_types.to_type(given_type)
  # Visit the current node first (preorder), then recurse into children with
  # the updated accumulator.
  arg = fn(type_signature, arg)
  if isinstance(type_signature, computation_types.FederatedType):
    preorder_call(type_signature.member, fn, arg)
    return
  if isinstance(type_signature, computation_types.SequenceType):
    preorder_call(type_signature.element, fn, arg)
    return
  if isinstance(type_signature, computation_types.FunctionType):
    preorder_call(type_signature.parameter, fn, arg)
    preorder_call(type_signature.result, fn, arg)
    return
  if isinstance(type_signature, computation_types.NamedTupleType):
    for _, element_type in anonymous_tuple.iter_elements(type_signature):
      preorder_call(element_type, fn, arg)
def check_well_formed(type_spec):
  """Checks that `type_spec` represents a well-formed type.

  Performs the following checks of well-formedness for `type_spec`:
    1. If `type_spec` contains a `computation_types.FederatedType`, checks
    that its `member` contains nowhere in its structure instances
    of `computation_types.FunctionType` or `computation_types.FederatedType`.
    2. If `type_spec` contains a `computation_types.SequenceType`, checks that
    its `element` contains nowhere in its structure instances of
    `computation_types.SequenceType`, `computation_types.FederatedType`
    or `computation_types.FunctionType`.

  Args:
    type_spec: The type specification to check, either an instance of
      `computation_types.Type` or something convertible to it by
      `computation_types.to_type()`.

  Raises:
    TypeError: if `type_spec` is not a well-formed TFF type.
  """
  # TODO(b/113112885): Reinstate a call to `check_all_abstract_types_are_bound`
  # after revising the definition of well-formedness.
  type_signature = computation_types.to_type(type_spec)

  def _check_for_disallowed_type(
      type_to_check: Any,
      disallowed_types: Dict[Type[Any], str],
  ) -> Dict[Type[Any], str]:
    """Checks subtree of `type_to_check` for `disallowed_types`."""
    # First, reject this node if it is already disallowed by an ancestor.
    for disallowed_type, disallowed_context in disallowed_types.items():
      if isinstance(type_to_check, disallowed_type):
        raise TypeError('{} has been encountered in the type signature {}. '
                        '{} is disallowed inside of {}.'.format(
                            type_to_check,
                            type_signature,
                            disallowed_type,
                            disallowed_context,
                        ))
    # Then extend the disallowed set for the subtree below this node; the
    # merged dict is returned to `preorder_call`, which passes it down.
    if isinstance(type_to_check, computation_types.FederatedType):
      context = 'federated types (types placed @CLIENT or @SERVER)'
      disallowed_types = {
          **disallowed_types, computation_types.FederatedType: context,
          computation_types.FunctionType: context
      }
    if isinstance(type_to_check, computation_types.SequenceType):
      context = 'sequence types'
      # NOTE(review): the docstring says sequences also disallow
      # `FunctionType`, but only `FederatedType` and `SequenceType` are added
      # here -- confirm which is intended.
      disallowed_types = {
          **disallowed_types, computation_types.FederatedType: context,
          computation_types.SequenceType: context
      }
    return disallowed_types

  preorder_call(type_signature, _check_for_disallowed_type, dict())
def type_tree_contains_only(type_spec, whitelisted_types):
  """Checks whether `type_spec` contains only instances of `whitelisted_types`.

  Args:
    type_spec: The type specification to check, either an instance of
      `computation_types.Type` or something convertible to it by
      `computation_types.to_type()`.
    whitelisted_types: The singleton or tuple of types for which we wish to
      check `type_spec`. Contains subclasses of `computation_types.Type`. Uses
      similar syntax to `isinstance`; allows for single argument or `tuple` of
      multiple arguments.

  Returns:
    True if `type_spec` contains only types in `whitelisted_types`, and
    `False` otherwise.
  """
  type_signature = computation_types.to_type(type_spec)

  class WhitelistTracker(object):
    """Simple callable to track Boolean through nested structure."""

    def __init__(self):
      # Bug fix: start out assuming the tree is fully whitelisted. Without
      # this initialization, reading `tracker.whitelisted` after a traversal
      # that found no violations raised `AttributeError`.
      self.whitelisted = True

    def __call__(self, type_to_check, whitelist):
      """Checks subtree of `type_to_check` for `whitelist`."""
      if not isinstance(type_to_check, whitelist):
        self.whitelisted = False
      return whitelist

  tracker = WhitelistTracker()
  preorder_call(type_signature, tracker, whitelisted_types)
  return tracker.whitelisted
def is_tensorflow_compatible_type(type_spec):
  """Checks `type_spec` against an explicit whitelist for `tf_computation`."""
  if type_spec is None:
    return True
  # Only tensors, sequences, and named tuples may appear in a
  # `tf_computation` type signature.
  whitelist = (
      computation_types.TensorType,
      computation_types.SequenceType,
      computation_types.NamedTupleType,
  )
  return type_tree_contains_only(type_spec, whitelist)
def is_generic_op_compatible_type(type_spec):
  """Checks `type_spec` against an explicit whitelist for generic operators."""
  if type_spec is None:
    return True
  # Generic operators admit only tensors and named tuples (no sequences).
  whitelist = (
      computation_types.TensorType,
      computation_types.NamedTupleType,
  )
  return type_tree_contains_only(type_spec, whitelist)
def is_binary_op_with_upcast_compatible_pair(possibly_nested_type,
                                             type_to_upcast):
  """Checks unambiguity in applying `type_to_upcast` to `possibly_nested_type`.

  That is, checks that either these types are equivalent and contain only
  tuples and tensors, or that
  `possibly_nested_type` is perhaps a nested structure containing only tensors
  with `dtype` of `type_to_upcast` at the leaves, where `type_to_upcast` must
  be a scalar tensor type. Notice that this relationship is not symmetric,
  since binary operators need not respect this symmetry in general.
  For example, it makes perfect sense to divide a nested structure of tensors
  by a scalar, but not the other way around.

  Args:
    possibly_nested_type: Convertible to `computation_types.Type`.
    type_to_upcast: Convertible to `computation_types.Type`.

  Returns:
    Boolean indicating whether `type_to_upcast` can be upcast to
    `possibly_nested_type` in the manner described above.
  """
  possibly_nested_type = computation_types.to_type(possibly_nested_type)
  type_to_upcast = computation_types.to_type(type_to_upcast)
  # Both sides must contain only tensors and named tuples.
  if not (is_generic_op_compatible_type(possibly_nested_type) and
          is_generic_op_compatible_type(type_to_upcast)):
    return False
  if are_equivalent_types(possibly_nested_type, type_to_upcast):
    return True
  # Non-equivalent types are only upcast-compatible when the upcast side is a
  # scalar tensor.
  if not (isinstance(type_to_upcast, computation_types.TensorType) and
          type_to_upcast.shape == tf.TensorShape(())):
    return False
  # Mutable single-element list so a visitor closure can flip the flag.
  types_are_ok = [True]
  only_allowed_dtype = type_to_upcast.dtype
  # NOTE(review): `_check_tensor_types` is not defined in this excerpt;
  # presumably a nested helper (possibly stripped from this view) that sets
  # `types_are_ok[0] = False` for any leaf tensor whose dtype differs from
  # `only_allowed_dtype`. `transform_type_postorder` is likewise defined
  # elsewhere in this module -- confirm against the full module.
  transform_type_postorder(possibly_nested_type, _check_tensor_types)
  return types_are_ok[0]
def type_tree_contains_types(type_spec, blacklisted_types):
  """Checks whether `type_spec` contains any instances of `blacklisted_types`.

  Args:
    type_spec: The type specification to check, either an instance of
      `computation_types.Type` or something convertible to it by
      `computation_types.to_type()`.
    blacklisted_types: The singleton or tuple of types for which we wish to
      check in `type_spec`. Contains subclasses of `computation_types.Type`.
      Uses similar syntax to `isinstance`; allows for single argument or `tuple`
      of multiple arguments.

  Returns:
    True if `type_spec` contains any types in `blacklisted_types`, and
    `False` otherwise.
  """
  type_signature = computation_types.to_type(type_spec)

  class BlacklistTracker(object):
    """Simple callable to track Boolean through nested structure."""

    def __init__(self):
      # Bug fix: start out assuming nothing blacklisted has been seen. Without
      # this initialization, reading `tracker.blacklisted` after a traversal
      # that found no blacklisted types raised `AttributeError`.
      self.blacklisted = False

    def __call__(self, type_to_check, blacklist):
      """Checks subtree of `type_to_check` for `blacklist`."""
      if isinstance(type_to_check, blacklist):
        self.blacklisted = True
      return blacklist

  tracker = BlacklistTracker()
  preorder_call(type_signature, tracker, blacklisted_types)
  return tracker.blacklisted
def check_all_abstract_types_are_bound(type_spec):
  """Checks that all abstract types labels appearing in 'type_spec' are bound.

  For abstract types to be bound, it means that type labels appearing on the
  result side of functional type signatures must also appear on the parameter
  side. This check is intended to verify that abstract types are only used to
  model template-like type signatures, and can always be reduced to a concrete
  type by specializing templates to work with specific sets of arguments.

  Examples of valid types that pass this check successfully:

    int32
    (int32 -> int32)
    ( -> int32)
    (T -> T)
    ((T -> T) -> bool)
    (( -> T) -> T)
    (<T*, ((T, T) -> T)> -> T)
    (T* -> int32)
    ( -> (T -> T))
    <T, (U -> U), U> -> <T, U>

  Examples of invalid types that fail this check because 'T' is unbound:

    T
    (int32 -> T)
    ( -> T)
    (T -> U)

  Args:
    type_spec: An instance of computation_types.Type, or something convertible
      to it.

  Raises:
    TypeError: if arguments are of the wrong types, or if unbound type labels
      occur in 'type_spec'.
  """

  def _check_or_get_unbound_abstract_type_labels(type_spec, bound_labels,
                                                 check):
    """Checks or collects abstract type labels from 'type_spec'.

    This is a helper function used by 'check_abstract_types_are_bound', not to
    be exported out of this module.

    Args:
      type_spec: An instance of computation_types.Type.
      bound_labels: A set of string labels that refer to 'bound' abstract
        types, i.e., ones that appear on the parameter side of a functional
        type.
      check: A bool value. If True, no new unbound type labels are permitted,
        and if False, any new labels encountered are returned as a set.

    Returns:
      If check is False, a set of new abstract type labels introduced in
      'type_spec' that don't yet appear in the set 'bound_labels'. If check is
      True, always returns an empty set.

    Raises:
      TypeError: if unbound labels are found and check is True.
    """
    py_typecheck.check_type(type_spec, computation_types.Type)
    if isinstance(type_spec, computation_types.TensorType):
      return set()
    elif isinstance(type_spec, computation_types.SequenceType):
      return _check_or_get_unbound_abstract_type_labels(type_spec.element,
                                                        bound_labels, check)
    elif isinstance(type_spec, computation_types.FederatedType):
      return _check_or_get_unbound_abstract_type_labels(type_spec.member,
                                                        bound_labels, check)
    elif isinstance(type_spec, computation_types.NamedTupleType):
      # Union of the labels contributed by every element of the tuple.
      return set().union(*[
          _check_or_get_unbound_abstract_type_labels(v, bound_labels, check)
          for _, v in anonymous_tuple.iter_elements(type_spec)
      ])
    elif isinstance(type_spec, computation_types.AbstractType):
      if type_spec.label in bound_labels:
        return set()
      elif not check:
        return set([type_spec.label])
      else:
        raise TypeError('Unbound type label \'{}\'.'.format(type_spec.label))
    elif isinstance(type_spec, computation_types.FunctionType):
      if type_spec.parameter is None:
        parameter_labels = set()
      else:
        # Labels on the parameter side are collected (check=False) ...
        parameter_labels = _check_or_get_unbound_abstract_type_labels(
            type_spec.parameter, bound_labels, False)
      # ... and become bound when validating the result side.
      result_labels = _check_or_get_unbound_abstract_type_labels(
          type_spec.result, bound_labels.union(parameter_labels), check)
      return parameter_labels.union(result_labels)

  _check_or_get_unbound_abstract_type_labels(
      computation_types.to_type(type_spec), set(), True)
def is_numeric_dtype(dtype):
  """Returns True iff `dtype` is numeric.

  Args:
    dtype: An instance of tf.DType.

  Returns:
    True iff `dtype` is numeric, i.e., integer, float, or complex.
  """
  py_typecheck.check_type(dtype, tf.DType)
  # Numeric means any of the three TF numeric families.
  return any((dtype.is_integer, dtype.is_floating, dtype.is_complex))
def is_sum_compatible(type_spec):
  """Determines if `type_spec` is a type that can be added to itself.

  Types that are sum-compatible are composed of scalars of numeric types,
  possibly packaged into nested named tuples, and possibly federated. Types
  that are sum-incompatible include sequences, functions, abstract types,
  and placements.

  Args:
    type_spec: Either an instance of computation_types.Type, or something
      convertible to it.

  Returns:
    `True` iff `type_spec` is sum-compatible, `False` otherwise.
  """
  type_spec = computation_types.to_type(type_spec)
  if isinstance(type_spec, computation_types.TensorType):
    return is_numeric_dtype(type_spec.dtype)
  if isinstance(type_spec, computation_types.FederatedType):
    # A federated value is summable iff its member constituents are.
    return is_sum_compatible(type_spec.member)
  if isinstance(type_spec, computation_types.NamedTupleType):
    member_types = anonymous_tuple.iter_elements(type_spec)
    return all(is_sum_compatible(member) for _, member in member_types)
  # Sequences, functions, abstract types, and placements are not summable.
  return False
def is_structure_of_integers(type_spec):
  """Determines if `type_spec` is a structure of integers.

  Args:
    type_spec: Either an instance of computation_types.Type, or something
      convertible to it.

  Returns:
    `True` iff `type_spec` is a structure of integers, otherwise `False`.
  """
  type_spec = computation_types.to_type(type_spec)
  if isinstance(type_spec, computation_types.TensorType):
    py_typecheck.check_type(type_spec.dtype, tf.DType)
    return type_spec.dtype.is_integer
  if isinstance(type_spec, computation_types.FederatedType):
    # A federated value is integral iff its member constituents are.
    return is_structure_of_integers(type_spec.member)
  if isinstance(type_spec, computation_types.NamedTupleType):
    member_types = anonymous_tuple.iter_elements(type_spec)
    return all(is_structure_of_integers(member) for _, member in member_types)
  return False
def check_federated_type(type_spec,
                         member=None,
                         placement=None,
                         all_equal=None):
  """Checks that `type_spec` is a federated type with the given parameters.

  Args:
    type_spec: The `tff.Type` to check (or something convertible to it).
    member: The expected member type, or `None` if unspecified.
    placement: The desired placement, or `None` if unspecified.
    all_equal: The desired result of accessing the property
      `tff.FederatedType.all_equal` of `type_spec`, or `None` if left
      unspecified.

  Raises:
    TypeError: if `type_spec` is not a federated type of the given kind.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.FederatedType)
  if member is not None:
    member = computation_types.to_type(member)
    py_typecheck.check_type(member, computation_types.Type)
    # `check_assignable_from` is a module-level helper defined elsewhere in
    # this module (not visible in this excerpt); the member only needs to be
    # assignable, not exactly equal.
    check_assignable_from(member, type_spec.member)
  if placement is not None:
    py_typecheck.check_type(placement, placement_literals.PlacementLiteral)
    # Placement literals are singletons, so identity comparison is correct.
    if type_spec.placement is not placement:
      raise TypeError(
          'Expected federated type placed at {}, got one placed at {}.'.format(
              placement, type_spec.placement))
  if all_equal is not None:
    py_typecheck.check_type(all_equal, bool)
    if type_spec.all_equal != all_equal:
      raise TypeError(
          'Expected federated type with all_equal {}, got one with {}.'.format(
              all_equal, type_spec.all_equal))
def is_average_compatible(type_spec):
  """Determines if `type_spec` can be averaged.

  Types that are average-compatible are composed of numeric tensor types,
  either floating-point or complex, possibly packaged into nested named tuples,
  and possibly federated.

  Args:
    type_spec: An instance of `types.Type`, or something convertible to it.

  Returns:
    `True` iff `type_spec` is average-compatible, `False` otherwise.
  """
  type_spec = computation_types.to_type(type_spec)
  if isinstance(type_spec, computation_types.TensorType):
    # Integers are deliberately excluded: averaging them loses precision.
    dtype = type_spec.dtype
    return dtype.is_floating or dtype.is_complex
  if isinstance(type_spec, computation_types.FederatedType):
    return is_average_compatible(type_spec.member)
  if isinstance(type_spec, computation_types.NamedTupleType):
    member_types = anonymous_tuple.iter_elements(type_spec)
    return all(is_average_compatible(member) for _, member in member_types)
  return False
def is_assignable_from(target_type, source_type):
  """Determines whether `target_type` is assignable from `source_type`.

  Args:
    target_type: The expected type (that of the target of the assignment).
    source_type: The actual type (that of the source of the assignment), tested
      for being a specialization of the `target_type`.

  Returns:
    `True` iff `target_type` is assignable from `source_type`, or else `False`.

  Raises:
    TypeError: If the arguments are not TFF types.
  """
  target_type = computation_types.to_type(target_type)
  source_type = computation_types.to_type(source_type)
  py_typecheck.check_type(target_type, computation_types.Type)
  py_typecheck.check_type(source_type, computation_types.Type)
  if isinstance(target_type, computation_types.TensorType):
    # `_shape_is_assignable_from` is a module-level helper defined elsewhere
    # in this module (not visible in this excerpt).
    return (isinstance(source_type, computation_types.TensorType) and
            (target_type.dtype == source_type.dtype) and
            _shape_is_assignable_from(target_type.shape, source_type.shape))
  elif isinstance(target_type, computation_types.NamedTupleType):
    if not isinstance(source_type, computation_types.NamedTupleType):
      return False
    target_elements = anonymous_tuple.to_elements(target_type)
    source_elements = anonymous_tuple.to_elements(source_type)
    # Element-wise: a source element may drop its name (None) but may not
    # carry a different name than the target's.
    return ((len(target_elements) == len(source_elements)) and all(
        ((source_elements[k][0] in [target_elements[k][0], None]) and
         is_assignable_from(target_elements[k][1], source_elements[k][1]))
        for k in range(len(target_elements))))
  elif isinstance(target_type, computation_types.SequenceType):
    return (isinstance(source_type, computation_types.SequenceType) and
            is_assignable_from(target_type.element, source_type.element))
  elif isinstance(target_type, computation_types.FunctionType):
    # Parameters are compared contravariantly, results covariantly.
    # NOTE(review): because `or` binds looser than `and`, when both parameters
    # are `None` this returns `True` WITHOUT checking the result types --
    # confirm whether that is intended.
    return (isinstance(source_type, computation_types.FunctionType) and
            (((source_type.parameter is None) and
              (target_type.parameter is None)) or
             ((source_type.parameter is not None) and
              (target_type.parameter is not None) and is_assignable_from(
                  source_type.parameter, target_type.parameter)) and
             is_assignable_from(target_type.result, source_type.result)))
  elif isinstance(target_type, computation_types.AbstractType):
    # TODO(b/113112108): Revise this to extend the relation of assignability to
    # abstract types.
    raise TypeError('Abstract types are not comparable.')
  elif isinstance(target_type, computation_types.PlacementType):
    return isinstance(source_type, computation_types.PlacementType)
  elif isinstance(target_type, computation_types.FederatedType):
    # An all_equal target cannot accept a non-all_equal source.
    if (not isinstance(source_type, computation_types.FederatedType) or
        not is_assignable_from(target_type.member, source_type.member) or
        target_type.all_equal and not source_type.all_equal):
      return False
    for val in [target_type, source_type]:
      py_typecheck.check_type(val.placement,
                              placement_literals.PlacementLiteral)
    # Placement literals are singletons, so identity comparison is correct.
    return target_type.placement is source_type.placement
  else:
    raise TypeError('Unexpected target type {}.'.format(target_type))
def are_equivalent_types(type1, type2):
  """Determines whether `type1` and `type2` are equivalent.

  Equivalence is defined here as mutual assignability: each type must be
  assignable from the other. A `None` type is only equivalent to `None`.

  Args:
    type1: One type.
    type2: Another type.

  Returns:
    `True` iff `type1` and `type2` are equivalent, or else `False`.
  """
  # Treat `None` specially: it is equivalent only to itself.
  if type1 is None or type2 is None:
    return type1 is None and type2 is None
  return (is_assignable_from(type1, type2) and
          is_assignable_from(type2, type1))
def check_equivalent_types(type1, type2):
  """Checks that `type1` and `type2` are equivalent.

  Args:
    type1: One type.
    type2: Another type.

  Raises:
    TypeError: If `not are_equivalent_types(type1, type2)`.
  """
  if not are_equivalent_types(type1, type2):
    # Bug fix: the message contained `{}` placeholders but `.format` was never
    # called, so the offending types were not reported in the error.
    raise TypeError('Types {} and {} are not equivalent.'.format(type1, type2))
def convert_to_py_container(value, type_spec):
  """Recursively convert `AnonymousTuple`s to Python containers.
  This is in some sense the inverse operation to
  `anonymous_tuple.from_container`.
  Args:
    value: An `AnonymousTuple`, in which case this method recurses, replacing
      all `AnonymousTuple`s with the appropriate Python containers if possible
      (and keeping AnonymousTuple otherwise); or some other value, in which case
      that value is returned unmodified immediately (terminating the recursion).
    type_spec: The `tff.Type` to which value should conform, possibly including
      `NamedTupleTypeWithPyContainerType`.
  Returns:
    The input value, with containers converted to appropriate Python
    containers as specified by the `type_spec`.
  Raises:
    ValueError: If the conversion is not possible due to a mix of named
      and unnamed values.
  """
  # Base case of the recursion: anything that is not an AnonymousTuple is
  # passed through unchanged.
  if not isinstance(value, anonymous_tuple.AnonymousTuple):
    return value
  anon_tuple = value
  py_typecheck.check_type(type_spec, computation_types.NamedTupleType)
  if isinstance(type_spec, computation_types.NamedTupleTypeWithPyContainerType):
    # The type carries an explicit Python container (list, dict, namedtuple,
    # attrs class, ...) to project into.
    container_type = (
        computation_types.NamedTupleTypeWithPyContainerType.get_container_type(
            type_spec))
    container_is_anon_tuple = False
  else:
    # TODO(b/133228705): Consider requiring NamedTupleTypeWithPyContainerType.
    container_is_anon_tuple = True
    container_type = anonymous_tuple.AnonymousTuple
  # Avoid projecting the AnonymousTuple into a Python container that is not
  # supported.
  if not container_is_anon_tuple:
    # NOTE(review): this assumes `dir` on an AnonymousTuple enumerates exactly
    # its named elements; the unnamed count is derived by subtraction.
    num_named_elements = len(dir(anon_tuple))
    num_unnamed_elements = len(anon_tuple) - num_named_elements
    if num_named_elements > 0 and num_unnamed_elements > 0:
      raise ValueError('Cannot represent value {} with container type {}, '
                       'because value contains a mix of named and unnamed '
                       'elements.'.format(anon_tuple, container_type))
    if (num_named_elements > 0 and
        is_container_type_without_names(container_type)):
      # NOTE: This could be relaxed in some cases if needed.
      raise ValueError(
          'Cannot represent value {} with named elements '
          'using container type {} which does not support names.'.format(
              anon_tuple, container_type))
    if (num_unnamed_elements > 0 and
        is_container_type_with_names(container_type)):
      # Note: This could be relaxed in some cases if needed.
      raise ValueError('Cannot represent value {} with unnamed elements '
                       'with container type {} which requires names.'.format(
                           anon_tuple, container_type))
  # Recurse element-wise, pairing each tuple element with its declared type.
  elements = []
  for index, (elem_name,
              elem_type) in enumerate(anonymous_tuple.to_elements(type_spec)):
    value = convert_to_py_container(anon_tuple[index], elem_type)
    if elem_name is None and not container_is_anon_tuple:
      elements.append(value)
    else:
      elements.append((elem_name, value))
  if (py_typecheck.is_named_tuple(container_type) or
      py_typecheck.is_attrs(container_type)):
    # The namedtuple and attr.s class constructors cannot interpret a list of
    # (name, value) tuples; instead call constructor using kwargs. Note
    # that these classes already define an order of names internally,
    # so order does not matter.
    return container_type(**dict(elements))
  else:
    # E.g., tuple and list when elements only has values,
    # but also dict, OrderedDict, or AnonymousTuple when
    # elements has (name, value) tuples.
    return container_type(elements)
def is_concrete_instance_of(type_with_concrete_elements,
                            type_with_abstract_elements):
  """Checks whether abstract types can be concretized via a parallel structure.
  This function builds up a new concrete structure via the bindings encountered
  in `type_with_abstract_elements` in a postorder fashion. That is, it walks the
  type trees in parallel, caching bindings for abstract types on the way. When
  it encounters a previously bound abstract type, it simply inlines this cached
  value. Finally, `is_concrete_instance_of` delegates checking type
  equivalence to `are_equivalent_types`, passing in the created concrete
  structure for comparison with `type_with_concrete_elements`.
  Args:
    type_with_concrete_elements: Instance of `computation_types.Type` of
      parallel structure to `type_with_concrete_elements`, containing only
      concrete types, to test for equivalence with a concretization of
      `type_with_abstract_elements`.
    type_with_abstract_elements: Instance of `computation_types.Type` which may
      contain abstract types, to check for possibility of concretizing according
      to `type_with_concrete_elements`.
  Returns:
    `True` if `type_with_abstract_elements` can be concretized to
    `type_with_concrete_elements`. Returns `False` if they are of the same
    structure but some conflicting assignment exists in
    `type_with_concrete_elements`.
  Raises:
    TypeError: If `type_with_abstract_elements` and
      `type_with_concrete_elements` are not structurally equivalent; that is,
      their type trees are of different structure; or if
      `type_with_concrete_elements` contains abstract elements.
  """
  py_typecheck.check_type(type_with_abstract_elements, computation_types.Type)
  py_typecheck.check_type(type_with_concrete_elements, computation_types.Type)
  if type_tree_contains_types(type_with_concrete_elements,
                              computation_types.AbstractType):
    raise TypeError(
        '`type_with_concrete_elements` must contain no abstract types. You '
        'have passed {}'.format(type_with_concrete_elements))
  # Maps abstract-type labels (as strings) to the concrete type they were
  # first bound to; shared by all recursive calls below.
  bound_abstract_types = {}
  type_error_string = ('Structural mismatch encountered while concretizing '
                       'abstract types. The structure of {} does not match the '
                       'structure of {}').format(type_with_abstract_elements,
                                                 type_with_concrete_elements)
  def _concretize_abstract_types(abstract_type_spec, concrete_type_spec):
    """Recursive helper function to construct concrete type spec."""
    if isinstance(abstract_type_spec, computation_types.AbstractType):
      bound_type = bound_abstract_types.get(str(abstract_type_spec.label))
      # NOTE(review): this is a truthiness test, not `is not None`; it assumes
      # computation_types instances are always truthy -- confirm.
      if bound_type:
        return bound_type
      else:
        # First occurrence of this label: bind it to the concrete type.
        bound_abstract_types[str(abstract_type_spec.label)] = concrete_type_spec
        return concrete_type_spec
    elif isinstance(abstract_type_spec, computation_types.TensorType):
      # Tensor types are already concrete; keep the abstract-side spec.
      return abstract_type_spec
    elif isinstance(abstract_type_spec, computation_types.NamedTupleType):
      if not isinstance(concrete_type_spec, computation_types.NamedTupleType):
        raise TypeError(type_error_string)
      abstract_elements = anonymous_tuple.to_elements(abstract_type_spec)
      concrete_elements = anonymous_tuple.to_elements(concrete_type_spec)
      if len(abstract_elements) != len(concrete_elements):
        raise TypeError(type_error_string)
      concretized_tuple_elements = []
      for k in range(len(abstract_elements)):
        # Element names must match pairwise across the two trees.
        if abstract_elements[k][0] != concrete_elements[k][0]:
          raise TypeError(type_error_string)
        concretized_tuple_elements.append(
            (abstract_elements[k][0],
             _concretize_abstract_types(abstract_elements[k][1],
                                        concrete_elements[k][1])))
      return computation_types.NamedTupleType(concretized_tuple_elements)
    elif isinstance(abstract_type_spec, computation_types.SequenceType):
      if not isinstance(concrete_type_spec, computation_types.SequenceType):
        raise TypeError(type_error_string)
      return computation_types.SequenceType(
          _concretize_abstract_types(abstract_type_spec.element,
                                     concrete_type_spec.element))
    elif isinstance(abstract_type_spec, computation_types.FunctionType):
      if not isinstance(concrete_type_spec, computation_types.FunctionType):
        raise TypeError(type_error_string)
      concretized_param = _concretize_abstract_types(
          abstract_type_spec.parameter, concrete_type_spec.parameter)
      concretized_result = _concretize_abstract_types(abstract_type_spec.result,
                                                      concrete_type_spec.result)
      return computation_types.FunctionType(concretized_param,
                                            concretized_result)
    elif isinstance(abstract_type_spec, computation_types.PlacementType):
      if not isinstance(concrete_type_spec, computation_types.PlacementType):
        raise TypeError(type_error_string)
      return abstract_type_spec
    elif isinstance(abstract_type_spec, computation_types.FederatedType):
      if not isinstance(concrete_type_spec, computation_types.FederatedType):
        raise TypeError(type_error_string)
      new_member = _concretize_abstract_types(abstract_type_spec.member,
                                              concrete_type_spec.member)
      # Placement and all_equal are taken from the abstract side.
      return computation_types.FederatedType(new_member,
                                             abstract_type_spec.placement,
                                             abstract_type_spec.all_equal)
    else:
      raise TypeError(
          'Unexpected abstract typespec {}.'.format(abstract_type_spec))
  concretized_abstract_type = _concretize_abstract_types(
      type_with_abstract_elements, type_with_concrete_elements)
  # Final equivalence check of the concretized tree against the concrete one.
  return are_equivalent_types(concretized_abstract_type,
                              type_with_concrete_elements)
def transform_type_postorder(type_signature, transform_fn):
  """Walks type tree of `type_signature` postorder, calling `transform_fn`.
  Args:
    type_signature: Instance of `computation_types.Type` to transform
      recursively.
    transform_fn: Transformation function to apply to each node in the type tree
      of `type_signature`. Must be instance of Python function type.
  Returns:
    A possibly transformed version of `type_signature`, with each node in its
    tree the result of applying `transform_fn` to the corresponding node in
    `type_signature`, paired with a boolean `mutated` flag.
  Raises:
    TypeError: If the types don't match the specification above.
  """
  # TODO(b/134525440): Investigate unifying the recursive methods in type_utils,
  # rather than proliferating them.
  # TODO(b/134595038): Revisit the change here to add a mutated flag.
  py_typecheck.check_type(type_signature, computation_types.Type)
  py_typecheck.check_callable(transform_fn)
  if isinstance(type_signature, computation_types.FederatedType):
    # Postorder: transform the member first, rebuild only if it changed, then
    # apply `transform_fn` to the (possibly rebuilt) federated node itself.
    transformed_member, member_mutated = transform_type_postorder(
        type_signature.member, transform_fn)
    if member_mutated:
      type_signature = computation_types.FederatedType(transformed_member,
                                                       type_signature.placement,
                                                       type_signature.all_equal)
    fed_type_signature, type_signature_mutated = transform_fn(type_signature)
    return fed_type_signature, type_signature_mutated or member_mutated
  elif isinstance(type_signature, computation_types.SequenceType):
    transformed_element, element_mutated = transform_type_postorder(
        type_signature.element, transform_fn)
    if element_mutated:
      type_signature = computation_types.SequenceType(transformed_element)
    seq_type_signature, type_signature_mutated = transform_fn(type_signature)
    return seq_type_signature, type_signature_mutated or element_mutated
  elif isinstance(type_signature, computation_types.FunctionType):
    transformed_param, param_mutated = transform_type_postorder(
        type_signature.parameter, transform_fn)
    transformed_result, result_mutated = transform_type_postorder(
        type_signature.result, transform_fn)
    if param_mutated or result_mutated:
      type_signature = computation_types.FunctionType(transformed_param,
                                                      transformed_result)
    fn_type_signature, fn_mutated = transform_fn(type_signature)
    return fn_type_signature, fn_mutated or param_mutated or result_mutated
  elif isinstance(type_signature, computation_types.NamedTupleType):
    elems = []
    elems_mutated = False
    for element in anonymous_tuple.iter_elements(type_signature):
      transformed_element, element_mutated = transform_type_postorder(
          element[1], transform_fn)
      elems_mutated = elems_mutated or element_mutated
      elems.append((element[0], transformed_element))
    if elems_mutated:
      # Preserve the declared Python container type, if one was attached.
      if isinstance(type_signature,
                    computation_types.NamedTupleTypeWithPyContainerType):
        type_signature = computation_types.NamedTupleTypeWithPyContainerType(
            elems,
            computation_types.NamedTupleTypeWithPyContainerType
            .get_container_type(type_signature))
      else:
        type_signature = computation_types.NamedTupleType(elems)
    tuple_type_signature, tuple_mutated = transform_fn(type_signature)
    return tuple_type_signature, elems_mutated or tuple_mutated
  elif isinstance(type_signature,
                  (computation_types.AbstractType, computation_types.TensorType,
                   computation_types.PlacementType)):
    # Leaf types: apply the transform directly.
    # NOTE(review): any other Type subclass falls through all branches and this
    # function implicitly returns None -- confirm the check_type above is
    # sufficient to rule that out.
    return transform_fn(type_signature)
def reconcile_value_with_type_spec(value, type_spec):
  """Reconciles the type of `value` with the given `type_spec`.

  Reconciliation is currently only performed for values that implement
  `tff.TypedObject`; future extensions may cover a greater range of values,
  so callers should not depend on the limited implementation. The method
  fails on any incompatibility between `value` and `type_spec`, and always
  fails if the type cannot be determined.

  Args:
    value: An object that represents a value.
    type_spec: An instance of `tff.Type` or something convertible to it.

  Returns:
    An instance of `tff.Type`. For a non-`tff.TypedObject` `value` this is
    `type_spec` itself (which must then not be `None`). For a
    `tff.TypedObject`, this is `type_spec` reconciled against the value's own
    type signature (or just that signature when `type_spec` is `None`).

  Raises:
    TypeError: If the `value` type and `type_spec` are incompatible, or if the
      type cannot be determined.
  """
  type_spec = computation_types.to_type(type_spec)
  if isinstance(value, typed_object.TypedObject):
    return reconcile_value_type_with_type_spec(value.type_signature, type_spec)
  # Non-typed values carry no type of their own; `type_spec` is mandatory.
  if type_spec is None:
    raise TypeError(
        'Cannot derive an eager representation for a value of an unknown type.')
  return type_spec
def reconcile_value_type_with_type_spec(value_type, type_spec):
  """Reconciles a pair of types.

  Args:
    value_type: An instance of `tff.Type` or something convertible to it. Must
      not be `None`.
    type_spec: An instance of `tff.Type`, something convertible to it, or
      `None`.

  Returns:
    Either `value_type` if `type_spec` is `None`, or `type_spec` if `type_spec`
    is not `None` and equivalent with `value_type`.

  Raises:
    TypeError: If arguments are of incompatible types.
  """
  value_type = computation_types.to_type(value_type)
  py_typecheck.check_type(value_type, computation_types.Type)
  # With no explicit spec, the value's own type wins.
  if type_spec is None:
    return value_type
  type_spec = computation_types.to_type(type_spec)
  if not are_equivalent_types(value_type, type_spec):
    raise TypeError('Expected a value of type {}, found {}.'.format(
        type_spec, value_type))
  return type_spec
def get_function_type(type_spec):
  """Constructs a functional type signature for `type_spec`.

  Given `type_spec` that is `T`, a functional type signature may be either `T`
  itself if it is a function, or `( -> T)` otherwise. This allows types from
  protos to be matched to how types are represented at the level of the Python
  wrapping.

  Args:
    type_spec: An instance of `tff.Type` of something convertible to it.

  Returns:
    An instance of `tff.FunctionType`, possibly with no argument if `type_spec`
    is not functional.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.Type)
  # Wrap non-functional types as a no-argument function type.
  if not isinstance(type_spec, computation_types.FunctionType):
    return computation_types.FunctionType(None, type_spec)
  return type_spec
def get_argument_type(type_spec):
  """Constructs a type signature for functional `type_spec`.

  Given `type_spec` of the form `(T -> U)`, the argument type signature is
  simply `type_spec` itself. Given `type_spec` of the form `( -> U)`, however,
  the argument type signature is just `U`. This allows types constructed to
  match Python wrappers (with no arguments) to be projected back into a form
  compatible with how they're represented in protos.

  Args:
    type_spec: An instance of `tff.FunctionType`.

  Returns:
    An instance of `tff.Type` as described above.
  """
  py_typecheck.check_type(type_spec, computation_types.FunctionType)
  # A no-argument function collapses to its result type.
  return type_spec if type_spec.parameter is not None else type_spec.result
def to_non_all_equal(type_spec):
  """Constructs a non-`all_equal` version of the federated type `type_spec`.

  Args:
    type_spec: An instance of `tff.FederatedType`.

  Returns:
    A federated type with the same member and placement, but `all_equal=False`.
  """
  py_typecheck.check_type(type_spec, computation_types.FederatedType)
  member_type = type_spec.member
  placement = type_spec.placement
  return computation_types.FederatedType(
      member_type, placement, all_equal=False)
def check_valid_federated_weighted_mean_argument_tuple_type(type_spec):
  """Checks that `type_spec` is a valid type of a federated weighted mean arg.
  Args:
    type_spec: An instance of `tff.Type` or something convertible to it.
  Raises:
    TypeError: If the check fails.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_not_none(type_spec)
  py_typecheck.check_type(type_spec, computation_types.NamedTupleType)
  # The argument must be a 2-tuple: (value, weight).
  if len(type_spec) != 2:
    raise TypeError('Expected a 2-tuple, found {}.'.format(type_spec))
  # Both elements must be non-all-equal CLIENTS-placed federated values with
  # average-compatible members.
  for _, v in anonymous_tuple.iter_elements(type_spec):
    check_federated_type(v, None, placement_literals.CLIENTS, False)
    if not is_average_compatible(v.member):
      raise TypeError(
          'Expected average-compatible args, got {} from argument of type {}.'
          .format(v.member, type_spec))
  # The weight (second element) must additionally be a scalar tensor.
  w_type = type_spec[1].member
  py_typecheck.check_type(w_type, computation_types.TensorType)
  if w_type.shape.ndims != 0:
    raise TypeError('Expected scalar weight, got {}.'.format(w_type))
| [
2,
406,
600,
355,
25,
21015,
18,
198,
2,
15069,
2864,
11,
383,
309,
22854,
37535,
35089,
515,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
... | 2.72867 | 20,956 |
#!/usr/bin/env python3
from deepconcolic.datasets import load_by_name
from deepconcolic import scripting
from deepconcolic import plotting
from deepconcolic.plotting import plt, subplots, show
from deepconcolic.utils_io import OutputDir, os, sys
from deepconcolic.utils_funcs import as_numpy, np
import argparse
# see https://matplotlib.org/api/pyplot_api.html
# Matplotlib rc overrides for interactive/on-screen rendering.
plotting.generic_setup (**{
  'ytick.labelsize': 'small',
  'ytick.major.size': 4,
  'ytick.major.width': .4,
  'ytick.major.pad': 4,
  'ytick.direction': 'in',
  'xtick.labelsize': 'small',
  'xtick.major.size': 4,
  'xtick.major.width': .4,
  'xtick.major.pad': 4,
  'axes.labelsize': 'medium',
  'axes.labelpad': 2.,
  'axes.linewidth': .5,
  # 'xaxis.labellocation': 'right',
  'lines.markersize': 1.5,
  'lines.linewidth': .8,
})
# Tighter settings for PGF (LaTeX) export.
plotting.pgf_setup (**{
  'ytick.labelsize': 'xx-small',
  'ytick.major.size': 2,
  'ytick.major.width': .2,
  'ytick.major.pad': 2,
  'ytick.direction': 'in',
  'xtick.labelsize': 'xx-small',
  'xtick.major.size': 1,
  'xtick.major.width': .1,
  'xtick.major.pad': 1,
  'axes.labelsize': 'x-small',
  'axes.titlesize': 'small',
  'axes.formatter.limits': (-2, 2),
  'axes.formatter.useoffset': True,
  'axes.formatter.use_mathtext': True,
  'lines.markersize': .2,
  'lines.linewidth': .2,
})
# Command line: optional DeepConcolic output directory plus plotting knobs.
ap = argparse.ArgumentParser ()
ap.add_argument ("path", nargs='?')
ap.add_argument ('--features', dest='features', type = int, default = 32,
                 help = 'the number of input features to show (default is 32)')
gp = ap.add_mutually_exclusive_group ()
gp.add_argument ('--samples', type = int, default = None, metavar = 'N',
                 help = 'plot at most N samples with lines')
gp.add_argument ('--samples-only', type = int, default = None, metavar = 'N',
                 help = 'plot at most N samples with lines, and no boxplot')
except_samples_choices = ('raw', 'ok', 'adv',)
ap.add_argument ('--except-samples', nargs='+', default = [],
                 choices = except_samples_choices)
ap.add_argument ('--max-plots-per-fig', type = int, default = 4,
                 help = 'the maximum number of plots per figure (default is 4)')
ap.add_argument ('--max-features-per-plot', type = int, default = 32,
                 help = 'the maximum number of feature to show in each plot '
                 '(default is 32)')
ap.add_argument ("--outputs", '--outdir', '-o', dest = "outdir",
                 help = "the output directory", metavar = "DIR")
args = vars (ap.parse_args())
# NOTE(review): argparse always defines the `outdir` key (value None when the
# flag is absent), so the `'outdir' in args` guard is always true and
# OutputDir(None) is what actually runs in that case -- confirm OutputDir
# accepts None.
outdir = OutputDir (args['outdir']) if 'outdir' in args else OutputDir ()
features = args['features']
samples = args['samples'] or args['samples_only']
except_samples = args['except_samples']
# With --samples-only, boxplots are suppressed.
boxplots = args['samples_only'] is None
subplots_per_fig = args['max_plots_per_fig']
features_per_subplot = args['max_features_per_plot']
if not boxplots and all (k in except_samples for k in except_samples_choices):
  sys.exit ('Nothing to plot')
# Artificial feature names:
names = ['id'] + [str(i) for i in range (0, 561)]
T_ok, T_adv = (None,) * 2
if args['path'] is not None:
  dirpath = args['path']
  if not os.path.isdir (dirpath):
    sys.exit (f"Argument error: {dirpath} is not a valid directory")
  # Split generated inputs into correctly classified ('-ok-') and
  # adversarial ('-adv-') rows based on their id field.
  T = scripting.read_csv (f'{dirpath}/new_inputs.csv', names = names)
  T_ok = np.array([list(l[names[1:]]) for l in T if '-ok-' in l['id']])
  T_ok = T_ok.reshape(-1, 561)
  T_adv = np.array([list(l[names[1:]]) for l in T if '-adv-' in l['id']])
  T_adv = T_adv.reshape(-1, 561)
  print (f'Got {len (T_ok)} correctly classified inputs.')
  print (f'Got {len (T_adv)} adversarial inputs.')
(x_train, y_train), (x_test, y_test), _, kind, class_names = \
  load_by_name ('OpenML:har')
x_train = as_numpy (x_train)
# T_ok = T_ok[:20]
# T_adv = T_adv[:20]
# T_ok = T_ok[:,:20]
# T_adv = T_adv[:,:20]
# X = x_train[:,:20]
X = x_train
# X = X[:, np.argsort (np.min (X, axis = 0), kind = 'stable')[::-1]]
# X = X[:, np.argsort (np.max (X, axis = 0), kind = 'stable')[::-1]]
# sidx = np.argsort (np.median (X, axis = 0), kind = 'stable')[::-1]
# sidx = sidx[:features]
# Keep only the first `features` columns (in original order).
sidx = np.arange (features)
X = X[:, sidx]
T_ok = T_ok[:, sidx] if T_ok is not None else None
T_adv = T_adv[:, sidx] if T_adv is not None else None
s_raw = 'raw' not in except_samples
s_ok = 'ok' not in except_samples and T_ok is not None
s_adv = 'adv' not in except_samples and T_adv is not None
Xs, Ts_ok, Ts_adv = (None,) * 3
if samples is not None:
  # Cap the number of per-sample line plots at `samples`.
  Xs = X[:min (samples, len (X))] if s_raw else None
  Ts_ok = T_ok[:min (samples, len (T_ok))] if s_ok else None
  Ts_adv = T_adv[:min (samples, len (T_adv))] if s_adv else None
grey_dot = dict (markerfacecolor='grey', marker='.', markersize = .2)
blue_dot = dict (markerfacecolor='blue', marker='.', markersize = .2)
red_dot = dict (markerfacecolor='red', marker='.', markersize = .2)
features_per_fig = features_per_subplot * subplots_per_fig
num_features = X.shape[1]
# One figure per `features_per_fig` features; each figure holds up to
# `subplots_per_fig` subplots of `features_per_subplot` features each.
# NOTE(review): `boxplot_props` and `plot_lines` are not defined or imported
# in this file as shown -- confirm they are provided elsewhere (e.g. via
# deepconcolic.plotting) before running.
for feature_index in range (0, num_features, features_per_fig):
  feats = min (features_per_fig, num_features - feature_index)
  num_plots = (feats + features_per_subplot - 1) // features_per_subplot
  fig, ax = subplots (num_plots)
  # fig, ax = subplots (1, num_plots)
  # fig.subplots_adjust (left = 0.04, right = 0.99, hspace = 0.1,
  #                      bottom = 0.03, top = 0.99)
  ax = ax if isinstance (ax, np.ndarray) else [ax]
  for axi, fi in zip (ax, range (feature_index, feature_index + feats,
                                 features_per_subplot)):
    max_fi = min (fi + features_per_subplot, num_features)
    if boxplots:
      # Baseline distribution of the training data, in grey.
      axi.boxplot (X[:, fi:max_fi],
                   widths = .2, vert = True,
                   labels = [str (f) for f in range (fi, max_fi)],
                   **boxplot_props ('grey', 'lightgrey',
                                    flierprops = grey_dot))
    if T_ok is not None and len (T_ok) > 0:
      axi.boxplot (T_ok[:, fi:max_fi],
                   widths = .4, vert = True,
                   labels = [''] * (max_fi - fi),
                   showfliers = False,
                   **boxplot_props ('blue', 'lightblue', alpha = .6,
                                    flierprops = blue_dot))
    if T_adv is not None and len (T_adv) > 0:
      axi.boxplot (T_adv[:, fi:max_fi],
                   widths = .5, vert = True,
                   labels = [''] * (max_fi - fi),
                   **boxplot_props ('red', 'lightcoral', alpha = .6,
                                    flierprops = red_dot))
    # NOTE(review): `Xr` is not used in the visible code; presumably
    # `plot_lines` picks it up from the enclosing scope -- confirm.
    Xr = np.arange (1, max_fi - fi + 1)
    if Xs is not None:
      plot_lines (Xs, linewidth = .3)
    if Ts_ok is not None:
      plot_lines (Ts_ok, color = 'blue')
    if Ts_adv is not None:
      plot_lines (Ts_adv, color = 'red')
  ax[-1].set_xlabel ('input features')
  show (fig, outdir = outdir, basefilename = f'har-{feature_index}')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
2769,
1102,
4033,
291,
13,
19608,
292,
1039,
1330,
3440,
62,
1525,
62,
3672,
198,
6738,
2769,
1102,
4033,
291,
1330,
36883,
198,
6738,
2769,
1102,
4033,
291,
1330,
29353,
198... | 2.239594 | 3,051 |
# Simple stack backed by a module-level list; `top` tracks the index of the
# last element (-1 when empty).
stack = []
top = -1


def push(item):
    """Pushes `item` onto the stack and advances the `top` index."""
    # Bug fix: `push` and `pop` were called below but never defined, so the
    # script failed with a NameError. Definitions follow the behavior shown
    # in the sample-output comments at the bottom of the file.
    global top
    stack.append(item)
    top += 1


def pop():
    """Removes and returns the top element.

    Raises:
        IndexError: If the stack is empty.
    """
    global top
    if not stack:
        raise IndexError('pop from an empty stack')
    top -= 1
    return stack.pop()


if __name__ == '__main__':
    push(10)
    push(20)
    push(30)
    push(40)
    push(50)
    print(stack)
    print("After a pop operation:")
    pop()
    print(stack)

# Sample Output
# [10, 20, 30, 40, 50]
# After a pop operation:
# [10, 20, 30, 40]
| [
25558,
796,
17635,
198,
4852,
796,
532,
16,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4574,
7,
940,
8,
198,
220,
220,
220,
4574,
7,
1238,
8,
198,
220,
220,
220,
4574,
7,
1270,
8,
198... | 2.137405 | 131 |
#!/usr/bin/python
# encoding: utf-8
#pylint: disable=R0904
""" The altium parser test class """
# upconvert.py - A universal hardware design file format converter using
# Format: upverter.com/resources/open-json-format/
# Development: github.com/upverter/schematic-file-converter
#
# Copyright 2011 Upverter, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from upconvert.parser.altium import Altium
import unittest
class AltiumTests(unittest.TestCase):
    """ The tests of the altium parser """

    def setUp(self):
        """ Setup the test case. """
        pass

    def tearDown(self):
        """ Teardown the test case. """
        pass

    def test_create_new_altium_parser(self):
        """ Test creating an empty parser. """
        parser = Altium()
        # Idiom fix: `assert parser != None` compared with `!=` instead of the
        # identity test, and bare `assert` bypasses unittest's failure
        # reporting; use the dedicated assertion helper instead.
        self.assertIsNotNone(parser)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
79,
2645,
600,
25,
15560,
28,
49,
2931,
3023,
198,
37811,
383,
5988,
1505,
30751,
1332,
1398,
37227,
198,
198,
2,
510,
1102,
1851,
13,
9078,
532,
... | 3.07277 | 426 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628
] | 1.785714 | 14 |
from collections import OrderedDict
| [
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198
] | 4.111111 | 9 |
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
from typing import Optional
from .core import database
# Module-level database connection, created once at import time and shared by
# the rest of the package.
conn = database.get_conn()
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
2,
15069,
357,
66,
8,
2864,
12,
1238,
1828,
42614,
78,
4816,
198,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
764,
7295,
1330,
6831,
198,
198,
37043,
796,
6831,
13,
1136,
... | 3.204082 | 49 |
# -*- coding: utf-8 -*-
# For MacOS systems only. Cannot run on a Windows machine.
from colorama import init, Fore
from datetime import datetime
from sys import exit
from os import system
import subprocess
import json
from requests import get
init(True)
title = '''
██ ██ ██
██ ██ ██
██ ██ ██
██ ██ ██
██ ██ ██
████████████████████
██ ██████
██ ██ ██ coffee shop multi-tool ☕️
██ ██ ██ by luca denhez
██ ██████
██ ██
████████████████████████
██ ██
████████████████████
'''
# Print the red banner, run one-time setup, then serve the interactive prompt
# forever (exiting is expected to happen inside the prompt handler).
# NOTE(review): `Coffee` is never defined or imported in this file as shown --
# confirm it is provided elsewhere before running.
print(Fore.RED + title + '\n')
Coffee.start()
while True:
    Coffee.ask()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
1114,
4100,
2640,
3341,
691,
13,
26003,
1057,
319,
257,
3964,
4572,
13,
198,
198,
6738,
3124,
1689,
1330,
2315,
11,
4558,
198,
6738,
4818,
8079,
1330,
4818,
80... | 1.37965 | 914 |
import os.path
import pytest
from gopher_server.handlers import DirectoryHandler, NotFound, PatternHandler, Request
from gopher_server.menu import Menu, MenuItem
BASE_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "examples/data/")
@pytest.fixture
@pytest.fixture
@pytest.mark.asyncio
async def test_directory_handler_file(directory_handler: DirectoryHandler):
    """File path returns file from the directory."""
    response = await directory_handler.handle(Request("localhost", 7000, "example"))
    # Compare against the file contents read directly from disk.
    # NOTE(review): os.path.join with a single argument is a no-op;
    # `BASE_PATH + "example"` is already the full path.
    with open(os.path.join(BASE_PATH + "example")) as f:
        assert response == f.read()
@pytest.mark.asyncio
async def test_directory_handler_binary(directory_handler: DirectoryHandler):
    """Binary files are returned as bytes."""
    response = await directory_handler.handle(Request("localhost", 7000, "image.png"))
    # The reference file is opened in binary mode, so the comparison is
    # bytes-to-bytes.
    with open(os.path.join(BASE_PATH + "image.png"), "rb") as f:
        assert response == f.read()
@pytest.mark.asyncio
async def test_directory_handler_directory(directory_handler: DirectoryHandler):
    """Directory name returns index file from the directory."""
    # An empty selector maps to the directory root, which serves "index".
    response = await directory_handler.handle(Request("localhost", 7000, ""))
    with open(os.path.join(BASE_PATH + "index")) as f:
        assert response == f.read()
@pytest.mark.asyncio
async def test_directory_handler_not_found(directory_handler: DirectoryHandler):
    """Non-existent file raises NotFound."""
    with pytest.raises(NotFound):
        await directory_handler.handle(Request("localhost", 7000, "qwertyuiop"))
@pytest.mark.asyncio
async def test_directory_handler_with_menus(directory_handler_with_menus: DirectoryHandler):
    """Directory handler with generate_menus generates its own menu."""
    response = await directory_handler_with_menus.handle(Request("localhost", 7000, ""))
    # One MenuItem per directory entry: type "0" for text files, "I" for the
    # image, "1" for the subdirectory.
    assert response == Menu([
        MenuItem("0", "example", "example", "localhost", 7000),
        MenuItem("I", "image.png", "image.png", "localhost", 7000),
        MenuItem("0", "index", "index", "localhost", 7000),
        MenuItem("1", "test", "test", "localhost", 7000),
    ])
@pytest.mark.asyncio
async def test_directory_handler_with_menus_subdirectory(directory_handler_with_menus: DirectoryHandler):
    """Directory handler with generate_menus generates a menu for a subdirectory."""
    response = await directory_handler_with_menus.handle(Request("localhost", 7000, "test"))
    # The generated selector is relative to the served root ("test/lol").
    assert response == Menu([
        MenuItem("0", "lol", "test/lol", "localhost", 7000),
    ])
@pytest.fixture
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
async def test_pattern_handler_not_found(pattern_handler: PatternHandler):
    """Unrecognised pattern raises NotFound."""
    with pytest.raises(NotFound):
        await pattern_handler.handle(Request("localhost", 7000, "qwertyuiop"))
| [
11748,
28686,
13,
6978,
198,
11748,
12972,
9288,
198,
198,
6738,
308,
8803,
62,
15388,
13,
4993,
8116,
1330,
27387,
25060,
11,
1892,
21077,
11,
23939,
25060,
11,
19390,
198,
6738,
308,
8803,
62,
15388,
13,
26272,
1330,
21860,
11,
21860,... | 2.925253 | 990 |
import datetime
import re
from django import forms
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django_countries import Countries
from django_countries.fields import LazyTypedChoiceField
from django_countries.widgets import CountrySelectWidget
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Field, Fieldset, Layout, HTML
from dc17.dates import meal_choices, night_choices
from dc17.models import Accomm, AccommNight, Bursary, Food, Meal
FOOD_LINK = (
'<a href="https://wiki.debconf.org/wiki/DebConf17/Catering" '
'target="blank">More information</a>')
ACCOM_LINK = (
'<a href="https://wiki.debconf.org/wiki/DebConf17/Accomodation" '
'target="blank">More information</a>')
BURSARIES_LINK = (
'<a href="/about/bursaries/" target="blank">DebConf bursary instructions.'
'</a>')
TSHIRT_CHART_LINK = (
'<a href="https://wiki.debconf.org/wiki/DebConf17/TshirtSizes" '
'target="blank">t-shirt sizes chart</a>')
PREAMBLE = (
'<p>Thank you for your interest in attending DebConf17!</p>'
'<p>Please read the following instructions carefully:</p>'
'<ol>'
'<noscript>'
"<li>This registration form uses JavaScript. Without it, you'll have to "
"navigate the validation dragons without any help. And you won't be able "
"to make payments through Stripe.</li>"
'</noscript>'
'<li>Nothing will be saved until the last page of the form, so be sure to '
'work all the way through it.</li>'
'<li>All registration, accommodation and catering fees must be paid '
'either trough the Stripe platform or in person at the front desk upon '
'arrival.</li>'
'<li>Please keep your registration information up to date. You can make '
'changes at any time through this form.</li>'
'<li>Registrations will need to be confirmed before July 1st. '
'We cannot guarantee availability of accommodation, catering or swag for '
'unconfirmed registrations.</li>'
'<li>Badges will be available for pick-up at the front desk.</li>'
'<li>The deadline to apply for a bursary is May 10th. After this date, '
"new bursary applications won't be considered.</li>"
'</ol>'
)
PLAN_DEBCAMP_LABEL = 'I plan to attend DebCamp (31 July to 4 August)'
PLAN_OPENDAY_LABEL = 'I plan to attend Open Day (5 August)'
PLAN_DEBCONF_LABEL = 'I plan to attend DebConf (6 August to 12 August)'
FEES_LABELS = {
'regular': 'Regular - Free',
'pro': 'Professional - 200 CAD',
'corp': 'Corporate - 500 CAD',
}
FINAL_DATES_ESTIMATE_LABEL = "Estimated, I haven't booked travel yet."
FINAL_DATES_FINAL_LABEL = 'Final, I have booked my travel.'
NO_T_SHIRT_LABEL = "I don't want a t-shirt"
STRAIGHT_CUT_LABEL = 'Straight cut'
WOMENS_FITTED_CUT_LABEL = "Women's fitted cut"
T_SHIRT_SIZES = {
'xs': 'Extra Small',
's': 'Small',
'm': 'Medium',
'l': 'Large',
'xl': 'Extra Large',
'2xl': '2X Large',
'3xl': '3X Large',
'4xl': '4X Large',
'5xl': '5X Large',
}
FOOD_ACCOMM_BURSARY_LABEL = 'Food and accommodation only'
TRAVEL_FOOD_ACCOMM_BURSARY_LABEL = 'Travel, food and accommodation'
BURSARY_NEED_LABELS = {
'unable': 'Without this funding, I will be absolutely '
'unable to attend',
'sacrifice': 'Without the requested funding, I will have to '
'make financial sacrifices to attend',
'inconvenient': 'Without the requested funding, attending will '
'be inconvenient for me',
'non-financial': 'I am not applying based on financial need',
}
ACCOMM_CHOICE_LABELS = {
'rvc_single': 'Single room at McGill residences accommodation '
'(30min by public transit)',
'rvc_double': 'Double room at McGill residences accommodation '
'- for couples only - (30min by public transit)',
'hotel': 'Hotel Universel (reserved for families and people with '
'disabilities only',
}
DIET_LABELS = {
'': 'I will be happy to eat whatever is provided',
'vegetarian': "I am lacto-ovo vegetarian, don't provide "
"meat/fish for me",
'vegan': "I am strict vegetarian (vegan), don't provide any "
"animal products for me",
'other': 'Other, described below',
}
REGISTRATION_FORMS = [
PreambleForm,
ContactInformationForm,
ConferenceRegistrationForm,
PersonalInformationForm,
BursaryForm,
FoodForm,
AccommForm,
BillingForm,
ConfirmationForm,
]
| [
11748,
4818,
8079,
198,
11748,
302,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
198,
6738,
42625,
... | 2.690292 | 1,679 |
from ..core import (
KuafuRenderer,
ActorBase,
ActorStatic,
Pose,
Scene,
ArticulationBase,
CameraEntity,
)
from .depth_processor import calc_main_depth_from_left_right_ir
from .sensor_base import SensorEntity
from typing import Optional, Tuple
from copy import deepcopy as copy
import os
import numpy as np
import transforms3d as t3d
| [
6738,
11485,
7295,
1330,
357,
198,
220,
220,
220,
12554,
1878,
84,
49,
437,
11882,
11,
198,
220,
220,
220,
27274,
14881,
11,
198,
220,
220,
220,
27274,
45442,
11,
198,
220,
220,
220,
37557,
11,
198,
220,
220,
220,
28315,
11,
198,
... | 2.844961 | 129 |
print(list(enumerate([])))
print(list(enumerate([1, 2, 3])))
print(list(enumerate([1, 2, 3], 5)))
print(list(enumerate([1, 2, 3], -5)))
print(list(enumerate(range(10000))))
| [
4798,
7,
4868,
7,
268,
6975,
378,
7,
21737,
22305,
198,
4798,
7,
4868,
7,
268,
6975,
378,
26933,
16,
11,
362,
11,
513,
60,
22305,
198,
4798,
7,
4868,
7,
268,
6975,
378,
26933,
16,
11,
362,
11,
513,
4357,
642,
22305,
198,
4798,
... | 2.32 | 75 |
import torch
import numpy as np
import editdistance
import sentencepiece as spmlib
import src.monitor.logger as logger
from src.marcos import BLANK_SYMBOL, IGNORE_ID
from itertools import groupby
# def cal_cer(preds, ys):
# pred = torch.argmax(preds, dim=-1)
# batch_size = pred.size(0)
# cer = 0.0
# for h,y in zip(pred, ys):
# hh = h.tolist()
# hh = [x[0] for x in groupby(hh)]
# hh = [x for x in hh if x != 0] # remove blank
# yy = y.tolist()
# ed = editdistance.eval(hh, yy)
# cer += (float(ed) / len(yy))
# return 100 * cer / batch_size
# def cal_error_rate(pred_pad, y_pad, batch_size, pred_max_len, olens, eos_id, ignore_idx=-1, cal_wer=False):
# pred = torch.argmax(pred_pad, dim=-1).view(batch_size, pred_max_len)
# preds = _get_preds(pred, eos_id)
# mask = (y_pad != ignore_idx)
# ys = _get_ys(y_pad.masked_select(mask), olens)
# cer = 0.0
# for h,y in zip(preds, ys):
# hh = h.tolist()
# yy = y.tolist()
# hh = [x[0] for x in groupby(hh)]
# ed = editdistance.eval(hh,yy)
# cer += (float(ed) / len(yy))
# return 100 * cer / batch_size
# def cal_acc(pred_pad, y_pad, ignore_idx=-1):
# assert pred_pad.size(0) == y_pad.size(0)
# mask = (y_pad != ignore_idx)
# pred = torch.argmax(pred_pad, dim=-1).masked_select(mask)
# y = y_pad.masked_select(mask)
# numerator = torch.sum(pred == y)
# denominator = torch.sum(mask)
# return float(numerator) / float(denominator)
# def _get_preds(pred_pad, eos_id):
# """
# pred_pad: (B, L)
# Return: list of lists
# """
# ret = []
# for vec in pred_pad:
# eos_loc = (vec == eos_id).nonzero()
# if len(eos_loc) > 0:
# stop_point = eos_loc[0][0].item()
# ret.append(vec[:stop_point])
# else:
# ret.append(vec)
# return ret
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4370,
30246,
198,
11748,
6827,
12239,
355,
599,
4029,
571,
198,
198,
11748,
12351,
13,
41143,
13,
6404,
1362,
355,
49706,
198,
6738,
12351,
13,
3876,
6966,
1330,
9878,
15154,
... | 2.050473 | 951 |
import pytest
from part_2 import retrieve_repeat
1_test = [+1, -1]
2_test = [+3, +3, +4, -2, -4]
3_test = [-6, +3, +8, +5, -6]
4_test = [+7, +7, -2, -7, -4]
| [
11748,
12972,
9288,
198,
6738,
636,
62,
17,
1330,
19818,
62,
44754,
198,
198,
16,
62,
9288,
796,
26076,
16,
11,
532,
16,
60,
198,
17,
62,
9288,
796,
26076,
18,
11,
1343,
18,
11,
1343,
19,
11,
532,
17,
11,
532,
19,
60,
198,
18,... | 1.870588 | 85 |
def withinSkull(imgOrig, flag):
"""
fuzzy c-mean image segmentation with weighted
:param imgOrig: grayscale image
:param flag:
:param count_debug:
:return: imgUtil: 2d array
Multiplicao de imgOrig e skull,
:return: skullInside: 2d array
Parte interior do cerebro
:return: skull: 2d array
Cerebro sendo 1, resto sendo 0
:return: se: 2d array
Elementro estruturante 19x19
"""
from data_information.dcm_information import m_uint8
from os.path import abspath, join, dirname
import numpy as np
import cv2
import sys
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
from data_information import dcm_information as di
from AreaLabel import AreaLabel
int1 = np.uint8(1)
int0 = np.uint8(0)
b_matrix = np.where(imgOrig >= 255, int1, int0)
# Fechamento
# Array of 19x19
ee_str = np.array((
[[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]
), dtype=np.uint8)
for x in range(0, 9):
b_matrix = cv2.erode(cv2.dilate(b_matrix, ee_str), ee_str)
_, index, L = AreaLabel(b_matrix)
# Calcular a Area de todos os label encontrados.
if len(index) >= 1:
skull = np.where(L == index[0], int0, int1)
elif len(index) == 0:
skull = 1 - L
# [Matheus] Codigo para representar 'floodfill' do MATLAB
im_flood_fill = np.copy(skull)
h, w = skull.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_flood_fill, mask, (0, 0), 255)
# Invert floodfilled image
# Combine the two images to get the foreground
skull_inside = np.bitwise_or(skull, cv2.bitwise_not(im_flood_fill))
# Segunda Etapa
# Realizar uma erosao
# Obs: realizar uma erosao umas 5 ou 6 vezes para retirar as bordas.
ee_str_2 = np.ones((5, 5), dtype=np.uint8) # ElementoEstruturante 15
if flag == 1:
for i in range(0, 3): # 10
skull_inside = cv2.erode(1 - skull, ee_str_2)
valor, index, L = AreaLabel(skull_inside)
if len(index) > 1:
skull_inside = np.where(L == index[2], int1, int0)
elif len(index) == 0:
skull_inside = L.copy()
else:
skull_inside = np.where((1 - skull) == index,
int1, int0)
# img_util, skull_inside, skull, ee_str
return m_uint8(imgOrig) * skull_inside, \
skull_inside, skull, ee_str, (h, w)
| [
4299,
1626,
15739,
724,
7,
9600,
11610,
11,
6056,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
34669,
269,
12,
32604,
2939,
10618,
341,
351,
26356,
198,
220,
220,
220,
1058,
17143,
33705,
11610,
25,
1036,
592,
38765,
2939,
198... | 1.824364 | 2,044 |
import datetime
import os
from applications.enums import ApplicationStatus, BenefitType
from applications.tests.factories import ApplicationFactory
from calculator.models import Calculation, STATE_AID_MAX_PERCENTAGE_CHOICES
from calculator.tests.factories import PaySubsidyFactory, TrainingCompensationFactory
from common.utils import nested_setattr, to_decimal
from helsinkibenefit.tests.conftest import * # noqa
from openpyxl import load_workbook
from openpyxl.utils import get_column_letter
MAX_TEST_ROW = 100
FIRST_TEST_COLUMN = 3
MAX_TEST_COLUMN = 50 # large enough number that all test columns are included
# unique object
sentinel = object()
SHEETS_TO_TEST = [
("Palkan Helsinki-lisä", SalaryBenefitExcelTest),
("Työllistämisen Helsinki-lisä", EmployeeBenefitExcelTest),
("Palkkatuettu oppisopimus", SalaryBenefitExcelTest),
]
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
6738,
5479,
13,
268,
5700,
1330,
15678,
19580,
11,
38065,
6030,
198,
6738,
5479,
13,
41989,
13,
22584,
1749,
1330,
15678,
22810,
198,
6738,
28260,
13,
27530,
1330,
2199,
14902,
11,
35454,
... | 3.149635 | 274 |
import pytest
from pipda import register_verb, register_func
from pipda.function import *
from pipda.context import Context, ContextEval
from pipda.symbolic import ReferenceAttr, Symbolic
from . import f, identity, identity2, iden, iden2, add2
def test_expr_func(f):
"""Test that we can use expr as a function"""
fun = Function(f.attr.get_data, (), {}, False)
out = fun._pipda_eval(
f, context=Context.EVAL.value
)._pipda_eval(
Data(3), context=Context.EVAL.value
)
assert isinstance(out, int)
assert out == 3
| [
11748,
12972,
9288,
198,
198,
6738,
7347,
6814,
1330,
7881,
62,
19011,
11,
7881,
62,
20786,
198,
6738,
7347,
6814,
13,
8818,
1330,
1635,
198,
6738,
7347,
6814,
13,
22866,
1330,
30532,
11,
30532,
36,
2100,
198,
6738,
7347,
6814,
13,
18... | 2.665094 | 212 |
#!/usr/bin/env python3
import overpass
import hashlib
import logging
from pathlib import Path
from .utils import hash_
from . import defaults
class OSMDataset():
'''
OSM data file from which a route network will be extracted
Parameters
----------
osm_path:
Path to *.osm{.pbf} dataset
name: str, optional
Name of OSMDataset. If not provided, the .osm filename is used.
'''
def get_name(self):
"""Return route network name"""
return self.name
def get_path(self):
"""Return route network path"""
return self.path
@classmethod
def from_overpass(cls, query, name=None, overwrite=False, tmp_dir=defaults.TMP_DIR, **kwargs):
'''
Initialize an OSMDataset by downloading result of an overpass query and saving as .osm
Parameters
----------
query : str
Query to be sent to overpass API. This query should *not* include an `out` directive (eg. [out:xml];)
name : str
Name of the route network
overwrite : bool
Overwrite route network if it already exists on disk
tmp_dir : str
Temporary directory to save route network
Returns
-------
OSMDataset
'''
logger = logging.getLogger(defaults.LOGGER)
## Use md5 hash of query as filename if name not specified
out_folder = Path(tmp_dir)
out_name = name if name else hash_(query)
out_file = out_folder / "{}.osm".format(out_name)
## Honor overwrite settings
if out_file.is_file():
if overwrite:
logger.info("Overwriting {}".format(out_file))
out_file.unlink()
else:
logger.info("Using existing OSMDataset {}".format(out_file))
return cls(out_file, name=name, overwrite=overwrite, tmp_dir=tmp_dir)
logger.info("Downloading OSMDataset {}".format(name))
## Query API
oapi = overpass.API()
xml = oapi.get("[out:xml];{}".format(query),
verbosity="geom",
responseformat="xml",
build=False)
## Save file
out_folder.mkdir(parents=True, exist_ok=True)
out_file.write_text(xml)
return cls(out_file, name=name, overwrite=overwrite, tmp_dir=tmp_dir)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
625,
6603,
198,
11748,
12234,
8019,
198,
11748,
18931,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
764,
26791,
1330,
12234,
62,
198,
6738,
764,
1330,
26235,
... | 2.253521 | 1,065 |
import os
import subprocess
#demo
#getFrequency("183.131.179.4\n183.131.179.5\n183.131.179.6")
| [
11748,
28686,
201,
198,
11748,
850,
14681,
201,
198,
201,
198,
2,
9536,
78,
201,
198,
2,
1136,
37,
28707,
7203,
24839,
13,
22042,
13,
21738,
13,
19,
59,
77,
24839,
13,
22042,
13,
21738,
13,
20,
59,
77,
24839,
13,
22042,
13,
21738,... | 2.061224 | 49 |
import sys
import logging
from unittest import TestCase
from mock import Mock, patch, mock_open
import elasticsearch
import curator
from curator import _version as __version__
from . import CLITestCase
# Get test variables and constants from a single source
from . import testvars as testvars
| [
11748,
25064,
198,
11748,
18931,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
15290,
1330,
44123,
11,
8529,
11,
15290,
62,
9654,
198,
11748,
27468,
12947,
198,
11748,
46132,
198,
6738,
46132,
1330,
4808,
9641,
355,
11593,
9641,... | 4.069444 | 72 |
if __name__ == '__main__':
array = []
print("Array: ", array)
print("1. Enter array")
print("2. Sort array")
while True:
print("Array: ", array)
inp = input("Your choice: ")
if inp == "1":
insert(array, input(" Enter element: "))
elif inp == "2":
array = sort(array)
| [
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
7177,
796,
17635,
198,
220,
220,
220,
3601,
7203,
19182,
25,
33172,
7177,
8,
198,
220,
220,
220,
3601,
7203,
16,
13,
6062,
7177,
4943,
198,... | 2.096386 | 166 |
import pytest
import test_aide as ta
import tests.test_data as d
import pandas as pd
import numpy as np
import tubular
from tubular.base import DataFrameMethodTransformer
class TestInit(object):
"""Tests for DataFrameMethodTransformer.init()."""
def test_arguments(self):
"""Test that init has expected arguments."""
ta.functions.test_function_arguments(
func=DataFrameMethodTransformer.__init__,
expected_arguments=[
"self",
"new_column_name",
"pd_method_name",
"columns",
"pd_method_kwargs",
],
expected_default_values=({},),
)
def test_class_methods(self):
"""Test that DataFrameMethodTransformer has transform method."""
x = DataFrameMethodTransformer(
new_column_name="a", pd_method_name="sum", columns=["b", "c"]
)
ta.classes.test_object_method(
obj=x, expected_method="transform", msg="transform"
)
def test_inheritance(self):
"""Test that DataFrameMethodTransformer inherits from BaseTransformer."""
x = DataFrameMethodTransformer(
new_column_name="a", pd_method_name="sum", columns=["b", "c"]
)
ta.classes.assert_inheritance(x, tubular.base.BaseTransformer)
def test_super_init_called(self, mocker):
"""Test that init calls BaseTransformer.init."""
expected_call_args = {
0: {
"args": (),
"kwargs": {"columns": ["b", "c"], "verbose": True, "copy": False},
}
}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
DataFrameMethodTransformer(
new_column_name="a",
pd_method_name="sum",
columns=["b", "c"],
copy=False,
verbose=True,
)
def test_invalid_input_type_errors(self):
"""Test that an exceptions are raised for invalid input types."""
with pytest.raises(
TypeError,
match=r"unexpected type \(\<class 'int'\>\) for pd_method_name, expecting str",
):
DataFrameMethodTransformer(
new_column_name="a", pd_method_name=1, columns=["b", "c"]
)
with pytest.raises(
TypeError,
match=r"unexpected type \(\<class 'float'\>\) for new_column_name, must be str or list of strings",
):
DataFrameMethodTransformer(
new_column_name=1.0, pd_method_name="sum", columns=["b", "c"]
)
with pytest.raises(
TypeError,
match=r"if new_column_name is a list, all elements must be strings but got \<class 'float'\> in position 1",
):
DataFrameMethodTransformer(
new_column_name=["a", 1.0], pd_method_name="sum", columns=["b", "c"]
)
with pytest.raises(
TypeError,
match=r"""pd_method_kwargs should be a dict but got type \<class 'int'\>""",
):
DataFrameMethodTransformer(
new_column_name=["a", "b"],
pd_method_name="sum",
columns=["b", "c"],
pd_method_kwargs=1,
)
with pytest.raises(
TypeError,
match=r"""unexpected type \(\<class 'int'\>\) for pd_method_kwargs key in position 1, must be str""",
):
DataFrameMethodTransformer(
new_column_name=["a", "b"],
pd_method_name="sum",
columns=["b", "c"],
pd_method_kwargs={"a": 1, 2: "b"},
)
def test_exception_raised_non_pandas_method_passed(self):
"""Test and exception is raised if a non pd.DataFrame method is passed for pd_method_name."""
with pytest.raises(
AttributeError,
match="""error accessing "b" method on pd.DataFrame object - pd_method_name should be a pd.DataFrame method""",
):
DataFrameMethodTransformer(
new_column_name="a", pd_method_name="b", columns=["b", "c"]
)
def test_attributes_set(self):
"""Test that the values passed for new_column_name, pd_method_name are saved to attributes on the object."""
x = DataFrameMethodTransformer(
new_column_name="a", pd_method_name="sum", columns=["b", "c"]
)
ta.classes.test_object_attributes(
obj=x,
expected_attributes={"new_column_name": "a", "pd_method_name": "sum"},
msg="Attributes for DataFrameMethodTransformer set in init",
)
class TestTransform(object):
"""Tests for DataFrameMethodTransformer.transform()."""
def expected_df_1():
"""Expected output of test_expected_output_single_columns_assignment."""
df = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, np.NaN],
"b": [1, 2, 3, np.NaN, 7, 8, 9],
"c": [np.NaN, 1, 2, 3, -4, -5, -6],
"d": [1.0, 3.0, 5.0, 3.0, 3.0, 3.0, 3.0],
}
)
return df
def expected_df_2():
"""Expected output of test_expected_output_multi_columns_assignment."""
df = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, np.NaN],
"b": [1, 2, 3, np.NaN, 7, 8, 9],
"c": [np.NaN, 1, 2, 3, -4, -5, -6],
"d": [0.5, 1.0, 1.5, np.NaN, 3.5, 4.0, 4.5],
"e": [np.NaN, 0.5, 1.0, 1.5, -2.0, -2.5, -3.0],
}
)
return df
def test_arguments(self):
"""Test that transform has expected arguments."""
ta.functions.test_function_arguments(
func=DataFrameMethodTransformer.transform, expected_arguments=["self", "X"]
)
def test_super_transform_called(self, mocker):
"""Test that BaseTransformer.transform called."""
df = d.create_df_3()
x = DataFrameMethodTransformer(
new_column_name="d", pd_method_name="sum", columns=["b", "c"]
)
expected_call_args = {0: {"args": (df.copy(),), "kwargs": {}}}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "transform", expected_call_args
):
x.transform(df)
@pytest.mark.parametrize(
"df, expected",
ta.pandas.adjusted_dataframe_params(d.create_df_3(), expected_df_1()),
)
def test_expected_output_single_columns_assignment(self, df, expected):
"""Test a single column output from transform gives expected results."""
x = DataFrameMethodTransformer(
new_column_name="d",
pd_method_name="sum",
columns=["b", "c"],
pd_method_kwargs={"axis": 1},
)
df_transformed = x.transform(df)
ta.equality.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="DataFrameMethodTransformer sum columns b and c",
)
@pytest.mark.parametrize(
"df, expected",
ta.pandas.adjusted_dataframe_params(d.create_df_3(), expected_df_2()),
)
def test_expected_output_multi_columns_assignment(self, df, expected):
"""Test a multiple column output from transform gives expected results."""
x = DataFrameMethodTransformer(
new_column_name=["d", "e"],
pd_method_name="div",
columns=["b", "c"],
pd_method_kwargs={"other": 2},
)
df_transformed = x.transform(df)
ta.equality.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="DataFrameMethodTransformer divide by 2 columns b and c",
)
@pytest.mark.parametrize(
"df, new_column_name, pd_method_name, columns, pd_method_kwargs",
[
(d.create_df_3(), ["d", "e"], "div", ["b", "c"], {"other": 2}),
(d.create_df_3(), "d", "sum", ["b", "c"], {"axis": 1}),
(
d.create_df_3(),
["d", "e"],
"cumprod",
["b", "c"],
{"axis": 1},
),
(
d.create_df_3(),
["d", "e", "f"],
"mod",
["a", "b", "c"],
{"other": 2},
),
(
d.create_df_3(),
["d", "e", "f"],
"le",
["a", "b", "c"],
{"other": 0},
),
(d.create_df_3(), ["d", "e"], "abs", ["a", "b"], {}),
],
)
def test_pandas_method_called(
self, mocker, df, new_column_name, pd_method_name, columns, pd_method_kwargs
):
"""Test that the pandas method is called as expected (with kwargs passed) during transform."""
spy = mocker.spy(pd.DataFrame, pd_method_name)
x = DataFrameMethodTransformer(
new_column_name=new_column_name,
pd_method_name=pd_method_name,
columns=columns,
pd_method_kwargs=pd_method_kwargs,
)
x.transform(df)
# pull out positional and keyword args to target the call
call_args = spy.call_args_list[0]
call_pos_args = call_args[0]
call_kwargs = call_args[1]
# test keyword are as expected
ta.equality.assert_dict_equal_msg(
actual=call_kwargs,
expected=pd_method_kwargs,
msg_tag=f"""Keyword arg assert for {pd_method_name}""",
)
# test positional args are as expected
ta.equality.assert_list_tuple_equal_msg(
actual=call_pos_args,
expected=(df[columns],),
msg_tag=f"""Positional arg assert for {pd_method_name}""",
)
| [
11748,
12972,
9288,
198,
11748,
1332,
62,
64,
485,
355,
20486,
198,
11748,
5254,
13,
9288,
62,
7890,
355,
288,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
12202,
934,
198,
6738,
12202,
93... | 1.964692 | 5,098 |
import pytest
from assertpy import assert_that
from common.grid import Direction
import year2020.day20.reader as reader
import year2020.day20.solver as solver
@pytest.mark.solution
@pytest.mark.solution
| [
11748,
12972,
9288,
198,
6738,
6818,
9078,
1330,
6818,
62,
5562,
198,
198,
6738,
2219,
13,
25928,
1330,
41837,
198,
198,
11748,
614,
42334,
13,
820,
1238,
13,
46862,
355,
9173,
198,
11748,
614,
42334,
13,
820,
1238,
13,
82,
14375,
355... | 3.147059 | 68 |
from colorama import init
from termcolor import cprint, colored
init(autoreset=True)
p_color = "yellow"
| [
6738,
3124,
1689,
1330,
2315,
198,
6738,
3381,
8043,
1330,
269,
4798,
11,
16396,
198,
15003,
7,
2306,
382,
2617,
28,
17821,
8,
198,
79,
62,
8043,
796,
366,
36022,
1,
628
] | 3.28125 | 32 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import warnings
import collections
import uuid
from pyloginsight.exceptions import TransportError
from pyloginsight.query import Constraint, Parameter
from pyloginsight import operator
from pyloginsight.models import Event
from pyloginsight.ingestion import serialize_event_object, crush_invalid_field_name
from datetime import datetime
import pytz
import time
import json
"""Examples from "Specifying constraints" section of https://vmw-loginsight.github.io/#Querying-data"""
def test_query_conditions(connection):
"""
Run a live query against a remote server.
"""
# All time, default limit of 100 events
conditions = [Constraint("source", operator.EXISTS), Constraint("timestamp", ">=", 0)]
events = connection.server.events(conditions)
assert isinstance(events, collections.Sized)
def test_ping_pong_message(connection):
"""Ingest a message and then query it back."""
events = None
e = Event(text=str(uuid.uuid4()), fields={'appname': 'pyloginsight test'}, timestamp=datetime.now(pytz.utc).replace(microsecond=0))
connection.server.log(e)
conditions = [Constraint("text", operator.CONTAINS, e['text']), Constraint("timestamp", "=", e['timestamp'])]
# The event will traverse the ingestion pipeline asynchronously.
# Poll the server 100 times with a 0.05 second delay in 5 seconds, plus request overhead
attempt = 0
for attempt in range(100):
events = connection.server.events(conditions)
assert isinstance(events, collections.Sequence)
if len(events) > 0:
break
time.sleep(0.05)
else:
pytest.fail("Timeout waiting for event to appear in query results")
assert len(events) > 0
assert isinstance(events[0], Event)
assert isinstance(events[0].fields, collections.Mapping)
assert isinstance(events[0].timestamp, datetime)
# Other than server-added fields...
for f in ('event_type', 'source', 'hostname'):
try:
del events[0]['fields'][f]
except KeyError:
pass
# The originally-send and query-result events are equal
assert events[0] == e
print("Completed in %d attempts" % attempt)
def test_ingest_single_message(connection):
"""
Create and ingest a new log message with text=uuid and the current datetime.
If the server rejects the event or cannot parse it, this raises an exception.
"""
e = Event(
text=str(uuid.uuid4()),
fields={'appname': 'pyloginsight test'},
timestamp=datetime.now(pytz.utc)
)
connection.server.log(e)
def test_ingest_single_empty_message(connection):
"""It is possible to ingest a completely empty Event. It serializes to {}"""
e = Event()
connection.server.log(e)
def test_ingest_without_any_events_fails(connection):
"""Transmit a single garbage object to a remote Log Insight server."""
with pytest.raises(TransportError):
r = connection.post("/events/ingest/0", json={"foo": "bar"}, sendauthorization=False)
def test_ingest_extra_fields_succeeds(connection):
"""Transmit an empty list of events, along with extra data which should be ignored."""
r = connection.post("/events/ingest/0", json={"events": [], "foo": "bar"}, sendauthorization=False)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
12972,
9288,
198,
11748,
14601,
198,
11748,
17268,
198,
11748,
334,
27112,
198,
198,
6738,
279,
2645,
519,
1040,... | 2.886033 | 1,167 |
from ray.rllib.agents.dqn.distributional_q_tf_model import DistributionalQTFModel
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.utils import get_activation_fn
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf()
class CRPCustomQModel(DistributionalQTFModel):
"""Custom model for DQN.""" | [
6738,
26842,
13,
81,
297,
571,
13,
49638,
13,
49506,
77,
13,
17080,
3890,
282,
62,
80,
62,
27110,
62,
19849,
1330,
27484,
282,
48,
10234,
17633,
198,
6738,
26842,
13,
81,
297,
571,
13,
27530,
13,
27110,
13,
44374,
1330,
2593,
66,
... | 2.902098 | 143 |
# Generated by Django 3.0.4 on 2020-04-06 21:43
from django.db import migrations, models
import django.db.models.deletion
import martor.models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3023,
12,
3312,
2310,
25,
3559,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.9 | 50 |
import turtle
turtle.pensize(5)
turtle.color('blue')
turtle.shape('blank')
turtle.bgcolor('green')
turtle.penup()
turtle.goto(-500,0)
turtle.pendown()
turtle.goto(500,0)
turtle.goto(0,400)
turtle.goto(-500,0)
turtle.mainloop()
| [
11748,
28699,
220,
198,
198,
83,
17964,
13,
79,
641,
1096,
7,
20,
8,
198,
83,
17964,
13,
8043,
10786,
17585,
11537,
198,
83,
17964,
13,
43358,
10786,
27190,
11537,
198,
83,
17964,
13,
35904,
8043,
10786,
14809,
11537,
198,
83,
17964,
... | 2.169811 | 106 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.615385 | 26 |
"""""
Main Client.
"""""
from __future__ import print_function, unicode_literals
from djangular_cli.terminal.prompt import prompt
from djangular_cli.config.style.color_style import style
from djangular_cli.config.style.widget import widget
from djangular_cli.generate.create import cmd_angular, cmd_env, cmd_django
from djangular_cli.git.git import djangular_boilerplate
from djangular_cli.management.find import check_modules
if __name__ == '__main__':
client()
| [
15931,
37811,
198,
13383,
20985,
13,
198,
15931,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
21413,
62,
44506,
13,
23705,
282,
13,
16963,
457,
1330,
6152,
198,
6738,... | 3.270833 | 144 |
"""
Downloads all versions of a file in a GitHub repository.
"""
import argparse
import datetime
import json
import os
import requests
def str_to_date(date_str, fmt='%Y-%m-%d'):
"""Convert string date to date object."""
return datetime.datetime.strptime(date_str, fmt).date()
def str_to_datetime(date_str, fmt='%Y-%m-%dT%H:%M:%SZ'):
"""Convert string date to datetime object."""
return datetime.datetime.strptime(date_str, fmt)
def get_headers():
"""
GitHub API only allows 60 calls per hour from an unauthorized IP.
To authenticate, you need to create a personal access token:
https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token
After creating the token, set it as an environment variable for GITHUB_ACCESS_TOKEN
More details about authentication:
https://docs.github.com/en/rest/guides/getting-started-with-the-rest-api#authentication
"""
try:
access_token = os.environ['GITHUB_ACCESS_TOKEN']
return {
'Authorization' : f"token {access_token}",
}
except KeyError:
return None
def run_download(author, repo_name, branch, file_path, output_dir,
        limit_by_day=None, overwrite=False):
    """Download every historical version of one file in a GitHub repository.

    For help: `python download_all_versions.py --help`

    Sample usage:
    `python download_all_versions.py --author youyanggu --repo_name covid19_projections --branch gh-pages --file_path index.md --output_dir output --limit_by_day last`

    Parameters
    ----------
    author : str
        GitHub user or organization owning the repository.
    repo_name : str
        Name of the repository.
    branch : str
        Branch whose commit history is searched.
    file_path : str
        Path of the file inside the repository.
    output_dir : str
        Directory the downloaded versions are written to (created if missing).
    limit_by_day : str or None
        If 'first' or 'last', keep only the first/last version per commit date.
    overwrite : bool
        If True, re-download versions whose output file already exists.
    """
    print('====================================')
    print(datetime.datetime.now())
    print('Downloading all versions...')
    url = f'https://api.github.com/repos/{author}/{repo_name}/commits'
    print('URL:', url)
    print('Branch:', branch)
    print('File:', file_path)
    print('Output dir:', output_dir)
    print('Limit by day:', limit_by_day)
    print('Overwrite:', overwrite)
    assert limit_by_day in [None, 'first', 'last'], limit_by_day

    os.makedirs(output_dir, exist_ok=True)
    file_basename = os.path.basename(file_path)
    if get_headers():
        print('Using personal GitHub access token (max 5000 calls per hour)')
    else:
        print(('Missing GitHub access token. '
            'Using unauthenticated GitHub requests (max 60 calls per hour)'))

    # Retrieve information about all commits that modified the file we want.
    # The commits API is paginated; keep fetching until an empty page or error.
    all_commits = []
    page = 0
    while True:
        page += 1
        print(f'{file_path} - Fetching page {page}...')
        r = requests.get(url,
            headers = get_headers(),
            params = {
                'sha' : branch,
                'path': file_path,
                'page': str(page),
                'per_page' : 1000, # max is 100, default is 30
            }
        )
        if not r.ok or r.text == '[]':
            if not r.ok:
                print(r, r.text)
            break
        all_commits += json.loads(r.text or r.content)
        print('Num commits:', len(all_commits))
    print('Final num commits:', len(all_commits))

    if limit_by_day:
        # Date of each commit, in reverse chronological order (API order).
        commit_dates = [
            str_to_date(commit['commit']['author']['date'][0:10]) for commit in all_commits
        ]
        if limit_by_day == 'first':
            # Reverse BOTH lists so they stay aligned; the dict below keeps
            # the first-seen (i.e. oldest) commit for each date.
            # Fixed: previously only commit_dates was reversed, pairing each
            # date with the wrong commit sha.
            commit_dates = commit_dates[::-1]
            all_commits = all_commits[::-1]
    else:
        commit_dates = [
            str_to_datetime(commit['commit']['author']['date']) for commit in all_commits
        ]

    commit_date_to_sha_and_fname = {}
    for i, commit_date in enumerate(commit_dates):
        commit_date_str = str(commit_date).replace(' ', '_').replace(':', '')
        result_path = f'{output_dir}/{file_basename}_{commit_date_str}'
        if commit_date not in commit_date_to_sha_and_fname:
            commit_date_to_sha_and_fname[commit_date] = (all_commits[i]['sha'], result_path)

    # Keys must be sorted by date: descending normally, ascending when
    # limit_by_day == 'first' (lists were reversed above). Fixed: the old
    # assert demanded descending order only, which always failed for 'first'.
    keys = list(commit_date_to_sha_and_fname.keys())
    assert keys == sorted(keys, reverse=True) or keys == sorted(keys), 'dict is not sorted'

    for commit_date, sha_and_fname in reversed(commit_date_to_sha_and_fname.items()):
        commit_sha, result_path = sha_and_fname
        if os.path.isfile(result_path):
            if overwrite:
                # Fixed: this used to reference a misspelled `reslut_path`,
                # raising NameError whenever overwrite was True.
                print('File exists, overwriting:', result_path)
            else:
                print('File exists, skipping:', result_path)
                continue
        url = requests.utils.requote_uri(
            f'https://raw.githubusercontent.com/{author}/{repo_name}/{commit_sha}/{file_path}')
        resp = requests.get(url, headers=get_headers())
        assert len(resp.text) > 0, resp
        with open(result_path, 'w') as f:
            f.write(resp.text)
        print('Saved to:', result_path)
# Command-line entry point: parse arguments and hand them to run_download.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--author', help='Author name', required=True)
    parser.add_argument('--repo_name', help='Repo name', required=True)
    parser.add_argument('--branch', help='Branch name', default='main')
    parser.add_argument('--file_path', help='Relative path of file in the repo', required=True)
    parser.add_argument('--output_dir', help='Output directory location', required=True)
    parser.add_argument('--limit_by_day', help=('If you only want to keep the first or'
        ' last file on each commit date, specify `first` or `last`'), choices=['first', 'last'])
    parser.add_argument('-o', '--overwrite', action='store_true', help='Overwrite files')
    args = parser.parse_args()

    # Positional hand-off mirrors run_download's signature exactly.
    run_download(
        args.author,
        args.repo_name,
        args.branch,
        args.file_path,
        args.output_dir,
        args.limit_by_day,
        args.overwrite,
    )
    print('--------------------')
    print('Done', datetime.datetime.now())
| [
37811,
198,
10002,
82,
477,
6300,
286,
257,
2393,
287,
257,
21722,
16099,
13,
198,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
11748,
7007,
628,
198,
4299,
965,
62,
... | 2.39689 | 2,444 |
import librosa
import torch
from numpy.testing import assert_almost_equal
from scipy.signal import convolve as scipy_convolve
from tests.utils import TEST_FIXTURES_DIR
from torch_audiomentations.utils.convolution import convolve as torch_convolve
| [
11748,
9195,
4951,
64,
198,
11748,
28034,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
28177,
62,
40496,
198,
6738,
629,
541,
88,
13,
12683,
282,
1330,
3063,
6442,
355,
629,
541,
88,
62,
42946,
6442,
198,
198,
6738,
5254,
13,
2... | 3.410959 | 73 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import logging
import lxml.etree
from harvester.ext.converter.base import ConverterPluginBase
from harvester.utils import slugify, XPathHelper, flatten_dict, report_progress
from .tags_map import TAGS_MAP
from .constants import LINKED_XML_NSMAP, API_XML_NSMAP
logger = logging.getLogger(__name__)
ORG_SIAT = 'pat-sistema-informativo-ambiente-e-territorio'
ORG_CATASTO = 'pat-s-catasto'
LICENSES_MAP = {
1: 'cc-zero',
2: 'cc-by',
}
ORGANIZATIONS = {
ORG_SIAT: {
'name': ORG_SIAT,
'title': 'PAT Sistema Informativo Ambiente e Territorio',
'description': 'SIAT. Entità territoriali geo-referenziate,'
' con associate informazioni sulle relative proprietà.',
'image_url': 'http://dati.trentino.it/images/logo.png',
'type': 'organization',
'is_organization': True,
'state': 'active',
'tags': [],
},
ORG_CATASTO: {
'name': ORG_CATASTO,
'title': 'PAT S. Catasto',
'description':
"Il Servizio Catasto della Provincia autonoma di Trento cura le"
"seguenti attività: - sovrintende alle operazioni di"
"conservazione del catasto fondiario e fabbricati; - svolge le"
"funzioni di controllo, di verifica e di ispezione delle attività"
"connesse alla tenuta del catasto; - cura, in accordo con la"
"struttura competente in materia di Sistema informativo"
"elettronico provinciale, la definizione dei programmi di"
"informatizzazione dei servizi del catasto nel contesto di una"
"coordinata realizzazione del sistema informatico/informativo. -"
"cura le revisioni periodiche degli estimi catastali e l’attività"
"di raffittimento della rete geodetica del territorio provinciale",
'image_url': 'http://dati.trentino.it/images/logo.png',
'type': 'organization',
'is_organization': True,
'state': 'active',
'tags': [],
},
}
GROUPS = {
'gestione-del-territorio': {
'name': 'gestione-del-territorio',
'title': 'Gestione del territorio',
'description': 'Viabilità, idrografia, aree protette, toponomastica, '
'orografia, uso del suolo, ecc.',
}
}
def _clean_tags(tags, tags_map=TAGS_MAP):
    """Return the sorted, de-duplicated cleanup of *tags*.

    Every tag is expanded through ``_clean_tag``. The ``tags_map`` argument
    is accepted for interface compatibility but is not used by the body.
    """
    cleaned = {result for tag in tags for result in _clean_tag(tag)}
    return sorted(cleaned)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
198,
11748,
300,
19875,
13,
316,
631,
198,
198,
673... | 2.254039 | 1,114 |
# -*- coding: utf-8 -*-
"""
Python Seminar
Debug 1
First script in which we are going to use the Spyder
debugging tools
@author: D. G. Reina
"""
# NOTE(review): `print i` below is Python 2 statement syntax; this script
# will not run under Python 3 without parentheses.
#%%
j = int()  # running sum, starts at 0
for i in range(10):
    print i
    j = j + i  # add a break point on this line
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
201,
198,
13900,
259,
4982,
11361,
201,
198,
201,
198,
27509,
352,
201,
198,
201,
198,
23828,
263,
4226,
551,
1288,
8358,
410,
321,
418,
257,
773... | 2.027397 | 146 |
#!/usr/bin/env python
# Name: task.py
# Time:8/11/16 10:23 AM
# Author:luo1fly
from django.db import transaction
from hosts import models
import subprocess
from AntiMagic import settings
import os
import json
# from django.utils.datastructures import MultiValueDictKeyError
# import your modules above
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
6530,
25,
4876,
13,
9078,
198,
2,
3862,
25,
23,
14,
1157,
14,
1433,
838,
25,
1954,
3001,
198,
2,
6434,
25,
2290,
78,
16,
12254,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
8... | 3.290323 | 93 |
#---------------------------------------
# Import Libraries
#---------------------------------------
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
import os
import json
import codecs
#---------------------------------------
# [Required] Script Information
#---------------------------------------
ScriptName = "NA Effect Changer"
Website = "https://www.twitch.tv/CyberHumi"
Creator = "CyberHumi"
Version = "1.6.1"
Description = "Nanoleaf Aurora Effect Changer"
#---------------------------------------
# Set Variables
#---------------------------------------
configFile = "NAEC.json"
batFile = "NAEC_SLCB_CLIENT.bat"
settings = {}
#---------------------------------------
# read config file
#---------------------------------------
#---------------------------------------
# [Required] Initialize Data / Load Only
#---------------------------------------
#---------------------------
# [Optional] Reload Settings (Called when a user clicks the Save Settings button in the Chatbot UI)
#---------------------------
#---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
#---------------------------
#---------------------------
# Weblinks
#---------------------------
| [
2,
3880,
26866,
201,
198,
2,
220,
220,
17267,
46267,
201,
198,
2,
3880,
26866,
201,
198,
11748,
537,
81,
201,
198,
565,
81,
13,
4550,
26687,
7203,
22797,
37906,
13,
17861,
578,
13,
12736,
4943,
201,
198,
565,
81,
13,
4550,
26687,
... | 3.768802 | 359 |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
11787,
12443,
341,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198
] | 3.542857 | 35 |
import os
import csv
csv_path = os.path.join("budget_data.csv")
with open(csv_path, newline="") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
#print(csv_file)
next(csv_reader,None)
PyBank(csv_reader) | [
11748,
28686,
198,
11748,
269,
21370,
198,
198,
40664,
62,
6978,
796,
28686,
13,
6978,
13,
22179,
7203,
37315,
62,
7890,
13,
40664,
4943,
198,
198,
4480,
1280,
7,
40664,
62,
6978,
11,
649,
1370,
2625,
4943,
355,
269,
21370,
62,
7753,
... | 2.36 | 100 |
# -*- coding:utf-8 -*-
# https://leetcode.com/problems/reverse-linked-list-ii/description/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
50188,
12,
25614,
12,
4868,
12,
4178,
14,
11213,
14,
198,
198,
2,
30396,
329,
1702,
306,
12,
25614,
1351,
1... | 2.155172 | 116 |
from keras.layers import Input, Activation, Dense,Flatten, BatchNormalization, Add, Conv2D, MaxPooling2D
from keras.layers import AveragePooling2D,Permute,Reshape,LSTM,Lambda,GRU,Bidirectional,BatchNormalization,Concatenate
from keras import regularizers
from keras.optimizers import Adam
from utils import sharpe_ratio_loss,sharpe_ratio
import keras.backend as K
from keras.models import Model
| [
6738,
41927,
292,
13,
75,
6962,
1330,
23412,
11,
13144,
341,
11,
360,
1072,
11,
7414,
41769,
11,
347,
963,
26447,
1634,
11,
3060,
11,
34872,
17,
35,
11,
5436,
27201,
278,
17,
35,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
13475,
2... | 2.947761 | 134 |
from prompt_toolkit import print_formatted_text, ANSI
| [
6738,
6152,
62,
25981,
15813,
1330,
3601,
62,
687,
16898,
62,
5239,
11,
3537,
11584,
628
] | 3.4375 | 16 |
import logging
import shutil
from pathlib import Path
from typing import Tuple, Optional
import pandas as pd
from genomics_data_index.storage.MaskedGenomicRegions import MaskedGenomicRegions
from genomics_data_index.storage.io.SampleData import SampleData
from genomics_data_index.storage.io.mutation.VariationFile import VariationFile
from genomics_data_index.storage.io.mutation.VcfSnpEffAnnotationParser import VcfSnpEffAnnotationParser
from genomics_data_index.storage.model import NUCLEOTIDE_UNKNOWN_TYPE
from genomics_data_index.storage.util import TRACE_LEVEL
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
4423,
346,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
309,
29291,
11,
32233,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
2429,
31994,
62,
7890,
62,
9630,
13,
35350,
13,
45195,
... | 3.268817 | 186 |
#!/usr/bin/env python
# coding: utf-8
"""
voxel_renderer.py
Voxel描画用オブジェクトVoxelRendererを包含するモジュール
"""
import warnings
from OpenGL import GL
from abstract_renderer import AbstractRenderer
from src.model.voxel import Voxel
from src.render.object import cube
class VoxelRenderer(AbstractRenderer):
"""
OpenGL上にボクセルデータを描画するクラス
"""
def set_voxel(self, voxel):
"""
ボクセルのmutator
:type voxel: Voxel
:param voxel: ボクセルクラスオブジェクト
:return:
"""
self.__voxel = voxel
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
198,
85,
1140,
417,
62,
10920,
11882,
13,
9078,
198,
198,
53,
1140,
417,
162,
237,
237,
18796,
119,
18796,
101,
20513,
24001,
... | 1.883392 | 283 |
import unittest
from ..compare_versions import compare_versions
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
11748,
555,
715,
395,
198,
198,
6738,
11485,
5589,
533,
62,
47178,
1330,
8996,
62,
47178,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
7,
19011,
16579,
28,
17... | 2.8 | 45 |
import enum
import pathlib
from typing import Union
import yaml
from pydantic import BaseModel
from yaml import CDumper as Dumper
from yaml import CLoader as Loader
class LogLevel(str, enum.Enum):
    """Enumeration of supported log levels.

    Mixes in ``str`` so members compare equal to their plain string values
    (e.g. ``LogLevel.INFO == "info"``) and serialize cleanly to/from YAML.
    """
    # Values are the lower-case level names as they appear in config files.
    DEBUG = "debug"
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"
    CRITICAL = "critical"
class ConfigOptions(BaseModel):
    """An object used to manage configuration options.

    Each attribute represents an option in the config file.
    This object can be subclassed to add parameters.
    """

    loglevel: LogLevel = LogLevel.INFO

    @classmethod
    def from_file(cls, filename: Union[str, pathlib.Path]) -> "ConfigOptions":
        """Load configuration from a .yaml file.

        Parameters
        ----------
        filename
            Path of the YAML file to read.

        Returns
        -------
        ConfigOptions
            A validated instance built from the file contents.
        """
        with open(filename, "r") as f:
            state = yaml.load(f, Loader=Loader)
        return cls.parse_obj(state)

    def to_file(self, filename: Union[str, pathlib.Path]) -> None:
        """Save the configuration to *filename* as YAML.

        Raises
        ------
        FileNotFoundError
            If the destination path cannot be opened for writing.
        """
        try:
            with open(filename, "w") as f:
                # Fixed: removed a stray `print(state)` debug line that
                # dumped the raw option dict to stdout on every save.
                yaml.dump(self.dict(), f, Dumper=Dumper)
            print("Saved configuration to {}".format(filename))
        except FileNotFoundError:
            print("Error writing config file {}".format(filename))
            raise
| [
11748,
33829,
198,
11748,
3108,
8019,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
331,
43695,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
6738,
331,
43695,
1330,
6458,
15829,
355,
360,
15829,
198,
6738,
331,
43695,
1330,
7852,
... | 2.5 | 568 |
# -*- coding: utf-8 -*-
# Scrapes sports articles from the Indian Express and pushes them into a
# Firebase realtime database, then fetches an ESPN page and an ESPNcricinfo
# player table. NOTE(review): runs entirely at import time (network I/O).
import bs4
import requests
import re
from firebase import firebase
# Clear the previous scrape before re-populating '/image'.
firebase=firebase.FirebaseApplication('https://sample1-ae95d.firebaseio.com/')
firebase.delete('/image',None)
res=requests.get('https://indianexpress.com/section/sports/')
soup=bs4.BeautifulSoup(res.text,'lxml')
title=soup.select('.title')
for articles in soup.select('.articles'):
    for title in articles.select('.title'):
        for p in articles.select('p'):
            for image in articles.select('.snaps'):
                for link in image.select('img'):
                    # Skip the 1x1 transparent placeholder used by lazy-loading.
                    if link.get('src')=='https://s0.wp.com/wp-content/themes/vip/plugins/lazy-load-0.7/images/1x1.trans.gif':
                        continue
                    else:
                        print(link.get("src"))
                        print(title.text)
                        for i in title.find_all('a',href=True):
                            print(i['href'])
                        print(p.text)
                        print('\n\n')
                        # NOTE(review): `i` here is whatever the loop above left
                        # bound -- the last <a> of the title; confirm intended.
                        additem=firebase.post('/image',{'name':p.text,'uri':link.get("src"),'title':title.text,'link':i['href']})
res2=requests.get('http://www.espn.in/football/')
soup2=bs4.BeautifulSoup(res2.text,'lxml')
title2=soup2.select('section')
print(title2)
austeam=requests.get('http://www.espncricinfo.com/australia/content/player/country.html?country=2')
aussoup=bs4.BeautifulSoup(austeam.text,'lxml')
# Dump every table cell of the Australian players table.
for player in aussoup.select('.playersTable'):
    for i in player.select('tr'):
        for j in i.select('td'):
            print(j.text)
        print(player.text)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
275,
82,
19,
198,
11748,
7007,
198,
11748,
302,
198,
6738,
2046,
8692,
1330,
2046,
8692,
198,
6495,
8692,
28,
6495,
8692,
13,
13543,
8692,
23416,
10786,
5450,
137... | 1.949112 | 845 |
import bisect

# Read five integers from the user and keep the list sorted as each value
# arrives, inserting it directly into its ordered position.
valores = []
for i in range(5):
    print(f'valores = {valores}.')  # fixed typo: message previously said 'vakores'
    num = int(input(f'Digite um valor para a Posição {i}: '))
    # insort inserts after any equal elements -- the same slot the original
    # manual scan (first element strictly greater than num) selected.
    bisect.insort(valores, num)
print(f'Você digitou os valores {valores}.')
| [
2100,
2850,
796,
17635,
198,
1640,
1312,
287,
2837,
7,
20,
2599,
198,
220,
220,
220,
3601,
7,
69,
6,
85,
461,
2850,
796,
1391,
2100,
2850,
92,
2637,
8,
198,
220,
220,
220,
997,
796,
493,
7,
15414,
7,
69,
6,
19511,
578,
23781,
... | 1.930851 | 188 |
#!/usr/bin/env python3
import sys
import struct
from typing import NamedTuple
import argparse
import zlib
import os
import logging
"""
The Retron5 data format is:
typedef struct
{
uint32_t magic;
uint16_t fmtVer;
uint16_t flags;
uint32_t origSize;
uint32_t packed_size;
uint32_t data_offset;
uint32_t crc32;
uint8_t data[0];
} t_retronDataHdr;
"""
# Command line arguments
parser = argparse.ArgumentParser(description="Read and write Retron5 save files")
parser.add_argument("-d", "--debug", action="store_true", dest="debug", default=False, help="Display debug information")
parser.add_argument("-t", "--to-retron", action="store_true", dest="to_retron", default=False, help="Convert to Retron5 format. Otherwise, convert from Retron5 format")
requiredArguments = parser.add_argument_group('required arguments')
requiredArguments.add_argument("-i", "--input-file", dest="input_filename", type=str, help="File to read in", required=True)
requiredArguments.add_argument("-o", "--output-dir", dest="output_directory", type=str, help="Directory to place the outputted file", required=True)
args = parser.parse_args()
# Output file keeps the input's base name; the extension is added below.
base_filename = os.path.splitext(os.path.basename(args.input_filename))[0] # Pull out just the file name: "/path/to/filename.ext" => "filename"
output_filename = os.path.join(args.output_directory, base_filename)
# -d/--debug switches the root logger from INFO to DEBUG.
log_level = logging.INFO
if args.debug:
    log_level = logging.DEBUG
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
# Dispatch: pack a raw save into Retron5 format, or extract one from it.
# NOTE(review): Retron5SaveFiles is defined elsewhere in this file.
if args.to_retron:
    output_filename += ".sav"
    Retron5SaveFiles.pack_to_retron_save_file(args.input_filename, output_filename)
else:
    output_filename += ".srm" # FIXME: Need to change this per platform?
    Retron5SaveFiles.extract_from_retron_save_file(args.input_filename, output_filename)
sys.exit(0)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
2878,
198,
6738,
19720,
1330,
34441,
51,
29291,
198,
11748,
1822,
29572,
198,
11748,
1976,
8019,
198,
11748,
28686,
198,
11748,
18931,
198,
198,
37811,
... | 2.932367 | 621 |
# Complexity: O(n)
# Complexity: O(n)
# Read the puzzle input (one integer depth reading per line). A context
# manager guarantees the file is closed even if int() raises on a bad line
# -- the old open()/close() pair leaked the handle on that path.
with open('../data/day01.txt', 'r') as f:
    depths = [int(line) for line in f]

print(count_increases(depths))
print(count_increases_sliding_window(depths))
| [
198,
2,
19157,
414,
25,
440,
7,
77,
8,
198,
198,
2,
19157,
414,
25,
440,
7,
77,
8,
628,
198,
69,
796,
1280,
10786,
40720,
7890,
14,
820,
486,
13,
14116,
3256,
705,
81,
11537,
198,
10378,
9998,
796,
1351,
7,
8899,
7,
600,
11,
... | 2.372093 | 86 |
from jackal import Credential, CredentialSearch, UserSearch
from jackal.utils import print_error, print_notification, print_success
import argparse
import re
import os
if __name__ == '__main__':
import_secretsdump()
| [
6738,
14509,
282,
1330,
327,
445,
1843,
11,
327,
445,
1843,
18243,
11,
11787,
18243,
198,
6738,
14509,
282,
13,
26791,
1330,
3601,
62,
18224,
11,
3601,
62,
1662,
2649,
11,
3601,
62,
13138,
198,
11748,
1822,
29572,
198,
11748,
302,
198... | 3.279412 | 68 |
# -*- coding: utf-8 -*-
"""
@author: jens
@modifier: informaton
inf_narco_biomarker --> inf_narco_app
"""
import json # for command line interface input and output.
import os, sys, warnings
from pathlib import Path
import logging
# from asyncore import file_dispatcher
# from typing import Any, Union
import gpflow as gpf
# For hypnodensity plotting ...
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from inf_config import AppConfig # for AppConfig() <-- narco_biomarker(), [previously]
from inf_hypnodensity import Hypnodensity # from inf_extract_features import ExtractFeatures --> moved to
# inf_hypnodensity.py
from inf_narco_model import NarcoModel
# for auditing code speed.
import time
warnings.simplefilter('ignore', FutureWarning) # warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
DEBUG_MODE = False
STANDARD_EPOCH_SEC = 30
DEFAULT_SECONDS_PER_EPOCH = 30
DEFAULT_MINUTES_PER_EPOCH = 0.5 # 30/60 or DEFAULT_SECONDS_PER_EPOCH/60;
# The algorithm produces values between −1 and 1, with 1 indicating a high probability of narcolepsy.
# The cut-off threshold between narcolepsy type 1 and “other“ is set at −0.03.
# Ref: https://www.nature.com/articles/s41467-018-07229-3
NARCOLEPSY_PREDICTION_CUTOFF = -0.03
DIAGNOSIS = ["Other", "Narcolepsy type 1"]
if __name__ == '__main__':
outputFormat = 'json'
if sys.argv[1:]: # if there are at least three arguments (two beyond [0])
_edf_filename = sys.argv[1]
# For hard coding/bypassing json input argument, uncomment the following: jsonObj = json.loads('{
# "channel_indices":{"centrals":[3,4],"occipitals":[5,6],"eog_l":7,"eog_r":8,"chin_emg":9},
# "show":{"plot":false,"hypnodensity":false,"hypnogram":false}, "save":{"plot":false,"hypnodensity":true,
# "hypnogram":true}}')
json_str = json.loads(sys.argv[2])
try:
main(_edf_filename, json_str)
except OSError as oserr:
print("OSError:", oserr)
else:
print(sys.argv[0], 'requires two arguments when run as a script')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
474,
641,
198,
31,
4666,
7483,
25,
4175,
13951,
198,
198,
10745,
62,
23955,
1073,
62,
8482,
296,
668,
263,
14610,
1167,
62,
23955,
1073,
62,
... | 2.555168 | 861 |
from rest_framework.throttling import AnonRateThrottle, UserRateThrottle
# def parse_rate(self, rate):
# """
# returns a tuple: <allowed number of requests>, <period of time in seconds>
# which is fixed to allow 1 request every 30 seconds
# """
# return (1, 60 * 60) # one request per hour
| [
6738,
1334,
62,
30604,
13,
26110,
926,
1359,
1330,
49347,
32184,
817,
305,
23296,
11,
11787,
32184,
817,
305,
23296,
628,
628,
198,
2,
220,
220,
220,
220,
825,
21136,
62,
4873,
7,
944,
11,
2494,
2599,
198,
2,
220,
220,
220,
220,
2... | 2.575758 | 132 |
# app
from ._base import BaseMarker
from ._string import StringMarker
from ._version import VersionMarker
__all__ = ['BaseMarker', 'StringMarker', 'VersionMarker']
| [
2,
598,
198,
6738,
47540,
8692,
1330,
7308,
9704,
263,
198,
6738,
47540,
8841,
1330,
10903,
9704,
263,
198,
6738,
47540,
9641,
1330,
10628,
9704,
263,
628,
198,
834,
439,
834,
796,
37250,
14881,
9704,
263,
3256,
705,
10100,
9704,
263,
... | 3.458333 | 48 |
import datetime
import os
import king_phisher.find as find
import king_phisher.serializers as serializers
import king_phisher.client.plugins as plugins
import king_phisher.client.gui_utilities as gui_utilities
def is_managed_key(key):
	"""
	Return True for configuration keys which should be managed by this
	plugin, so keys for other configuration settings remain untouched.

	:param str key: The name of the configuration key.
	:return: Whether or not the key should be managed by this plugin.
	:rtype: bool
	"""
	# 'mailer.company_name' is explicitly excluded from the managed set.
	if key == 'mailer.company_name':
		return False
	return key.startswith('mailer.') or key in ('remove_attachment_metadata', 'spf_check_level')
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
11748,
5822,
62,
746,
4828,
13,
19796,
355,
1064,
198,
11748,
5822,
62,
746,
4828,
13,
46911,
11341,
355,
11389,
11341,
198,
11748,
5822,
62,
746,
4828,
13,
16366,
13,
37390,
355,
20652,
... | 3.331776 | 214 |
import pygame
from constant import PLACE_IN_IMAGE
from globals import poles
from helpers.DataHelper import load_image, cut_sheet
class Portal(pygame.sprite.Sprite):
    """Portal sprite."""
    # Class-level sprite surface, loaded once at class-definition time.
    # NOTE(review): -1 presumably selects a transparency colorkey in
    # load_image -- verify against helpers.DataHelper.load_image.
    image = load_image('portal.png', -1)

    def cross(self, group):
        """Return True if this Portal sprite overlaps any sprite in *group*
        (pixel-perfect mask collision), otherwise False."""
        for sprite in group:
            if pygame.sprite.collide_mask(self, sprite):
                return True
        return False
| [
11748,
12972,
6057,
198,
198,
6738,
6937,
1330,
9297,
11598,
62,
1268,
62,
3955,
11879,
198,
6738,
15095,
874,
1330,
24971,
198,
6738,
49385,
13,
6601,
47429,
1330,
3440,
62,
9060,
11,
2005,
62,
21760,
628,
198,
4871,
25663,
7,
9078,
... | 1.963265 | 245 |
#!/usr/bin/env python3.6
# this is code is not thought to be used directly, but rather as a basis for understanding how
# to implement a webproxy with websocket support in python
from aiohttp import web
from aiohttp import client
import aiohttp
import asyncio
import logging
import pprint
import re
import string
# Root logger at INFO so aiohttp access logs are visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
baseUrl = 'http://0.0.0.0:8080'  # upstream server being proxied
mountPoint = '/fakeUuid'  # URL prefix under which the proxy is exposed
app = web.Application()
# Forward every HTTP method and path under mountPoint to `handler` (defined
# elsewhere in this file); `proxyPath` captures the remainder of the URL.
app.router.add_route('*',mountPoint + '{proxyPath:.*}', handler)
web.run_app(app,port=3985)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
21,
198,
198,
2,
428,
318,
2438,
318,
407,
1807,
284,
307,
973,
3264,
11,
475,
2138,
355,
257,
4308,
329,
4547,
703,
198,
2,
284,
3494,
257,
3992,
36436,
351,
2639,
5459,
1104,
... | 3.037433 | 187 |
import os
import sys
sys.path.append(os.path.join(os.getcwd().split('xtraderbacktest')[0],'xtraderbacktest'))
import modules.other.sys_conf_loader as sys_conf_loader
import telegram
import json
telegram_conf = sys_conf_loader.get_sys_conf()["notification"]["telegram"]
chat_id = telegram_conf["chat_id"]
token = telegram_conf["token"]
bot = None
if __name__ == "__main__":
send_message("test",{"test_obj":"test_content"}) | [
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
1136,
66,
16993,
22446,
35312,
10786,
742,
81,
5067,
1891,
9288,
11537,
58,
15,
60,
4032,
742,
81,
5067,
1891,
9288,
6,
4008,... | 2.734177 | 158 |
import json
import re
import string
import nltk
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
import joblib
from ..base import BaseTransformer
lem = WordNetLemmatizer()
tokenizer = TweetTokenizer()
nltk.download('wordnet')
nltk.download('stopwords')
eng_stopwords = set(stopwords.words("english"))
with open('steps/resources/apostrophes.json', 'r') as f:
APPO = json.load(f)
| [
11748,
33918,
198,
11748,
302,
198,
11748,
4731,
198,
198,
11748,
299,
2528,
74,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
198,
6738,
299... | 2.897727 | 176 |
from collections import Counter
from Crypto.Util.number import *
import sys
import random
import sympy
flag = open("flag.txt", "rb").read()
print("Loading... this may take a while.")
# p, q = getPrime(4096), getPrime(4096)
p = 1026425489811410154939564330855126910294054624327594068393599330304159964093796641513751922314812554020064574891490673912596072620168714995403643835061896188790756223382222617571855106445326142739551265677862164207875957516478182236814947302551022524484925137164027825054079357474914319847907281582251076726102493643151425255226777384202284599156369363373563642604745988351539992375932670122375621885209873017785436016020435017978053427514364179198035581419291064193143912481690001354300057585042828156962675239775071887979684757536819513306850763668517928489517504927452552162939872700807640351677493869972758820579169090676582753434717869612768936969479106103677792036592591525253773791964700899607724830220666704363012347297530024777101853264936262593009663488486590961760704732696757346560710898582595069246914920687953157305824804491250148764002866105497661178187082037356062332307348555641200201094332852115589136147061683254988284911411038232213180436604722460221221683317461752299144307920409776427279433732673595593846759912534482804113536355504073009187275379612098049501463654478795374584247337053340452322946778347546797205954284458371217689790568891843847863509978597873910159042514411161754488700822562383501114340695307
q = 1043395680645709616430238458303195628267595941826074884197610956272518685169237892257685338085515270472552822744247261654423963842191142037642837920964800633693576392267555249330411657032553703156253205694716092034139527191916079517847830974370662628631791588855728122525062546019057198352330260052007852705437549448884983289498940758993888339215908248039577148907368366410455106129378046322061894612017658889241375715407634414995796890538237944336144443711878126149718493789401770495535773111695769971508315510017254311001534617670751777218597367572636211463332971996559344483815910952592503307235782641373579572043768640608292900021752003390020186654466042409056433113656899144822544860046502540981609954568853744595384279026309061485766510484430418314523778355368947845899338306665838799449847600194704170302644891201942590145707084408673363236338735320909623421936216080709297429099298246664201573778101685813476833025304391362768460926143100965693130874923384062590760046469772424813296120707214816425685695593470565364151176866332389941154164234137981648148342496747222830095027767141778639485792584690124754600956527467686001491244941041338137797069982744687435642509704783437673181296992620594796363880090948363387722267765401
e = 0x10001  # standard RSA public exponent (65537)
n = p * q  # RSA modulus from the hard-coded primes above
m = random.randrange(0, n)  # random plaintext used by the challenge
c = pow(m, e, n)  # its RSA ciphertext
# Private exponent: inverse of e modulo (p-1)(q-1). NOTE(review): assumes
# gcd(e, (p-1)(q-1)) == 1, which holds for these fixed primes.
d = sympy.mod_inverse(e, (p - 1) * (q - 1))
# Interactive challenge loop; NOTE(review): `menu` is defined elsewhere.
while 1:
    menu(16)
| [
6738,
17268,
1330,
15034,
198,
6738,
36579,
13,
18274,
346,
13,
17618,
1330,
1635,
198,
11748,
25064,
198,
11748,
4738,
198,
11748,
10558,
88,
198,
198,
32109,
796,
1280,
7203,
32109,
13,
14116,
1600,
366,
26145,
11074,
961,
3419,
198,
... | 2.356312 | 1,204 |
import json
import random
import time
from pathlib import Path
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from apps.seletask import SeleTask
from settings import BASE_DIR, COOKIE_DIR, MESS_DIR
BING_KEYWORD_DIR = Path(MESS_DIR, 'bingkeywords')
if not BING_KEYWORD_DIR.exists():
BING_KEYWORD_DIR.mkdir()
| [
11748,
33918,
198,
11748,
4738,
198,
11748,
640,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1308... | 2.981366 | 161 |
#Members: William Hong, Daniel Han, Aaron Chen, Farhan Azad
# Libraries that are required for function performance
import cgitb
cgitb.enable()
import numpy as np
import pandas as pd
import scipy.stats
import pandas_datareader as pdr
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import sys
from numpy.polynomial import Polynomial as P
import numpy.linalg as la
# Functions Created in Other Files that will be used in our analysis
from companies_lists import companies_lists
from compute_average_daily_price_change import compute_average_daily_price_change
from compute_correlation_companies import compute_correlation_companies
from compute_exponential_moving_average import compute_exponential_moving_average
from compute_stochastic_oscillator import compute_stochastic_oscillator
from compute_volatility import compute_volatility
from data_discrete import data_discrete
from stock_obv_visual import stock_obv_visual
#DEVELOPER INSTRUCTIONS
#(1) MAKE SURE TO INSTALL THIS PACKAGE IN THE TERMINAL USING THE COMMANDS:
#pip install pandas-datareader
#(2) go on terminal in the directory this file is in
#(3) type in the command line
#python random_walk.py
#(4) enter in instrctions when prompted in the terminal
#PART 1: testing basic stat functions
#mean
list_1 = np.array([1, 2, 3, 4, 5])
#print(np.mean(list_1)) #should return 3
#2D arrays
list_2 = np.array([ [1, 2, 3], [4, 5, 6] ]) #list of lists
#print(np.mean(list_2)) #should return 3.5
#print(np.mean(list_2[0])) #should return mean of first list, which is 2
#should return mean of 1 and 4, 2 and 5, 3 and 6... which is [2.5 3.5 4.5]
#print(np.mean(list_2, axis = 0))
#should return mean of 1,2,3 and 4,5,6... which is [2. 5.]
#print(np.mean(list_2, axis = 1))
#standard deviation
list_3 = np.array([1, 2, 3, 4])
#print(np.std(list_3)) #should return 1.118 ish
#PART 2: testing the correlation coefficient function
# > 0 is positively correlated, 0 is uncorrelated, < 0 is negatively
# range of correlation is [-1, 1]
# ORDER OF INPUTS DOES NOT MATTER FOR CORRELATION COEFFICIENT
#perfect linear negative correlation
list_4 = [0, 1, 2]
list_5 = [2, 1, 0]
list_4 = pd.Series(list_4)
list_5 = pd.Series(list_5)
#print(list_4.corr(list_5)) #should return -1
#perfect linear positive correlation
list_6 = [0, 1, 2]
list_7 = [1, 2, 3]
list_6 = pd.Series(list_6)
list_7 = pd.Series(list_7)
#print(list_6.corr(list_7)) #should return 1
#PART 3: retrieving csv files from online and storing into list
COST = pdr.get_data_yahoo(symbols = 'COST', start = datetime(2010, 7, 14), end = datetime(2020, 7, 14))
#print(round(COST['Close'][0], 2)) #Costco's close at 7/14/2010 was $56.35
COST_list = []
x = 0
while (x < len(COST)):
entry = COST['Close'][x]
entry = round(entry, 2)
COST_list.append(entry)
x += 1
#print(COST_list)
#PART 4: entering command line input to get analysis
#THIS IS THE COOL SHIT
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
while True:
selection = input("Enter option here: ")
plt.close()
plt.close()
if selection == '1':
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to compute correlation coefficient between two data points \nEnter in 2 to compute volatility \nEnter in 3 to compute stochastic oscillator\nEnter in 4 to compute average daily percent change\nEnter in 5 to compute exponential moving average")
next_selection = input("Enter option here: ")
if next_selection == '1':
enter_company_1 = input("Enter a ticker here: ")
enter_company_2 = input("Enter another ticker here: ")
enter_start = input("Enter start date of analysis as Year, Month, Day (ex: 2010, 8, 12): ")
enter_end = input("Enter end date of analysis as Year, Month, Day (ex: 2020, 8, 12): ")
start_list = enter_start.split(", ")
end_list = enter_end.split(", ")
print("Here is the correlaton coefficient between %s and %s: " %(enter_company_1, enter_company_2))
print(compute_correlation_companies(enter_company_1, enter_company_2,
start_list[0], start_list[1], start_list[2], end_list[0], end_list[1], end_list[2] ))
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection == '2':
enter_company = input("Enter a ticker here: ")
enter_start = input("Enter start date of analysis as Year, Month, Day (ex: 2010, 8, 12): ")
enter_end = input("Enter end date of analysis as Year, Month, Day (ex: 2020, 8, 12): ")
start_list = enter_start.split(", ")
end_list = enter_end.split(", ")
print("Here is the volatility analysis for %s: " %enter_company)
compute_volatility(enter_company, start_list[0], start_list[1], start_list[2], end_list[0], end_list[1], end_list[2])
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection == '3':
enter_company = input("Enter a ticker here: ")
enter_date = input("Enter in today's date (ex: 2020, 8, 12): ")
date_list = enter_date.split(", ")
print("Here is the stochastic oscillator for %s: " %enter_company)
compute_stochastic_oscillator(enter_company, date_list[0], date_list[1], date_list[2])
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection == '4':
enter_company = input("Enter a ticker here: ")
enter_start = input("Enter start date of analysis as Year, Month, Day (ex: 2010, 8, 12): ")
enter_end = input("Enter end date of analysis as Year, Month, Day (ex: 2020, 8, 12): ")
start_list = enter_start.split(", ")
end_list = enter_end.split(", ")
print("Here is the average daily percent change for %s " %enter_company)
compute_average_daily_price_change(enter_company,start_list[0], start_list[1], start_list[2], end_list[0], end_list[1], end_list[2])
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection == '5':
enter_company = input("Enter a ticker here: ")
enter_start = input("Enter start date of analysis as Year, Month, Day (ex: 2010, 8, 12): ")
enter_end = input("Enter end date of analysis as Year, Month, Day (ex: 2020, 8, 12): ")
start_list = enter_start.split(", ")
end_list = enter_end.split(", ")
print("Here is the exponential moving average for %s " %enter_company)
print(compute_exponential_moving_average(enter_company,start_list[0], start_list[1], start_list[2], end_list[0], end_list[1], end_list[2]))
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection != '1' and next_selection != '2' and next_selection != '3' and next_selection != '4' and next_selection != '5':
print("Incorrect input entered")
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if selection == '2':
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to get a list of relevant market indexes \nEnter in 2 to get a list of US market sectors \nEnter in 3 to get a list of commodities\nEnter in 4 to get a list of US Treasury bonds and currencies")
next_selection = input("Enter option here: ")
if next_selection == '1':
print("________________________________________________________________________________")
a = "Below are some tickers you can enter that track market indexes\n"
b = "S&P 500: ^GSPC\n"
c = "Dow Jones Industrial Average: ^DJI\n"
d = "NASDAQ Composite: ^IXIC\n"
e = "Russell 2000: ^RUT \n"
f = "S&P 400: ^SP400\n"
g = "FTSE 100: %5EFTSE%3FP%3DFTSE\n"
h = "EURO Stoxx 50: ^STOXX50E\n"
i = "DAX 30: ^GDAXI\n"
j = "CAC 40: ^FCHI\n"
k = "Nikkei 225: ^N225\n"
l = "Shanghai Composite: 000001.SS\n"
m = "Hang Seng Index: ^HSI\n"
n = "KOSPI: ^KS11\n"
o = "Bovespa Index: BVSP\n"
p = "VIX Volatility Index: ^VIX"
print(a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p)
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection == '2':
print("________________________________________________________________________________")
a = "Below are some tickers you can enter that represent market sectors\n"
b = "Communication Services: VOX\n"
c = "Consumer Discretionary: VCR\n"
d = "Consumer Staples: VDC\n"
e = "Energy: VDE\n"
f = "Financials: VFH\n"
g = "Health: VHT\n"
h = "Industrials: VAW\n"
i = "Materials: VMC\n"
j = "Real Estate: VNQ\n"
k = "Technology: VGT\n"
l = "Utilities: VPU"
print(a+b+c+d+e+f+g+h+i+j+k+l)
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection == '3':
print("________________________________________________________________________________")
a = "Below are some tickers you can enter that track commodities futures\n"
b = "Gold: GC=F\n"
c = "Silver: SI=F\n"
d = "Copper: HG=F\n"
e = "Platinum: PL=F\n"
f = "Corn: ZC=F\n"
g = "Soybeans: ZS=F\n"
h = "Sugar: SB=F\n"
i = "Coffee: KC=F\n"
j = "Cocoa: CC=F\n"
k = "Cotton: CT=F\n"
l = "Rough Rice: ZR=F\n"
m = "Wheat: KE=F\n"
n = "Crude Oil: CL=F\n"
o = "Heating Oil: HO=F\n"
p = "RBOB Gasoline: RB=F\n"
q = "Natural Gas: NG=F"
print(a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q)
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection == '4':
print("________________________________________________________________________________")
a = "Below are some tickers you can enter that track US Treasury bond yields and currency exchange rates\n"
b = "US Treasury 10 Year Bond: ^TNX\n"
c = "US Treasury 5 Year Bond: ^FVX\n"
d = "US Treasury 30 Year Bond: ^TYX\n"
e = "US Dollar/Euro Exchange Rate: USDEUR=X\n"
f = "US Dollar/Yen Exchage Rate: JPY=X\n"
g = "US Dollar/Pound Sterling Exchange Rate: GBP=X\n"
h = "US Dollar/Yuan Exchange Rate: CNH=X\n"
i = "Euro/Yen Exchange Rate: EURJPY=X\n"
j = "Bitcoin/US Dollar: BTCUSD=X"
print(a+b+c+d+e+f+g+h+i+j)
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if next_selection != '1' and next_selection != '2' and next_selection != '3' and next_selection != '4':
print("Incorrect input entered")
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if selection == '3':
print("________________________________________________________________________________")
enter_company = input("Enter a ticker here: ")
enter_start = input("Enter start date of analysis as Year, Month, Day (ex: 2010, 8, 12): ")
enter_end = input("Enter end date of analysis as Year, Month, Day (ex: 2020, 8, 12): ")
start_list = enter_start.split(", ")
end_list = enter_end.split(", ")
print("Here is the price over time and volume over time for %s: " %enter_company)
print(stock_obv_visual(enter_company,start_list[0], start_list[1], start_list[2], end_list[0], end_list[1], end_list[2] ))
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
if selection == '4':
sys.exit()
if selection != '1' and selection != '2' and selection != '3' and selection != '4':
print("Incorrect input entered")
print("________________________________________________________________________________")
print("Instructions: \nEnter in 1 to start analysis \nEnter in 2 to get a list of useful tickers you can enter \nEnter in 3 to get graphing capabilities\nEnter in 4 to exit the program")
| [
2,
25341,
25,
3977,
9764,
11,
7806,
9530,
11,
12139,
12555,
11,
6755,
7637,
7578,
324,
198,
198,
2,
46267,
326,
389,
2672,
329,
2163,
2854,
198,
11748,
269,
18300,
65,
198,
66,
18300,
65,
13,
21633,
3419,
198,
11748,
299,
32152,
355... | 2.649741 | 5,790 |
#!/usr/bin/env python3
import pickle
import datetime
if __name__ == "__main__":
with open("downtime.pickle", "wb") as f:
pickle.dump(datetime.datetime.now(), f)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
2298,
293,
198,
11748,
4818,
8079,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
351,
1280,
7203,
67,
6887,
524,
13,
27729,
2... | 2.378378 | 74 |
from copy import deepcopy
VALID_COMPUTE_BODY = {
'agreementId': '0x0',
'owner': '0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA',
'providerSignature': 'sig',
'workflow': {
'stages': [
{
'algorithm': {
'container': {
'entrypoint': 'node $ALGO',
'image': 'node',
'tag': '10'
},
'id': 'did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf',
'rawcode': "console.log('this is a test')",
'url': 'https://raw.githubusercontent.com/oceanprotocol/test-algorithm/master/javascript/algo.js'
},
'compute': {
'Instances': 1,
'maxtime': 3600,
'namespace': 'withgpu'
},
'index': 0,
'input': [
{
'id': 'did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf',
'index': 0,
'url': [
'https://data.ok.gov/sites/default/files/unspsc%20codes_3.csv'
]
},
],
'output': {
'brizoAddress': '0x4aaab179035dc57b35e2ce066919048686f82972',
'brizoUri': 'https://brizo.marketplace.dev-ocean.com',
'metadata': {
'name': 'Workflow output'
},
'metadataUri': 'https://aquarius.marketplace.dev-ocean.com',
'nodeUri': 'https://nile.dev-ocean.com',
'owner': '0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA',
'publishAlgorithmLog': True,
'publishOutput': True,
'secretStoreUri': 'https://secret-store.nile.dev-ocean.com',
'whitelist': [
'0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e',
'0xACBd138aBD70e2F00903268F3Db08f2D25677C9e'
]
}
}
],
}
}
NO_WORKFLOW_COMPUTE_BODY = deepcopy(VALID_COMPUTE_BODY)
NO_WORKFLOW_COMPUTE_BODY['workflow'] = {}
NO_STAGES_COMPUTE_BODY = deepcopy(VALID_COMPUTE_BODY)
NO_STAGES_COMPUTE_BODY['workflow']['stages'] = []
INVALID_STAGE_COMPUTE_BODY = deepcopy(VALID_COMPUTE_BODY)
del INVALID_STAGE_COMPUTE_BODY['workflow']['stages'][0]['algorithm']
VALID_COMPUTE_BODY_WITH_NO_MAXTIME = deepcopy(VALID_COMPUTE_BODY)
del VALID_COMPUTE_BODY_WITH_NO_MAXTIME['workflow']['stages'][0]['compute']['maxtime']
| [
6738,
4866,
1330,
2769,
30073,
198,
198,
23428,
2389,
62,
9858,
30076,
36,
62,
33,
33076,
796,
1391,
198,
220,
220,
220,
705,
363,
10237,
7390,
10354,
705,
15,
87,
15,
3256,
198,
220,
220,
220,
705,
18403,
10354,
705,
15,
87,
34,
... | 1.581787 | 1,724 |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from pyramid.view import view_config
from schematizer.api.decorators import log_api
from schematizer.api.decorators import transform_api_response
from schematizer.api.exceptions import exceptions_v1
from schematizer.api.requests import requests_v1
from schematizer.api.responses import responses_v1
from schematizer.logic import doc_tool
from schematizer.logic import schema_repository
from schematizer.models.source import Source
@view_config(
route_name='api.v1.list_sources',
request_method='GET',
renderer='json'
)
@transform_api_response()
@view_config(
route_name='api.v1.get_source_by_id',
request_method='GET',
renderer='json'
)
@transform_api_response()
@view_config(
route_name='api.v1.list_topics_by_source_id',
request_method='GET',
renderer='json'
)
@transform_api_response()
@view_config(
route_name='api.v1.get_latest_topic_by_source_id',
request_method='GET',
renderer='json'
)
@transform_api_response()
@view_config(
route_name='api.v1.update_category',
request_method='POST',
renderer='json'
)
@transform_api_response()
@log_api()
@view_config(
route_name='api.v1.delete_category',
request_method='DELETE',
renderer='json'
)
@transform_api_response()
@view_config(
route_name='api.v1.create_refresh',
request_method='POST',
renderer='json'
)
@transform_api_response()
@view_config(
route_name='api.v1.list_refreshes_by_source_id',
request_method='GET',
renderer='json'
)
@transform_api_response()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1584,
44628,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428... | 2.864935 | 770 |
from Demo_gym.envs.atari.atari_env import AtariEnv
| [
6738,
34588,
62,
1360,
76,
13,
268,
14259,
13,
35554,
13,
35554,
62,
24330,
1330,
35884,
4834,
85,
198
] | 2.684211 | 19 |
"""
Unit tests for the calculator library
"""
import calculator
| [
37811,
198,
26453,
5254,
329,
262,
28260,
5888,
198,
37811,
198,
198,
11748,
28260,
628
] | 4.4 | 15 |
import random
import numpy as np
import torch
import torchvision
from torchvision.transforms import functional as F
| [
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
6738,
28034,
10178,
13,
7645,
23914,
1330,
10345,
355,
376,
628,
198,
220,
220,
220,
220
] | 3.8125 | 32 |
import json
from pytest import approx
import netket as nk
import numpy as np
import shutil
import tempfile
SEED = 3141592
L = 4
sx = [[0, 1], [1, 0]]
sy = [[0, -1j], [1j, 0]]
sz = [[1, 0], [0, -1]]
sigmam = [[0, 0], [1, 0]]
| [
11748,
33918,
198,
6738,
12972,
9288,
1330,
5561,
198,
11748,
2010,
7126,
355,
299,
74,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
628,
198,
5188,
1961,
796,
34085,
1314,
5892,
198,
43,
796,
604,... | 2.186916 | 107 |
#!/usr/bin/env python
import csv
import json
import re
import sys
import warnings
from collections import OrderedDict
from copy import deepcopy
from io import StringIO
from pathlib import Path
import click
import requests
from ocdsextensionregistry import ProfileBuilder
from ocdskit.schema import add_validation_properties
basedir = Path(__file__).resolve().parent
def get(url):
"""
GETs a URL and returns the response. Raises an exception if the status code is not successful.
"""
response = requests.get(url)
response.raise_for_status()
return response
def csv_reader(url):
"""
Reads a CSV from a URL and returns a ``csv.DictReader`` object.
"""
return csv.DictReader(StringIO(get(url).text))
def coerce_to_list(data, key):
"""
Returns the value of the ``key`` key in the ``data`` mapping. If the value is a string, wraps it in an array.
"""
item = data.get(key, [])
if isinstance(item, str):
return [item]
return item
def edit_code(row, oc4ids_codes, source):
"""
If the row's "Code" is in the ``oc4ids_codes`` list, adds " or project" after "contracting process" in the row's
"Description" and sets the row's "Source" to ``"OC4IDS"``. Otherwise, sets the row's "Source" to ``source``.
"""
if row['Code'] in oc4ids_codes:
row['Description'] = re.sub(r'(?<=contracting process\b)', ' or project', row['Description'])
row['Description'] = re.sub(r'(?<=contracting processes\b)', ' or projects', row['Description'])
row['Source'] = 'OC4IDS'
else:
row['Source'] = source
return row
def traverse(schema_action=None, object_action=None):
"""
Implements common logic for walking through the schema.
"""
if object_action is None:
return method
# Similar in structure to `add_versioned` in the standard's `make_versioned_release_schema.py`.
def remove_null_and_pattern_properties(*args):
"""
Removes the "patternProperties" key, ``"null"`` from the "type" key, and ``None`` from the "enum" key.
"""
traverse(schema_action, object_action)(*args)
def remove_deprecated_properties(*args):
"""
Removes "deprecated" properties.
"""
traverse(schema_action)(*args)
def remove_integer_identifier_types(*args):
"""
Sets all ``id`` fields to allow only strings, not integers.
"""
traverse(schema_action)(*args)
def compare(actual, infra_list, ocds_list, prefix, suffix):
"""
Aborts if ``infra_list`` contains values not in ``actual``, or if ``actual`` contains values not in ``infra_list``
or ``ocds_list``. This ensures an editor updates this script when codelists or definitions are added to OC4IDS.
"""
actual = set(actual)
# An editor might have added an infrastructure codelist, or copied an OCDS codelist, without updating this script.
added = actual - infra_list - ocds_list
if added:
sys.exit(f'{prefix} has unexpected {", ".join(added)}: add to infra_{suffix} or ocds_{suffix}?')
# An editor might have removed an infrastructure codelist, without updating this script.
removed = infra_list - actual
if removed:
sys.exit(f'{prefix} is missing {", ".join(removed)}: remove from infra_{suffix}?')
@click.group()
@cli.command()
@click.option('--ppp-base-url',
default='https://standard.open-contracting.org/profiles/ppp/latest/en/_static/patched/')
def update(ppp_base_url):
"""
Aligns OC4IDS with OCDS. It uses OCDS for PPPs as a basis, as it includes most definitions and codelists needed in
OC4IDS. It copies definitions and codelists across, making modifications as required.
Run this command for every release of OCDS for PPPs, review any changes to schemas or codelists, and update the
command as needed.
Some OC4IDS-specific definitions have fields with the same names as in OCDS-specific definitions, notably:
- procurementMethod
- procurementMethodDetails
- tenderers
The descriptions of most other such fields have diverged. As such, the command makes no effort to copy the
descriptions of such fields, and instead leaves this up to the editor.
"""
ocds_base_url = 'https://standard.open-contracting.org/1.1/en/'
builder = ProfileBuilder('1__1__5', {'budget': 'master'})
ppp_schema = get(f'{ppp_base_url}release-schema.json').json(object_pairs_hook=OrderedDict)
ppp_schema = builder.patched_release_schema(schema=ppp_schema)
schema_dir = basedir / 'schema' / 'project-level'
codelists_dir = schema_dir / 'codelists'
with (schema_dir / 'project-schema.json').open() as f:
schema = json.load(f, object_pairs_hook=OrderedDict)
infra_codelists = {
'contractingProcessStatus.csv',
'contractNature.csv',
'metricID.csv',
'modificationType.csv',
'projectSector.csv',
'projectStatus.csv',
'projectType.csv',
'relatedProjectScheme.csv',
'relatedProject.csv',
}
ocds_codelists = {
'currency.csv',
'documentType.csv',
'geometryType.csv',
'locationGazetteers.csv',
'method.csv',
'partyRole.csv',
'releaseTag.csv',
'unitClassificationScheme.csv',
}
compare([path.name for path in codelists_dir.iterdir()], infra_codelists, ocds_codelists,
'schema/project-level/codelists', 'codelists')
infra_definitions = {
'ContractingProcess',
'ContractingProcessSummary', # Similar to individual release in OCDS
'LinkedRelease', # Similar to linked release in OCDS
'Modification',
'RelatedProject', # Similar to relatedProcess in OCDS
'Person',
}
ocds_definitions = {
'Period',
'Classification',
'Location',
'Value',
'Organization',
'OrganizationReference',
'Address',
'ContactPoint',
'BudgetBreakdown',
'Document',
'Identifier',
'Metric',
'Observation',
'Transaction',
}
compare(schema['definitions'], infra_definitions, ocds_definitions,
'schema/project-level/project-schema.json#/definitions', 'definitions')
# Originally from https://docs.google.com/spreadsheets/d/1ttXgMmmLvqBlPRi_4jAJhIobjnCiwMv13YwGfFOnoJk/edit#gid=0
ignore = {
# https://github.com/open-contracting/infrastructure/issues/269
'finalAudit',
# https://github.com/open-contracting/standard/issues/870
'contractSchedule',
# PPP-specific code or description
'needsAssessment',
'projectAdditionality',
'financeAdditionality',
'pppModeRationale',
'riskComparison',
'discountRate',
'equityTransferCaps',
'financeArrangements',
'guaranteeReports',
'grants',
'servicePayments',
'landTransfer',
'assetTransfer',
'revenueShare',
'otherGovernmentSupport',
'tariffMethod',
'tariffReview',
'tariffs',
'tariffIllustration',
'handover',
'financialStatement',
}
# Copy the OCDS codelists.
for basename in ocds_codelists:
path = schema_dir / 'codelists' / basename
if basename in ('documentType.csv', 'partyRole.csv'):
with open(path) as f:
reader = csv.DictReader(f)
fieldnames = reader.fieldnames
oc4ids_rows = []
oc4ids_codes = []
for row in reader:
if row['Source'] == 'OC4IDS':
oc4ids_rows.append(row)
oc4ids_codes.append(row['Code'])
with open(path, 'w') as f:
if basename == 'documentType.csv':
io = StringIO()
writer = csv.DictWriter(io, fieldnames, lineterminator='\n', extrasaction='ignore')
writer.writeheader()
seen = []
# Add codes from OCDS for PPPs.
reader = csv_reader(f'{ppp_base_url}codelists/{basename}')
for row in reader:
if row['Code'] not in ignore:
seen.append(row['Code'])
# These codes' descriptions are entirely new.
if row['Code'] in ('environmentalImpact',):
row = next(oc4ids_row for oc4ids_row in oc4ids_rows if oc4ids_row['Code'] == row['Code'])
else:
edit_code(row, oc4ids_codes, 'OCDS for PPPs')
writer.writerow(row)
# Add codes from OCDS.
reader = csv_reader(f'{ocds_base_url}codelists/documentType.csv')
for row in reader:
if row['Code'] not in seen and row['Code'] not in ignore:
seen.append(row['Code'])
edit_code(row, oc4ids_codes, 'OCDS')
writer.writerow(row)
# Add pre-existing codes from OC4IDS.
writer.writerows(row for row in oc4ids_rows if row['Code'] not in seen)
text = io.getvalue()
elif basename == 'partyRole.csv':
io = StringIO()
writer = csv.DictWriter(io, fieldnames, lineterminator='\n', extrasaction='ignore')
writer.writeheader()
seen = []
# Add codes from OCDS.
reader = csv_reader(f'{ocds_base_url}codelists/partyRole.csv')
for row in reader:
if row['Code'] not in seen:
seen.append(row['Code'])
edit_code(row, oc4ids_codes, 'OCDS')
writer.writerow(row)
# Add pre-existing codes from OC4IDS.
writer.writerows(row for row in oc4ids_rows if row['Code'] not in seen)
text = io.getvalue()
else:
text = get(f'{ppp_base_url}codelists/{basename}').text
f.write(text)
# The following definitions follow the same order as in project-schema.json.
copy_def('Period', {
# Refer to project.
('description',): lambda s: s.replace('contracting process', 'project or contracting process'),
})
copy_def('Classification', {
# Remove line item classifications from the definition.
('properties', 'scheme', 'description'): lambda s: s[:s.index(' For line item classifications,')],
})
# Remove the `itemClassificationScheme.csv` codelist.
del(schema['definitions']['Classification']['properties']['scheme']['codelist'])
del(schema['definitions']['Classification']['properties']['scheme']['openCodelist'])
copy_def('Location')
# noqa: Original from ocds_location_extension: "The location where activity related to this tender, contract or license will be delivered, or will take place. A location can be described by either a geometry (point location, line or polygon), or a gazetteer entry, or both."
schema['definitions']['Location']['description'] = "The location where activity related to this project will be delivered, or will take place. A location may be described using a geometry (point location, line or polygon), a gazetteer entry, an address, or a combination of these." # noqa: E501
# Add id to Location.
schema['definitions']['Location']['properties']['id'] = {
'title': 'Identifier',
'description': 'A local identifier for this location, unique within the array this location appears in.',
'type': 'string',
'minLength': 1,
}
# Add address to Location.
schema['definitions']['Location']['properties']['address'] = {
'title': 'Address',
'description': 'A physical address where works will take place.',
'$ref': '#/definitions/Address',
}
schema['definitions']['Location']['properties'].move_to_end('id', last=False)
schema['definitions']['Location']['required'] = ['id']
# Set stricter validation on gazetteer identifiers
schema['definitions']['Location']['properties']['gazetteer']['properties']['identifiers']['uniqueItems'] = True
copy_def('Value')
copy_def('Organization', {
# Refer to project instead of contracting process, link to infrastructure codelist instead of PPP codelist.
('properties', 'roles', 'description'): lambda s: s.replace('contracting process', 'project').replace('profiles/ppp/latest/en/', 'infrastructure/{{version}}/{{lang}}/') # noqa: E501
})
# Remove unneeded extensions and details from Organization.
del(schema['definitions']['Organization']['properties']['shareholders'])
del(schema['definitions']['Organization']['properties']['beneficialOwnership'])
del(schema['definitions']['Organization']['properties']['details'])
# Set stricter validation on party roles
schema['definitions']['Organization']['properties']['roles']['uniqueItems'] = True
# Add `people` property to OrganizationReference
schema['definitions']['Organization']['properties']['people'] = {
"title": "People",
"description": "People associated with, representing, or working on behalf of this organization in respect of this project.", # noqa: E501
"type": "array",
"items": {
"$ref": "#/definitions/Person"
},
"uniqueItems": True
}
copy_def('OrganizationReference')
copy_def('Address')
copy_def('ContactPoint', {
# Refer to project instead of contracting process.
('properties', 'name', 'description'): lambda s: s.replace('contracting process', 'project'),
})
copy_def('BudgetBreakdown')
copy_def('Document', {
# Link to infrastructure codelist instead of PPP codelist
('properties', 'documentType', 'description'): lambda s: s.replace('profiles/ppp/latest/en/', 'infrastructure/{{version}}/{{lang}}/'), # noqa: E501
})
# noqa: Original from standard: "A short description of the document. We recommend descriptions do not exceed 250 words. In the event the document is not accessible online, the description field can be used to describe arrangements for obtaining a copy of the document.",
schema['definitions']['Document']['properties']['description']['description'] = "Where a link to a full document is provided, the description should provide a 1 - 3 paragraph summary of the information the document contains, and the `pageStart` field should be used to make sure readers can find the correct section of the document containing more information. Where there is no linked document available, the description field may contain all the information required by the current `documentType`. \n\nLine breaks in text (represented in JSON using `\\n\\n`) must be respected by systems displaying this information, and systems may also support basic HTML tags (H1-H6, B, I, U, strong, A and optionally IMG) or [markdown syntax](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) for formatting. " # noqa: E501
# noqa: Original from standard: " direct link to the document or attachment. The server providing access to this document should be configured to correctly report the document mime type."
schema['definitions']['Document']['properties']['url']['description'] = "This should be a direct link to the document or web page where the information described by the current documentType exists." # noqa: E501
copy_def('Identifier')
copy_def('Metric', {
('properties', 'id', 'description'): lambda s: s.replace('contracting process', 'contracting process or project')}), # noqa: E501
schema['definitions']['Metric']['description'] = "Metrics are used to set out forecast and actual metrics targets for a project: for example, planned and actual physical and financial progress over time." # noqa: E501
# noqa: Original from standard: "Metrics are used to set out targets and results from a contracting process. During the planning and tender sections, a metric indicates the anticipated results. In award and contract sections it indicates the awarded/contracted results. In the implementation section it is used to provide updates on actually delivered results, also known as outputs."
copy_def('Observation')
# Remove the `relatedImplementationMilestone` property
del(schema['definitions']['Observation']['properties']['relatedImplementationMilestone'])
copy_def('Transaction')
remove_null_and_pattern_properties(schema)
remove_integer_identifier_types(schema)
remove_deprecated_properties(schema)
add_validation_properties(schema)
with (schema_dir / 'project-schema.json').open('w') as f:
json.dump(schema, f, ensure_ascii=False, indent=2)
f.write('\n')
if __name__ == '__main__':
cli()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
14601,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
... | 2.558422 | 6,667 |
from pylearn2.datasets.mnist import MNIST
from pylearn2.space import IndexSpace, VectorSpace
import unittest
from pylearn2.testing.skip import skip_if_no_data
import numpy as np
| [
6738,
279,
2349,
1501,
17,
13,
19608,
292,
1039,
13,
10295,
396,
1330,
29060,
8808,
198,
6738,
279,
2349,
1501,
17,
13,
13200,
1330,
12901,
14106,
11,
20650,
14106,
198,
11748,
555,
715,
395,
198,
6738,
279,
2349,
1501,
17,
13,
33407,... | 3.033898 | 59 |
from direct.showbase.PythonUtil import bound as clamp
from . import CogdoMazeGameGlobals as Globals
import math
import random
| [
6738,
1277,
13,
12860,
8692,
13,
37906,
18274,
346,
1330,
5421,
355,
29405,
198,
6738,
764,
1330,
327,
519,
4598,
44,
6201,
8777,
9861,
672,
874,
355,
40713,
874,
198,
11748,
10688,
198,
11748,
4738,
198
] | 3.5 | 36 |
"""Write functions to import memristor data."""
import numpy as np
import pandas as pd
from Jesse_Funcs import blahut
def get_raw_data(path):
    """
    Load a pickled V/R measurement set.

    Parameters
    ----------
    path : str
        Absolute path to the pickled data (must contain 'V' and 'R'
        columns/keys).

    Returns
    -------
    Vs : np.ndarray, shape (len(data),)
        Voltage samples.
    Rs : np.ndarray, shape (len(data),)
        Resistance samples.
    """
    frame = pd.read_pickle(path)
    voltages = np.array(frame['V'])
    resistances = np.array(frame['R'])
    return voltages, resistances
def range_extender(Vs, Rs, num_ext):
    """
    Extend the memristor curve by repeating its end values.

    Each of the ``num_ext`` passes appends one voltage point below the
    current minimum and one above the current maximum, both carrying the
    resistance measured at that extreme, so R(V) is held flat past the
    measured range.

    Parameters
    ----------
    Vs : array_like, shape (n,)
        Voltage samples; assumed equally spaced (the step is taken from
        the first two entries).
    Rs : array_like, shape (n,)
        Resistance samples corresponding to ``Vs``.
    num_ext : float
        Number of times the end values should be repeated.

    Returns
    -------
    Vs : np.ndarray
        Extended voltages (extension points are appended, not sorted).
    Rs : np.ndarray
        Extended resistances.
    orig_min_Vs : float
        Minimum of the original (unextended) voltages.
    orig_max_Vs : float
        Maximum of the original (unextended) voltages.
    """
    num_ext = int(num_ext)
    Vs = np.array(Vs)
    Rs = np.array(Rs)
    delta_V = Vs[1] - Vs[0]
    orig_min_Vs = np.amin(Vs)
    orig_max_Vs = np.amax(Vs)
    for _ in range(num_ext):
        # Bug fix: the original wrapped these boolean masks in a
        # one-element list (`[Vs == min_Vs]`) before indexing, which relies
        # on deprecated NumPy behaviour; index with the mask directly.
        min_mask = Vs == np.amin(Vs)
        max_mask = Vs == np.amax(Vs)
        Rs_min = Rs[min_mask]
        Rs_max = Rs[max_mask]
        Vs_min = Vs[min_mask] - delta_V
        Vs_max = Vs[max_mask] + delta_V
        Vs = np.append(Vs, Vs_min)
        Vs = np.append(Vs, Vs_max)
        Rs = np.append(Rs, Rs_min)
        Rs = np.append(Rs, Rs_max)
    return Vs, Rs, orig_min_Vs, orig_max_Vs
def get_memristor_data(path, n_mem, num_ext=5, norm_min=-1., norm_max=1.):
    """
    Load measured memristor data and compute per-voltage noise statistics.

    The resistances are taken in log10, both axes are normalised to
    [norm_min, norm_max], the curve ends are extended (see
    ``range_extender``) and the moments are computed with
    ``blahut.moments``; each 1-D statistic is then replicated across
    ``n_mem`` columns.

    Parameters
    ----------
    path : str
        Absolute path to the pickled raw data.
    n_mem : float
        Number of memristors we want to simulate.
    num_ext : float
        Number of times the end values should be repeated
        (see range_extender).
    norm_min, norm_max : float
        Target range of the normalisation.

    Returns
    -------
    vs : np.ndarray, shape (n_samp, n_mem)
    mus : np.ndarray, shape (n_samp, n_mem)
    sigs : np.ndarray, shape (n_samp, n_mem)
    orig_min_Vs, orig_max_Vs : float
        Voltage extremes before normalisation.
    orig_min_Rs, orig_max_Rs : float
        log10-resistance extremes before normalisation.
    """
    volts, res = get_raw_data(path)
    res = np.log10(res)
    volts = np.array(volts)
    res = np.array(res)
    v_lo, v_hi = np.amin(volts), np.amax(volts)
    r_lo, r_hi = np.amin(res), np.amax(res)
    volts = normalizer(volts, norm_min, norm_max)
    res = normalizer(res, norm_min, norm_max)
    volts, res, _, _ = range_extender(volts, res, num_ext)
    mus, sigs, vs = blahut.moments(volts, res)

    def _across_mem(column):
        # Replicate a 1-D statistic across the n_mem memristor columns.
        return np.broadcast_to(column[:, None],
                               (column.size, n_mem)).astype(np.float32)

    return (_across_mem(vs), _across_mem(mus), _across_mem(sigs),
            v_lo, v_hi, r_lo, r_hi)
def get_simulated_data(path, n_mem, num_ext=5, norm_min=-1., norm_max=1.):
    """
    Build synthetic memristor statistics from the measured voltage sweep.

    Each voltage sample from ``path`` is repeated 10 times and a noisy
    copy of the voltage (Gaussian noise, sigma 0.3) stands in for the
    measured resistance; the rest of the pipeline matches
    ``get_memristor_data``.

    Parameters
    ----------
    path : str
        Absolute path to the pickled raw data (only 'V' is used).
    n_mem : float
        Number of memristors we want to simulate.
    num_ext : float
        Number of times the end values should be repeated
        (see range_extender).
    norm_min, norm_max : float
        Target range of the normalisation.

    Returns
    -------
    vs, mus, sigs : np.ndarray, shape (n_samp, n_mem)
    orig_min_Vs, orig_max_Vs, orig_min_Rs, orig_max_Rs : float
        Extremes of the data before normalisation.
    """
    volts, _ = get_raw_data(path)
    volts = np.repeat(np.array(volts), 10)
    noise = np.random.normal(0, 0.3, len(volts))  # 0.085 for 2.68 bits
    res = volts + noise
    v_lo, v_hi = np.amin(volts), np.amax(volts)
    r_lo, r_hi = np.amin(res), np.amax(res)
    volts = normalizer(volts, norm_min, norm_max)
    res = normalizer(res, norm_min, norm_max)
    volts, res, _, _ = range_extender(volts, res, num_ext)
    mus, sigs, vs = blahut.moments(volts, res)

    def _across_mem(column):
        # Replicate a 1-D statistic across the n_mem memristor columns.
        return np.broadcast_to(column[:, None],
                               (column.size, n_mem)).astype(np.float32)

    return (_across_mem(vs), _across_mem(mus), _across_mem(sigs),
            v_lo, v_hi, r_lo, r_hi)
| [
37811,
16594,
5499,
284,
1330,
1066,
1585,
273,
1366,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
18033,
62,
24629,
6359,
1330,
33367,
315,
198,
198,
4299,
651,
62,
1831,
62,
7... | 2.124209 | 1,739 |
import logging
from PyQt5 import Qsci
from PyQt5.QtGui import QColor, QFont
# Module-level logger named after this module, so its records can be
# filtered/configured by the application's logging setup.
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
9485,
48,
83,
20,
1330,
1195,
36216,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1195,
10258,
11,
1195,
23252,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
... | 2.577778 | 45 |
from .linear_regression import LinearRegression
from .logistic_regression import LogisticRegression | [
6738,
764,
29127,
62,
2301,
2234,
1330,
44800,
8081,
2234,
198,
6738,
764,
6404,
2569,
62,
2301,
2234,
1330,
5972,
2569,
8081,
2234
] | 4.304348 | 23 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 14:27:27 2019
@author: Reuben
Variables help resultbox know how to interpret and display data. Sometimes,
variables have different components, like x, y, and z coordinates. They
often have units, such as meters.
The idea is that we define a variable just once. Whenever we add in some
data for that variable to a Box, we also pass in that variable. Then, we
can let resultbox take care of the rest.
"""
import pandas as pd
from difflib import SequenceMatcher
from . import utils
def _expand_single(key, val, store, specified=None):
''' Expand a value into its components
Args:
key (Variable): The variable key
val: The data for that variable
store (Store): The variable store
Returns:
dict: A dictionary of keys and values. If the variables has components,
the dictionary contains the component keys and component values.
'''
if isinstance(val, dict):
r = expand(val, store)
return {key: r}
else:
if key in store:
if specified is not None and key not in specified:
return {key, val}
if store[key].subkeys is not None:
subkeys = store[key].subkeys
if len(val) == len(subkeys):
out = {}
for subkey, v in zip(subkeys, val):
out[subkey] = v
return out
return {key: val}
def expand(source, store, specified=None):
    ''' Expand variable components within a list or dictionary recursively

    Args:
        source (list or dict): The source list (of dictionaries) or
            dictionary. The keys must exist in the store.
        store (Store): The corresponding Store instance.
        specified (list): Optional list of keys to expand. It is now
            forwarded to every nested expansion (the original dropped it
            when recursing into list elements).

    Returns:
        list or dict: The expanded list or dictionary.

    Raises:
        TypeError: if ``source`` is neither a list nor a dict (the
            original raised UnboundLocalError on ``return out`` here).
    '''
    if isinstance(source, list):
        return [expand(item, store, specified) for item in source]
    if isinstance(source, dict):
        out = {}
        for key, val in source.items():
            out.update(_expand_single(key, val, store, specified))
        return out
    raise TypeError(
        'source must be a list or dict, got %s' % type(source).__name__)
class Store(dict):
    ''' A store is a container for Variables

    Keys are full variable keys (name plus unit) and values are the
    Variable instances themselves.
    '''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): `new` reads these two attributes but the original
        # class never initialised them, so the first call raised
        # AttributeError. `_unique` defaults to True to match the
        # documented "names must be unique" behaviour — confirm against
        # any external code that set these attributes directly.
        self._unique = True
        # Maps identifier -> variable key for id_starts_with lookups.
        self._id_dct = {}

    def new(self, name, doc=None, unit=None, components=None, sep=' - ',
            category=None, tags=None, safe=True, identifier=None):
        ''' Create a new variable

        Args:
            name (str): The variable name
            doc (str): A documentation string. Defaults to None.
            unit (str): The units of the variable (usually abbreviated).
                Defaults to None.
            components (list[str]): A list of names for each
                component. Defaults to None.
            sep (str): The separator between the name and any component
                names.
            category (str): An optional category
            tags (list[str]): Optional tags
            safe (bool): Optional. If true, do not allow duplicates.
                Defaults to True.
            identifier (str): [Optional] Identifier for the variable.

        Returns:
            Variable: The new variable

        Note:
            The 'add' method is a copy of this method.
        '''
        new = Variable(name, doc, unit, components=components, sep=sep,
                       category=category, tags=tags, identifier=identifier)
        if self._unique:
            if new.key in self and safe:
                raise KeyError('Key "' + str(name) + '" already exists. '
                               + 'Names must be unique.')
            elif new.key in self and not safe:
                # Duplicate requested in non-safe mode: reuse existing.
                return self[new.key]
        self[new.key] = new
        if identifier is not None:
            self._id_dct[identifier] = new.key
        return new

    def id_starts_with(self, s):
        """ Return the variables whose identifier starts with a prefix

        Args:
            s (str): The string at the start of the identifiers.

        Returns:
            list: List of matching variables. If no variables match, the
            list will be empty.
        """
        d = self._id_dct
        return [self[v] for k, v in d.items() if k.startswith(s)]

    def nearest(self, key):
        ''' Return the variable that best matches the input string

        Args:
            key (str): The input string

        Returns:
            Variable: The variable with the key that best matches the
            input (highest difflib.SequenceMatcher ratio).
        '''
        keys = list(self.keys())
        ratios = [SequenceMatcher(None, key, k).ratio() for k in keys]
        return self[keys[ratios.index(max(ratios))]]

    def suffixed(self, variable, suffix):
        ''' Create or return a suffixed variable using an existing one

        Args:
            variable (Variable): A variable
            suffix (str): The suffix to append to the name

        Returns:
            Variable: Creates a new one if needed, or returns existing.
        '''
        new_name = variable.name + suffix
        key = Variable._append_unit(new_name, variable.unit)
        if key in self:
            return self[key]
        else:
            kwargs = variable.to_dict()
            kwargs['name'] = new_name
            return self.new(**kwargs)

    # Alias so stores can be populated with either spelling.
    add = new
class Variable(str):
    ''' Metadata for specific data

    A Variable *is* a string: its value is the full key — the name with
    the unit appended, e.g. ``"name [unit]"`` — so it can be used
    directly wherever a plain string key is expected.

    Args:
        name (str): The name of the variable
        doc (str): A documentation string. Defaults to None.
        unit (str): The units of the variable (usually abbreviated).
            Defaults to None.
        components (list[str]): A list of names for each
            component. Defaults to None.
        sep (str): The separator between the name and any component names.
        category (str): An optional category
        tags (list[str]): Optional tags
        identifier (str): An optional identifier

    Note:
        Variables subclass `str`, so they can be used like strings. Care is
        needed when serialising and deserialising them, as otherwise their
        special attributes will be lost.
    '''

    def __new__(cls, name, doc=None, unit=None, components=None, sep=' - ',
                category=None, tags=None, identifier=None):
        # NOTE(review): the original class defined no constructor, so
        # calls such as Store.new's Variable(name, doc, unit, ...) raised
        # TypeError on str.__new__. The string value is chosen to match
        # the key format used by Store.suffixed: "name [unit]".
        self = super().__new__(cls, cls._append_unit(name, unit))
        self.name = name
        self.doc = doc
        self.unit = unit
        self.components = components
        self.sep = sep
        self.category = category
        self.tags = tags
        self.identifier = identifier
        return self

    @property
    def key(self):
        ''' The full string key of the variable ("name [unit]")

        NOTE(review): Store.new references `new.key`, which the original
        class never defined; it is the variable's own string value.
        '''
        return str(self)

    def to_dict(self):
        ''' Create a dictionary containing the Variable attributes '''
        d = {'name': self.name,
             'doc': self.doc,
             'unit': self.unit,
             'components': self.components,
             'sep': self.sep,
             'category': self.category,
             'tags': self.tags,
             'identifier': self.identifier}
        return d

    @classmethod
    def from_dict(cls, dct):
        ''' Create a new Variable instance from a dictionary of attributes '''
        return cls(**dct)

    def to_str(self):
        ''' Create a string containing the Variable attributes '''
        dct = self.to_dict()
        return utils.dict_to_str(dct, val_sep='=', key_sep=';')

    @classmethod
    def from_str(cls, s):
        ''' Create a new Variable instance from a string of attributes '''
        d = utils.str_to_dict(s, val_sep='=', key_sep=';')
        return cls.from_dict(d)

    @classmethod
    def _append_unit(cls, string, unit):
        ''' Add the unit to the string (no-op when unit is None) '''
        if unit is not None:
            return string + ' [' + unit + ']'
        else:
            return string

    def _component_name(self, component):
        ''' Get the full key for a component '''
        component_name = self.name + self.sep + component
        return self._append_unit(component_name, self.unit)

    @property
    def subkeys(self):
        ''' Return a list of keys for the variable components

        Returns None when the variable has no components.
        '''
        if self.components is None:
            return None
        keys = []
        for component in self.components:
            keys.append(self._component_name(component))
        return keys

    @property
    def label(self):
        ''' The variable name formatted with a trailing colon '''
        return self.name + ':'
class Aliases(dict):
    ''' Maps alias keys to the variables they stand for

    Each entry maps an alias string to its canonical variable. The
    ``translate*`` methods walk standard data structures and swap every
    alias they find — whether a bare string or a dictionary key — for its
    variable. An unknown alias raises KeyError, exactly like a plain
    dict lookup.
    '''

    def translate(self, obj):
        ''' Recursively translate an object, swapping any dictionary keys '''
        if obj is None:
            return None
        if isinstance(obj, str):
            return self.translate_str(obj)
        if isinstance(obj, list):
            return self.translate_list(obj)
        if isinstance(obj, dict):
            return self.translate_dict(obj)
        # Anything else (numbers, arrays, ...) passes through untouched.
        return obj

    def translate_str(self, s):
        ''' Translate a single alias string (KeyError if unknown) '''
        return self[s]

    def translate_dict_vals(self, dct):
        ''' Translate only the values of a dictionary; keys untouched '''
        return {key: self.translate(value) for key, value in dct.items()}

    def translate_dict(self, dct):
        ''' Translate the keys of a dictionary, recursing into values '''
        return {self[key]: self.translate(value)
                for key, value in dct.items()}

    def translate_list(self, lst):
        ''' Translate every element of a list '''
        return [self.translate(element) for element in lst]

    def inverse(self):
        ''' Return an inverse dictionary of self (variable -> alias)

        Note:
            Values must be unique.
        '''
        return dict(zip(self.values(), self.keys()))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
8621,
220,
352,
1478,
25,
1983,
25,
1983,
13130,
198,
198,
31,
9800,
25,
797,
44636,
198,
198,
23907,
2977,
1037,
1255,
3524,
760,
703,
2... | 2.281153 | 4,197 |
from enum import Enum
| [
6738,
33829,
1330,
2039,
388,
198
] | 3.666667 | 6 |
import locker
import os
import tkinter as tk
from tkinter import PhotoImage
from tkinter import messagebox
from tkinter import filedialog
# Working directory of the process. NOTE(review): not referenced in this
# block; presumably used elsewhere in the module — confirm.
cwd = os.getcwd()
# Make sure the 'files' directory exists (assumed to hold the folders this
# tool manages — TODO confirm against the `locker` module).
if not os.path.exists('files/'):
    os.mkdir('files')
if __name__ == '__main__':
    # The Tk root window must exist before any PhotoImage is created.
    root = tk.Tk()
    root.geometry('400x305')
    root.title('Folder Locker/Unlocker')
    # Fixed-size window: disable resizing in both directions.
    root.resizable(0,0)
    # Toolbar icons; .subsample(2,2) keeps every 2nd pixel in each
    # dimension, i.e. halves the displayed size.
    lock_icon = PhotoImage(file='icons/lock.png').subsample(2,2)
    unlock_icon = PhotoImage(file='icons/unlock.png').subsample(2,2)
    back_icon = PhotoImage(file='icons/back.png')
    choose_folder_icon = PhotoImage(file='icons/choose_folder.png')
    # Application is defined elsewhere in this file; start the Tk event loop.
    app = Application(master=root)
    app.mainloop()
11748,
17355,
198,
198,
11748,
28686,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
5555,
5159,
198,
6738,
256,
74,
3849,
1330,
3275,
3524,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
198,
198,
66,
169... | 2.638655 | 238 |
import yaml
import os
if __name__ == "__main__":
    # DPP_GITHUB_REPOSITORIES is a ';'-separated list of repository specs,
    # each either "owner/name" or "owner/name:base-path".
    repos = os.environ.get('DPP_GITHUB_REPOSITORIES')
    if repos is not None:
        config = {}
        for spec in repos.split(';'):
            if not spec:
                # Skip empty entries, e.g. from a trailing ';'.
                continue
            # Split on the FIRST ':' only — the original two-target unpack
            # of split(':') crashed on specs containing more than one colon.
            repo, _, path = spec.partition(':')
            config[repo] = {
                'repository': repo,
            }
            if path:
                config[repo]['base-path'] = path
        with open('github.source-spec.yaml', 'w') as source_spec:
            yaml.dump(config, source_spec)
| [
11748,
331,
43695,
198,
11748,
28686,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1128,
418,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
6322,
47,
62,
38,
10554,
10526,
62,
35316,
2640,
2043,... | 1.780488 | 369 |