Dataset columns:

| column | type | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 10 to 805k |
| ext | string | 6 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 176 |
| max_stars_repo_name | string | length 7 to 114 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 176 |
| max_issues_repo_name | string | length 7 to 114 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 48.5k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 176 |
| max_forks_repo_name | string | length 7 to 114 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 10 to 805k |
| avg_line_length | float64 | 5.53 to 11k |
| max_line_length | int64 | 10 to 129k |
| alphanum_fraction | float64 | 0.13 to 0.93 |
| content_no_comment | string | length 0 to 449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
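Each data row below pairs this metadata with a Python file's raw content and a comment-stripped variant (content_no_comment). As a minimal sketch, assuming the rows come from a Hugging Face-style dataset (the dataset path in the snippet is a placeholder, not the dataset's real name), they could be loaded and filtered like this:

```python
# Sketch only: "user/python-code-dump" is a hypothetical dataset path.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train", streaming=True)

# Keep MIT-licensed files whose '#' comments were stripped, and print a few stats.
for row in ds:
    if "MIT" in row["max_stars_repo_licenses"] and row["is_sharp_comment_removed"]:
        print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
        break
```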
f7156ae46165bfa283ef31892e3fde5d02b6eeab | 166 | py | Python |
StarAcmSpider/StarAcmSpider/items.py | MeiK-h/StarACM | 54654bdc19c8eff02c67ba77784d08368570d4e7 | ["MIT"] | null | null | null |
StarAcmSpider/StarAcmSpider/items.py | MeiK-h/StarACM | 54654bdc19c8eff02c67ba77784d08368570d4e7 | ["MIT"] | null | null | null |
StarAcmSpider/StarAcmSpider/items.py | MeiK-h/StarACM | 54654bdc19c8eff02c67ba77784d08368570d4e7 | ["MIT"] | null | null | null |
import scrapy
class StarAcmSpiderItem(scrapy.Item):
username = scrapy.Field()
source = scrapy.Field()
run_id = scrapy.Field()
data = scrapy.Field()
| 18.444444 | 37 | 0.674699 |
import scrapy
class StarAcmSpiderItem(scrapy.Item):
username = scrapy.Field()
source = scrapy.Field()
run_id = scrapy.Field()
data = scrapy.Field()
| true | true |
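The items.py file in the row above only declares the item's fields. A hedged sketch of how a spider in the same project might populate it follows; the spider name, start URL, and CSS selectors are illustrative assumptions, not taken from the MeiK-h/StarACM repository:

```python
# Illustrative spider: name, URL and selectors are assumptions.
import scrapy

from StarAcmSpider.items import StarAcmSpiderItem

class SubmissionSpider(scrapy.Spider):
    name = "submissions"
    start_urls = ["https://example.com/status"]  # placeholder status page

    def parse(self, response):
        for run in response.css("tr.status-row"):
            item = StarAcmSpiderItem()
            item["username"] = run.css("td.user::text").get()
            item["source"] = "example-oj"            # which judge the run came from
            item["run_id"] = run.css("td.run::text").get()
            item["data"] = run.get()                 # raw row HTML for later parsing
            yield item
```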
f7156b5c602c9fa2552e9ba98cbbe35c20310e78 | 15,637 | py | Python |
MomentumProject/mtInitialize_004.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | ["Apache-2.0"] | null | null | null |
MomentumProject/mtInitialize_004.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | ["Apache-2.0"] | null | null | null |
MomentumProject/mtInitialize_004.py | hpgit/HumanFoot | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | ["Apache-2.0"] | null | null | null |
import copy
import sys
#if '../PyCommon/modules' not in sys.path:
# sys.path.append('../PyCommon/modules')
if './modules' not in sys.path:
sys.path.append('./modules')
import Resource.ysMotionLoader as yf
import Simulator.ysPhysConfig as ypc
import Math.mmMath as mm
import Motion.ysHierarchyEdit as yme
import Motion.ysMotion as ym
## Constant
HIP = 'Hips'
RIGHT_UP_LEG = 'RightUpLeg'
RIGHT_LEG = 'RightLeg'
RIGHT_FOOT = 'RightFoot'
RIGHT_TOES = 'RightToes'
RIGHT_TOES_END = 'RightToes_Effector'
LEFT_UP_LEG = 'LeftUpLeg'
LEFT_LEG = 'LeftLeg'
LEFT_FOOT = 'LeftFoot'
LEFT_TOES = 'LeftToes'
LEFT_TOES_END = 'LeftToes_Effector'
LEFT_SHOULDER = 'LeftShoulder1'
LEFT_ARM = 'LeftArm'
LEFT_FORE_ARM = 'LeftForeArm'
LEFT_HAND = 'LeftHand'
LEFT_HAND_END = 'LeftHand_Effector'
RIGHT_SHOULDER = 'RightShoulder'
RIGHT_ARM = 'RightArm'
RIGHT_FORE_ARM = 'RightForeArm'
RIGHT_HAND = 'RightHand'
RIGHT_HAND_END = 'RightHand_Effector'
SPINE = 'Spine'
SPINE1 = 'Spine1'
HEAD = 'HEad'
HEAD_END = 'HEad_Effector'
LEFT_PHALANGE = 'LeftForeFoot'
RIGHT_PHALANGE = 'RightForeFoot'
LEFT_TARSUS = 'LeftRearFoot'
RIGHT_TARSUS = 'RightRearFoot'
LEFT_METATARSUS = 'LeftMidFoot'
RIGHT_METATARSUS = 'RightMidFoot'
LEFT_FOOT_SIDE_L = 'LeftFootSideL'
LEFT_FOOT_SIDE_R = 'LeftFootSideR'
RIGHT_FOOT_SIDE_L = 'RightFootSideL'
RIGHT_FOOT_SIDE_R = 'RightFootSideR'
'''
HIP = 'hip'
RIGHT_UP_LEG_DUMMY = 'rightuplegdummy'
RIGHT_UP_LEG = 'rightupleg'
RIGHT_LEG = 'rightleg'
RIGHT_FOOT = 'rightfoot'
RIGHT_TOES = 'righttoes'
RIGHT_TOES_END = 'righttoes_Effector'
LEFT_UP_LEG_DUMMY = 'leftuplegdummy'
LEFT_UP_LEG = 'leftupleg'
LEFT_LEG = 'leftleg'
LEFT_FOOT = 'leftfoot'
LEFT_TOES = 'lefttoes'
LEFT_TOES_END = 'lefttoes_Effector'
LEFT_SHOULDER_DUMMY = 'leftshoulder1dummy'
LEFT_SHOULDER = 'leftshoulder1'
LEFT_ARM = 'leftarm'
LEFT_FORE_ARM = 'leftforearm'
LEFT_HAND = 'lefthand'
LEFT_HAND_END = 'lefthand_Effector'
RIGHT_SHOULDER_DUMMY = 'rightshoulderdummy'
RIGHT_SHOULDER = 'rightshoulder'
RIGHT_ARM = 'rightarm'
RIGHT_FORE_ARM = 'rightforearm'
RIGHT_HAND = 'righthand'
RIGHT_HAND_END = 'righthand_Effector'
SPINE_DUMMY = 'spinedummy'
SPINE = 'spine'
SPINE1 = 'spine1'
HEAD_DUMMY = 'headdummy'
HEAD = 'head'
HEAD_END = 'head_Effector'
'''
STAND = 0
FORWARD_JUMP = 1
TAEKWONDO = 2
## Motion File
#MOTION = STAND
#MOTION = FORWARD_JUMP
MOTION = TAEKWONDO
FOOT_PART_NUM = 3
def create_vchain_5():
# motion
motion = yf.readBvhFile('vchain_5_rotate_root0.bvh', 1)
# world, model
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .8
for i in range(motion[0].skeleton.getElementNum()):
mcfg.addNode(motion[0].skeleton.getElementName(i))
node = mcfg.getNode('link0')
node.width = .3
node.mass = 6.
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 60
wcfg.timeStep = (1/30.)/stepsPerFrame
# parameter
config = {}
config['Kt'] = 20; config['Dt'] = 2*(config['Kt']**.5) # tracking gain
config['Kl'] = 1; config['Dl'] = 2*(config['Kl']**.5) # linear balance gain
config['Kh'] = 1; config['Dh'] = 2*(config['Kh']**.5) # angular balance gain
config['Ks'] = 5000; config['Ds'] = 2*(config['Ks']**.5) # penalty force spring gain
config['Bt'] = 1.
config['Bl'] = 1.
config['Bh'] = 1.
# etc
config['weightMap'] = {}
config['supLink'] = 'link0'
return motion, mcfg, wcfg, stepsPerFrame, config
def create_biped():
# motion
#motionName = 'wd2_n_kick.bvh'
if MOTION == STAND:
motionName = 'wd2_stand.bvh'
elif MOTION == FORWARD_JUMP:
motionName = 'woddy2_jump0.bvh'
elif MOTION == TAEKWONDO :
motionName = './MotionFile/wd2_098_V001.bvh'
#motionName = 'ww13_41_V001.bvh'
motion = yf.readBvhFile(motionName, .01)
yme.removeJoint(motion, HEAD, False)
yme.removeJoint(motion, RIGHT_SHOULDER, False)
yme.removeJoint(motion, LEFT_SHOULDER, False)
if FOOT_PART_NUM == 1 :
yme.removeJoint(motion, RIGHT_TOES_END, False)
yme.removeJoint(motion, LEFT_TOES_END, False)
yme.removeJoint(motion, RIGHT_HAND_END, False)
yme.removeJoint(motion, LEFT_HAND_END, False)
yme.offsetJointLocal(motion, RIGHT_ARM, (.03,-.05,0), False)
yme.offsetJointLocal(motion, LEFT_ARM, (-.03,-.05,0), False)
yme.rotateJointLocal(motion, HIP, mm.exp(mm.v3(1,0,0), .01), False)
yme.rotateJointLocal(motion, LEFT_FOOT, mm.exp(mm.v3(2.5,-0.0,.3), -.5), False)
yme.rotateJointLocal(motion, RIGHT_FOOT, mm.exp(mm.v3(2.5,0.0,-.3), -.5), False)
if MOTION == FORWARD_JUMP:
yme.rotateJointLocal(motion, LEFT_UP_LEG, mm.exp(mm.v3(0.0,.0,1.), .08), False)
yme.rotateJointLocal(motion, LEFT_LEG, mm.exp(mm.v3(0.0,1.0,0.), -.2), False)
if FOOT_PART_NUM > 1:
yme.addJoint(motion, RIGHT_FOOT, RIGHT_TARSUS)
yme.addJoint(motion, RIGHT_TARSUS, 'RIGHT_Dummy1')
yme.addJoint(motion, LEFT_FOOT, LEFT_TARSUS)
yme.addJoint(motion, LEFT_TARSUS, 'LEFT_Dummy1')
yme.rotateJointLocal(motion, LEFT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
yme.rotateJointLocal(motion, RIGHT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
if FOOT_PART_NUM == 5 :
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_L)
yme.addJoint(motion, LEFT_FOOT_SIDE_L, 'LEFT_Dummy2')
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_R)
yme.addJoint(motion, LEFT_FOOT_SIDE_R, 'LEFT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_L)
yme.addJoint(motion, RIGHT_FOOT_SIDE_L, 'RIGHT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_R)
yme.addJoint(motion, RIGHT_FOOT_SIDE_R, 'RIGHT_Dummy2')
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.updateGlobalT(motion)
################
if MOTION == FORWARD_JUMP:
motion = motion[515:555]
elif MOTION == TAEKWONDO:
## Taekwondo base-step
motion = motion[0:31]
#motion = motion[564:600]
## Taekwondo turning-kick
#motion = motion[108:-1]
#motion = motion[108:109]
motion[0:0] = [motion[0]]*100
motion.extend([motion[-1]]*5000)
# world, model
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .9
for name in massMap:
node = mcfg.addNode(name)
node.mass = massMap[name]
node = mcfg.getNode(HIP)
node.length = .2
node.width = .25
node = mcfg.getNode(SPINE1)
node.length = .2
node.offset = (0,0,0.1)
node = mcfg.getNode(SPINE)
node.width = .22
#node.length = .2 ####
if FOOT_PART_NUM == 1 :
length1 = .25
width1 = .2
mass1 = 4.
elif FOOT_PART_NUM == 3:
length1 = .1
width1 = .2
mass1 = 1.5
length2 = .1
width2 = .2
mass2 = 1.5
elif FOOT_PART_NUM == 5:
length1 = .1
width1 = .065
mass1 = .5
length2 = .1
width2 = .2
mass2 = 1.5
node = mcfg.getNode(RIGHT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
node = mcfg.getNode(LEFT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
if FOOT_PART_NUM == 5:
node = mcfg.getNode(LEFT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(LEFT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
if FOOT_PART_NUM > 1:
node = mcfg.getNode(LEFT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(RIGHT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(LEFT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
node = mcfg.getNode(RIGHT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 30
wcfg.timeStep = (1/30.)/(stepsPerFrame)
#stepsPerFrame = 10
#wcfg.timeStep = (1/120.)/(stepsPerFrame)
#wcfg.timeStep = (1/1800.)
# parameter
config = {}
config['Kt'] = 200; config['Dt'] = 2*(config['Kt']**.5) # tracking gain
config['Kl'] = .10; config['Dl'] = 2*(config['Kl']**.5) # linear balance gain
config['Kh'] = 0.1; config['Dh'] = 2*(config['Kh']**.5) # angular balance gain
config['Ks'] = 20000; config['Ds'] = 2*(config['Ks']**.5) # penalty force spring gain
config['Bt'] = 1.
config['Bl'] = 1.#0.5
config['Bh'] = 1.
if FOOT_PART_NUM == 1:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:1., LEFT_FOOT:1., HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1.}
elif FOOT_PART_NUM == 3:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.3, RIGHT_LEG:.3, LEFT_UP_LEG:.3, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3}
elif FOOT_PART_NUM == 5:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['supLink'] = LEFT_FOOT
config['supLink2'] = RIGHT_FOOT
#config['end'] = 'HIP'
config['end'] = SPINE1
config['const'] = HIP
config['root'] = HIP
config['FootPartNum'] = FOOT_PART_NUM
config['FootLPart'] = [LEFT_FOOT, LEFT_TOES, LEFT_TARSUS, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R ]
config['FootRPart'] = [RIGHT_FOOT, RIGHT_TOES, RIGHT_TARSUS, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R]
return motion, mcfg, wcfg, stepsPerFrame, config
#===============================================================================
# biped config
#===============================================================================
# motion, mesh config
g_motionDirConfigMap = {}
g_motionDirConfigMap['../Data/woody2/Motion/Physics2/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), .05), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.I_SO3()}
g_motionDirConfigMap['../Data/woody2/Motion/Balancing/'] = \
{'footRot': mm.exp(mm.v3(1,-.5,0), -.6), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.exp(mm.v3(1,0,0), .01)}
g_motionDirConfigMap['../Data/woody2/Motion/VideoMotion/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.05), 'yOffset': .01, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .0)}
g_motionDirConfigMap['../Data/woody2/Motion/Samsung/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.03), 'yOffset': .0, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .03)}
#===============================================================================
# # reloadable config
#===============================================================================
def buildMassMap():
massMap = {}
massMap = massMap.fromkeys([HEAD, HEAD_END, HIP, LEFT_ARM, LEFT_FOOT, LEFT_FORE_ARM, LEFT_HAND, LEFT_HAND_END, LEFT_LEG, LEFT_SHOULDER, LEFT_TOES, LEFT_TOES_END, LEFT_UP_LEG, RIGHT_ARM, RIGHT_FOOT, RIGHT_FORE_ARM, RIGHT_HAND, RIGHT_HAND_END, RIGHT_LEG, RIGHT_SHOULDER, RIGHT_TOES, RIGHT_TOES_END, RIGHT_UP_LEG, SPINE, SPINE1, LEFT_PHALANGE, RIGHT_PHALANGE, LEFT_TARSUS, RIGHT_TARSUS
, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R], 0.)
# torso : 10
massMap[HIP] += 2.
#massMap[SPINE] += 8.
massMap[SPINE] += 8.
# head : 3
massMap[SPINE1] += 3.
# right upper arm : 2
massMap[RIGHT_ARM] += 2.
# left upper arm : 2
massMap[LEFT_ARM] += 2.
# right lower arm : 1
#massMap[RIGHT_FORE_ARM] = 1.
massMap[RIGHT_FORE_ARM] = 2.
# left lower arm : 1
#massMap[LEFT_FORE_ARM] = 1.
massMap[LEFT_FORE_ARM] = 2.
# right thigh : 7
massMap[HIP] += 2.
massMap[RIGHT_UP_LEG] += 5.
# left thigh : 7
massMap[HIP] += 2.
massMap[LEFT_UP_LEG] += 5.
# right shin : 5
massMap[RIGHT_LEG] += 5.
# left shin : 5
massMap[LEFT_LEG] += 5.
# right foot : 4
massMap[RIGHT_FOOT] += 2.
# left foot : 4
massMap[LEFT_FOOT] += 2.
massMap[LEFT_TOES] += 2.
massMap[RIGHT_TOES] += 2.
massMap[LEFT_TARSUS] += 2.
massMap[RIGHT_TARSUS] += 2.
return massMap
massMap = buildMassMap()
| 34.904018 | 387 | 0.592249 |
import copy
import sys
if './modules' not in sys.path:
sys.path.append('./modules')
import Resource.ysMotionLoader as yf
import Simulator.ysPhysConfig as ypc
import Math.mmMath as mm
import Motion.ysHierarchyEdit as yme
import Motion.ysMotion as ym
HIP = 'Hips'
RIGHT_UP_LEG = 'RightUpLeg'
RIGHT_LEG = 'RightLeg'
RIGHT_FOOT = 'RightFoot'
RIGHT_TOES = 'RightToes'
RIGHT_TOES_END = 'RightToes_Effector'
LEFT_UP_LEG = 'LeftUpLeg'
LEFT_LEG = 'LeftLeg'
LEFT_FOOT = 'LeftFoot'
LEFT_TOES = 'LeftToes'
LEFT_TOES_END = 'LeftToes_Effector'
LEFT_SHOULDER = 'LeftShoulder1'
LEFT_ARM = 'LeftArm'
LEFT_FORE_ARM = 'LeftForeArm'
LEFT_HAND = 'LeftHand'
LEFT_HAND_END = 'LeftHand_Effector'
RIGHT_SHOULDER = 'RightShoulder'
RIGHT_ARM = 'RightArm'
RIGHT_FORE_ARM = 'RightForeArm'
RIGHT_HAND = 'RightHand'
RIGHT_HAND_END = 'RightHand_Effector'
SPINE = 'Spine'
SPINE1 = 'Spine1'
HEAD = 'HEad'
HEAD_END = 'HEad_Effector'
LEFT_PHALANGE = 'LeftForeFoot'
RIGHT_PHALANGE = 'RightForeFoot'
LEFT_TARSUS = 'LeftRearFoot'
RIGHT_TARSUS = 'RightRearFoot'
LEFT_METATARSUS = 'LeftMidFoot'
RIGHT_METATARSUS = 'RightMidFoot'
LEFT_FOOT_SIDE_L = 'LeftFootSideL'
LEFT_FOOT_SIDE_R = 'LeftFootSideR'
RIGHT_FOOT_SIDE_L = 'RightFootSideL'
RIGHT_FOOT_SIDE_R = 'RightFootSideR'
STAND = 0
FORWARD_JUMP = 1
TAEKWONDO = 2
MOTION = TAEKWONDO
FOOT_PART_NUM = 3
def create_vchain_5():
motion = yf.readBvhFile('vchain_5_rotate_root0.bvh', 1)
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .8
for i in range(motion[0].skeleton.getElementNum()):
mcfg.addNode(motion[0].skeleton.getElementName(i))
node = mcfg.getNode('link0')
node.width = .3
node.mass = 6.
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 60
wcfg.timeStep = (1/30.)/stepsPerFrame
config = {}
config['Kt'] = 20; config['Dt'] = 2*(config['Kt']**.5)
config['Kl'] = 1; config['Dl'] = 2*(config['Kl']**.5)
config['Kh'] = 1; config['Dh'] = 2*(config['Kh']**.5)
config['Ks'] = 5000; config['Ds'] = 2*(config['Ks']**.5)
config['Bt'] = 1.
config['Bl'] = 1.
config['Bh'] = 1.
config['weightMap'] = {}
config['supLink'] = 'link0'
return motion, mcfg, wcfg, stepsPerFrame, config
def create_biped():
if MOTION == STAND:
motionName = 'wd2_stand.bvh'
elif MOTION == FORWARD_JUMP:
motionName = 'woddy2_jump0.bvh'
elif MOTION == TAEKWONDO :
motionName = './MotionFile/wd2_098_V001.bvh'
motion = yf.readBvhFile(motionName, .01)
yme.removeJoint(motion, HEAD, False)
yme.removeJoint(motion, RIGHT_SHOULDER, False)
yme.removeJoint(motion, LEFT_SHOULDER, False)
if FOOT_PART_NUM == 1 :
yme.removeJoint(motion, RIGHT_TOES_END, False)
yme.removeJoint(motion, LEFT_TOES_END, False)
yme.removeJoint(motion, RIGHT_HAND_END, False)
yme.removeJoint(motion, LEFT_HAND_END, False)
yme.offsetJointLocal(motion, RIGHT_ARM, (.03,-.05,0), False)
yme.offsetJointLocal(motion, LEFT_ARM, (-.03,-.05,0), False)
yme.rotateJointLocal(motion, HIP, mm.exp(mm.v3(1,0,0), .01), False)
yme.rotateJointLocal(motion, LEFT_FOOT, mm.exp(mm.v3(2.5,-0.0,.3), -.5), False)
yme.rotateJointLocal(motion, RIGHT_FOOT, mm.exp(mm.v3(2.5,0.0,-.3), -.5), False)
if MOTION == FORWARD_JUMP:
yme.rotateJointLocal(motion, LEFT_UP_LEG, mm.exp(mm.v3(0.0,.0,1.), .08), False)
yme.rotateJointLocal(motion, LEFT_LEG, mm.exp(mm.v3(0.0,1.0,0.), -.2), False)
if FOOT_PART_NUM > 1:
yme.addJoint(motion, RIGHT_FOOT, RIGHT_TARSUS)
yme.addJoint(motion, RIGHT_TARSUS, 'RIGHT_Dummy1')
yme.addJoint(motion, LEFT_FOOT, LEFT_TARSUS)
yme.addJoint(motion, LEFT_TARSUS, 'LEFT_Dummy1')
yme.rotateJointLocal(motion, LEFT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_TOES, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
yme.rotateJointLocal(motion, RIGHT_TARSUS, mm.exp(mm.v3(1.,0.0,0.0), .52), False)
if FOOT_PART_NUM == 5 :
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_L)
yme.addJoint(motion, LEFT_FOOT_SIDE_L, 'LEFT_Dummy2')
yme.addJoint(motion, LEFT_FOOT, LEFT_FOOT_SIDE_R)
yme.addJoint(motion, LEFT_FOOT_SIDE_R, 'LEFT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_L)
yme.addJoint(motion, RIGHT_FOOT_SIDE_L, 'RIGHT_Dummy2')
yme.addJoint(motion, RIGHT_FOOT, RIGHT_FOOT_SIDE_R)
yme.addJoint(motion, RIGHT_FOOT_SIDE_R, 'RIGHT_Dummy2')
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, LEFT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_L, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.rotateJointLocal(motion, RIGHT_FOOT_SIDE_R, mm.exp(mm.v3(1.,0.0,0.0), .45), False)
yme.updateGlobalT(motion)
motion[0:0] = [motion[0]]*100
motion.extend([motion[-1]]*5000)
mcfg = ypc.ModelConfig()
mcfg.defaultDensity = 1000.
mcfg.defaultBoneRatio = .9
for name in massMap:
node = mcfg.addNode(name)
node.mass = massMap[name]
node = mcfg.getNode(HIP)
node.length = .2
node.width = .25
node = mcfg.getNode(SPINE1)
node.length = .2
node.offset = (0,0,0.1)
node = mcfg.getNode(SPINE)
node.width = .22
if FOOT_PART_NUM == 1 :
length1 = .25
width1 = .2
mass1 = 4.
elif FOOT_PART_NUM == 3:
length1 = .1
width1 = .2
mass1 = 1.5
length2 = .1
width2 = .2
mass2 = 1.5
elif FOOT_PART_NUM == 5:
length1 = .1
width1 = .065
mass1 = .5
length2 = .1
width2 = .2
mass2 = 1.5
node = mcfg.getNode(RIGHT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
node = mcfg.getNode(LEFT_FOOT)
node.length = length1
node.width = width1
node.mass = mass1
if FOOT_PART_NUM == 5:
node = mcfg.getNode(LEFT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(LEFT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_L)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (0.07,0.0,0.015)
node = mcfg.getNode(RIGHT_FOOT_SIDE_R)
node.length = length1
node.width = width1
node.mass = mass1
node.offset = (-0.07,0.0,0.015)
if FOOT_PART_NUM > 1:
node = mcfg.getNode(LEFT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(RIGHT_TOES)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.02)
node = mcfg.getNode(LEFT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
node = mcfg.getNode(RIGHT_TARSUS)
node.length = length2
node.width = width2
node.mass = mass2
node.offset = (0,0.0,-0.08)
wcfg = ypc.WorldConfig()
wcfg.planeHeight = 0.
wcfg.useDefaultContactModel = False
stepsPerFrame = 30
wcfg.timeStep = (1/30.)/(stepsPerFrame)
config = {}
config['Kt'] = 200; config['Dt'] = 2*(config['Kt']**.5)
config['Kl'] = .10; config['Dl'] = 2*(config['Kl']**.5)
config['Kh'] = 0.1; config['Dh'] = 2*(config['Kh']**.5)
config['Ks'] = 20000; config['Ds'] = 2*(config['Ks']**.5)
config['Bt'] = 1.
config['Bl'] = 1.
config['Bh'] = 1.
if FOOT_PART_NUM == 1:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:1., LEFT_FOOT:1., HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1.}
elif FOOT_PART_NUM == 3:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.3, RIGHT_LEG:.3, LEFT_UP_LEG:.3, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3}
elif FOOT_PART_NUM == 5:
config['weightMap']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:.3, SPINE1:.3, RIGHT_FOOT:.3, LEFT_FOOT:.3, HIP:.5,
RIGHT_UP_LEG:.1, RIGHT_LEG:.3, LEFT_UP_LEG:.1, LEFT_LEG:.3, LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['weightMap2']={RIGHT_ARM:.2, RIGHT_FORE_ARM:.2, LEFT_ARM:.2, LEFT_FORE_ARM:.2, SPINE:1., SPINE1:.3, RIGHT_FOOT:2.5, LEFT_FOOT:2.5, HIP:1.,
RIGHT_UP_LEG:1., RIGHT_LEG:1., LEFT_UP_LEG:1., LEFT_LEG:1., LEFT_TOES:.3, RIGHT_TOES:.3, LEFT_TARSUS:.3, RIGHT_TARSUS:.3,
LEFT_FOOT_SIDE_L:.3, LEFT_FOOT_SIDE_R:.3, RIGHT_FOOT_SIDE_L:.3, RIGHT_FOOT_SIDE_R:.3}
config['supLink'] = LEFT_FOOT
config['supLink2'] = RIGHT_FOOT
config['end'] = SPINE1
config['const'] = HIP
config['root'] = HIP
config['FootPartNum'] = FOOT_PART_NUM
config['FootLPart'] = [LEFT_FOOT, LEFT_TOES, LEFT_TARSUS, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R ]
config['FootRPart'] = [RIGHT_FOOT, RIGHT_TOES, RIGHT_TARSUS, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R]
return motion, mcfg, wcfg, stepsPerFrame, config
g_motionDirConfigMap = {}
g_motionDirConfigMap['../Data/woody2/Motion/Physics2/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), .05), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.I_SO3()}
g_motionDirConfigMap['../Data/woody2/Motion/Balancing/'] = \
{'footRot': mm.exp(mm.v3(1,-.5,0), -.6), 'yOffset': .0, 'scale':1.,\
'rootRot': mm.exp(mm.v3(1,0,0), .01)}
g_motionDirConfigMap['../Data/woody2/Motion/VideoMotion/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.05), 'yOffset': .01, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .0)}
g_motionDirConfigMap['../Data/woody2/Motion/Samsung/'] = \
{'footRot': mm.exp(mm.v3(1,0,0), -.03), 'yOffset': .0, 'scale':2.53999905501,\
'rootRot': mm.exp(mm.v3(1,0,0), .03)}
def buildMassMap():
massMap = {}
massMap = massMap.fromkeys([HEAD, HEAD_END, HIP, LEFT_ARM, LEFT_FOOT, LEFT_FORE_ARM, LEFT_HAND, LEFT_HAND_END, LEFT_LEG, LEFT_SHOULDER, LEFT_TOES, LEFT_TOES_END, LEFT_UP_LEG, RIGHT_ARM, RIGHT_FOOT, RIGHT_FORE_ARM, RIGHT_HAND, RIGHT_HAND_END, RIGHT_LEG, RIGHT_SHOULDER, RIGHT_TOES, RIGHT_TOES_END, RIGHT_UP_LEG, SPINE, SPINE1, LEFT_PHALANGE, RIGHT_PHALANGE, LEFT_TARSUS, RIGHT_TARSUS
, LEFT_FOOT_SIDE_L, LEFT_FOOT_SIDE_R, RIGHT_FOOT_SIDE_L, RIGHT_FOOT_SIDE_R], 0.)
massMap[HIP] += 2.
massMap[SPINE] += 8.
massMap[SPINE1] += 3.
massMap[RIGHT_ARM] += 2.
massMap[LEFT_ARM] += 2.
massMap[RIGHT_FORE_ARM] = 2.
massMap[LEFT_FORE_ARM] = 2.
massMap[HIP] += 2.
massMap[RIGHT_UP_LEG] += 5.
massMap[HIP] += 2.
massMap[LEFT_UP_LEG] += 5.
massMap[RIGHT_LEG] += 5.
massMap[LEFT_LEG] += 5.
massMap[RIGHT_FOOT] += 2.
massMap[LEFT_FOOT] += 2.
massMap[LEFT_TOES] += 2.
massMap[RIGHT_TOES] += 2.
massMap[LEFT_TARSUS] += 2.
massMap[RIGHT_TARSUS] += 2.
return massMap
massMap = buildMassMap()
| true | true |
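create_biped() and create_vchain_5() in the row above only assemble configuration objects. A minimal consumption sketch follows, assuming the project's PyCommon modules and the referenced BVH files are available on the import path:

```python
# Sketch only: requires the PyCommon modules and BVH files referenced above.
import mtInitialize_004 as mit

motion, mcfg, wcfg, stepsPerFrame, config = mit.create_biped()
print(len(motion), "frames,", stepsPerFrame, "simulation steps per frame")
print("support links:", config["supLink"], config["supLink2"])
```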
f7156beba4fa44c0a6d9b574e93c8eddef795b71 | 10,381 | py | Python |
python/ccxt/async/bitstamp1.py | destenson/ccxt--ccxt | 3928a058cb1ecf00d11309c7812a0fcdb502080a | ["MIT"] | null | null | null |
python/ccxt/async/bitstamp1.py | destenson/ccxt--ccxt | 3928a058cb1ecf00d11309c7812a0fcdb502080a | ["MIT"] | null | null | null |
python/ccxt/async/bitstamp1.py | destenson/ccxt--ccxt | 3928a058cb1ecf00d11309c7812a0fcdb502080a | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
class bitstamp1 (Exchange):
def describe(self):
return self.deep_extend(super(bitstamp1, self).describe(), {
'id': 'bitstamp1',
'name': 'Bitstamp v1',
'countries': 'GB',
'rateLimit': 1000,
'version': 'v1',
'hasCORS': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27786377-8c8ab57e-5fe9-11e7-8ea4-2b05b6bcceec.jpg',
'api': 'https://www.bitstamp.net/api',
'www': 'https://www.bitstamp.net',
'doc': 'https://www.bitstamp.net/api',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'public': {
'get': [
'ticker',
'ticker_hour',
'order_book',
'transactions',
'eur_usd',
],
},
'private': {
'post': [
'balance',
'user_transactions',
'open_orders',
'order_status',
'cancel_order',
'cancel_all_orders',
'buy',
'sell',
'bitcoin_deposit_address',
'unconfirmed_btc',
'ripple_withdrawal',
'ripple_address',
'withdrawal_requests',
'bitcoin_withdrawal',
],
},
},
'markets': {
'BTC/USD': {'id': 'btcusd', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'BTC/EUR': {'id': 'btceur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'EUR/USD': {'id': 'eurusd', 'symbol': 'EUR/USD', 'base': 'EUR', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'XRP/USD': {'id': 'xrpusd', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'XRP/EUR': {'id': 'xrpeur', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'XRP/BTC': {'id': 'xrpbtc', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
'LTC/USD': {'id': 'ltcusd', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'LTC/EUR': {'id': 'ltceur', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
'ETH/USD': {'id': 'ethusd', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'ETH/EUR': {'id': 'etheur', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'ETH/BTC': {'id': 'ethbtc', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
},
})
async def fetch_order_book(self, symbol, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchOrderBook doesn't support " + symbol + ', use it for BTC/USD only')
orderbook = await self.publicGetOrderBook(params)
timestamp = int(orderbook['timestamp']) * 1000
return self.parse_order_book(orderbook, timestamp)
async def fetch_ticker(self, symbol, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchTicker doesn't support " + symbol + ', use it for BTC/USD only')
ticker = await self.publicGetTicker(params)
timestamp = int(ticker['timestamp']) * 1000
vwap = float(ticker['vwap'])
baseVolume = float(ticker['volume'])
quoteVolume = baseVolume * vwap
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': vwap,
'open': float(ticker['open']),
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = None
if 'date' in trade:
timestamp = int(trade['date']) * 1000
elif 'datetime' in trade:
# timestamp = self.parse8601(trade['datetime'])
timestamp = int(trade['datetime']) * 1000
side = 'buy' if (trade['type'] == 0) else 'sell'
order = None
if 'order_id' in trade:
order = str(trade['order_id'])
if 'currency_pair' in trade:
if trade['currency_pair'] in self.markets_by_id:
market = self.markets_by_id[trade['currency_pair']]
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': order,
'type': None,
'side': side,
'price': float(trade['price']),
'amount': float(trade['amount']),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchTrades doesn't support " + symbol + ', use it for BTC/USD only')
market = self.market(symbol)
response = await self.publicGetTransactions(self.extend({
'time': 'minute',
}, params))
return self.parse_trades(response, market, since, limit)
async def fetch_balance(self, params={}):
balance = await self.privatePostBalance()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercase = currency.lower()
total = lowercase + '_balance'
free = lowercase + '_available'
used = lowercase + '_reserved'
account = self.account()
account['free'] = self.safe_float(balance, free, 0.0)
account['used'] = self.safe_float(balance, used, 0.0)
account['total'] = self.safe_float(balance, total, 0.0)
result[currency] = account
return self.parse_balance(result)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' ' + self.version + ' accepts limit orders only')
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' v1 supports BTC/USD orders only')
method = 'privatePost' + self.capitalize(side)
order = {
'amount': amount,
'price': price,
}
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostCancelOrder({'id': id})
def parse_order_status(self, order):
if (order['status'] == 'Queue') or (order['status'] == 'Open'):
return 'open'
if order['status'] == 'Finished':
return 'closed'
return order['status']
async def fetch_order_status(self, id, symbol=None):
await self.load_markets()
response = await self.privatePostOrderStatus({'id': id})
return self.parse_order_status(response)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol:
market = self.market(symbol)
pair = market['id'] if market else 'all'
request = self.extend({'id': pair}, params)
response = await self.privatePostOpenOrdersId(request)
return self.parse_trades(response, market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
raise NotSupported(self.id + ' fetchOrder is not implemented yet')
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
auth = nonce + self.uid + self.apiKey
signature = self.encode(self.hmac(self.encode(auth), self.encode(self.secret)))
query = self.extend({
'key': self.apiKey,
'signature': signature.upper(),
'nonce': nonce,
}, query)
body = self.urlencode(query)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'status' in response:
if response['status'] == 'error':
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 44.174468 | 137 | 0.50814 |
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
class bitstamp1 (Exchange):
def describe(self):
return self.deep_extend(super(bitstamp1, self).describe(), {
'id': 'bitstamp1',
'name': 'Bitstamp v1',
'countries': 'GB',
'rateLimit': 1000,
'version': 'v1',
'hasCORS': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27786377-8c8ab57e-5fe9-11e7-8ea4-2b05b6bcceec.jpg',
'api': 'https://www.bitstamp.net/api',
'www': 'https://www.bitstamp.net',
'doc': 'https://www.bitstamp.net/api',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'public': {
'get': [
'ticker',
'ticker_hour',
'order_book',
'transactions',
'eur_usd',
],
},
'private': {
'post': [
'balance',
'user_transactions',
'open_orders',
'order_status',
'cancel_order',
'cancel_all_orders',
'buy',
'sell',
'bitcoin_deposit_address',
'unconfirmed_btc',
'ripple_withdrawal',
'ripple_address',
'withdrawal_requests',
'bitcoin_withdrawal',
],
},
},
'markets': {
'BTC/USD': {'id': 'btcusd', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'BTC/EUR': {'id': 'btceur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'EUR/USD': {'id': 'eurusd', 'symbol': 'EUR/USD', 'base': 'EUR', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'XRP/USD': {'id': 'xrpusd', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'XRP/EUR': {'id': 'xrpeur', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'XRP/BTC': {'id': 'xrpbtc', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
'LTC/USD': {'id': 'ltcusd', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'LTC/EUR': {'id': 'ltceur', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
'ETH/USD': {'id': 'ethusd', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
'ETH/EUR': {'id': 'etheur', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
'ETH/BTC': {'id': 'ethbtc', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
},
})
async def fetch_order_book(self, symbol, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchOrderBook doesn't support " + symbol + ', use it for BTC/USD only')
orderbook = await self.publicGetOrderBook(params)
timestamp = int(orderbook['timestamp']) * 1000
return self.parse_order_book(orderbook, timestamp)
async def fetch_ticker(self, symbol, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchTicker doesn't support " + symbol + ', use it for BTC/USD only')
ticker = await self.publicGetTicker(params)
timestamp = int(ticker['timestamp']) * 1000
vwap = float(ticker['vwap'])
baseVolume = float(ticker['volume'])
quoteVolume = baseVolume * vwap
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': vwap,
'open': float(ticker['open']),
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = None
if 'date' in trade:
timestamp = int(trade['date']) * 1000
elif 'datetime' in trade:
timestamp = int(trade['datetime']) * 1000
side = 'buy' if (trade['type'] == 0) else 'sell'
order = None
if 'order_id' in trade:
order = str(trade['order_id'])
if 'currency_pair' in trade:
if trade['currency_pair'] in self.markets_by_id:
market = self.markets_by_id[trade['currency_pair']]
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': order,
'type': None,
'side': side,
'price': float(trade['price']),
'amount': float(trade['amount']),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchTrades doesn't support " + symbol + ', use it for BTC/USD only')
market = self.market(symbol)
response = await self.publicGetTransactions(self.extend({
'time': 'minute',
}, params))
return self.parse_trades(response, market, since, limit)
async def fetch_balance(self, params={}):
balance = await self.privatePostBalance()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercase = currency.lower()
total = lowercase + '_balance'
free = lowercase + '_available'
used = lowercase + '_reserved'
account = self.account()
account['free'] = self.safe_float(balance, free, 0.0)
account['used'] = self.safe_float(balance, used, 0.0)
account['total'] = self.safe_float(balance, total, 0.0)
result[currency] = account
return self.parse_balance(result)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' ' + self.version + ' accepts limit orders only')
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' v1 supports BTC/USD orders only')
method = 'privatePost' + self.capitalize(side)
order = {
'amount': amount,
'price': price,
}
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostCancelOrder({'id': id})
def parse_order_status(self, order):
if (order['status'] == 'Queue') or (order['status'] == 'Open'):
return 'open'
if order['status'] == 'Finished':
return 'closed'
return order['status']
async def fetch_order_status(self, id, symbol=None):
await self.load_markets()
response = await self.privatePostOrderStatus({'id': id})
return self.parse_order_status(response)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol:
market = self.market(symbol)
pair = market['id'] if market else 'all'
request = self.extend({'id': pair}, params)
response = await self.privatePostOpenOrdersId(request)
return self.parse_trades(response, market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
raise NotSupported(self.id + ' fetchOrder is not implemented yet')
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
auth = nonce + self.uid + self.apiKey
signature = self.encode(self.hmac(self.encode(auth), self.encode(self.secret)))
query = self.extend({
'key': self.apiKey,
'signature': signature.upper(),
'nonce': nonce,
}, query)
body = self.urlencode(query)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'status' in response:
if response['status'] == 'error':
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| false | true |
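The bitstamp1 wrapper above exposes only BTC/USD on its public endpoints. A hedged usage sketch follows; it assumes a ccxt release that still exports bitstamp1 and uses the newer ccxt.async_support package name (the file above sits in the older ccxt.async layout):

```python
# Usage sketch; bitstamp1 availability and the async_support path are assumptions.
import asyncio
import ccxt.async_support as ccxt

async def main():
    exchange = ccxt.bitstamp1()
    try:
        ticker = await exchange.fetch_ticker("BTC/USD")  # v1 only supports BTC/USD
        print(ticker["last"], ticker["baseVolume"])
    finally:
        await exchange.close()  # close the underlying HTTP session

asyncio.run(main())
```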
f7156c17c1c2dac9f185a10f4aef638483c87e61 | 976 | py | Python |
core/forms.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | ["MIT"] | null | null | null |
core/forms.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | ["MIT"] | null | null | null |
core/forms.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | ["MIT"] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class ProfileForm(UserCreationForm):
email = forms.EmailField(widget=forms.TextInput(
attrs = {
'type' : 'email',
'placeholder' : ('Email')
}
))
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
widgets = {
'username': forms.TextInput(attrs={'placeholder': 'Username'}),
'first_name': forms.TextInput(attrs={'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'placeholder': 'Last Name'}),
'email': forms.TextInput(attrs={'placeholder': 'Email'}),
}
def clean(self):
cleaned_data = super(ProfileForm,self).clean()
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
| 36.148148 | 79 | 0.609631 |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class ProfileForm(UserCreationForm):
email = forms.EmailField(widget=forms.TextInput(
attrs = {
'type' : 'email',
'placeholder' : ('Email')
}
))
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
widgets = {
'username': forms.TextInput(attrs={'placeholder': 'Username'}),
'first_name': forms.TextInput(attrs={'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'placeholder': 'Last Name'}),
'email': forms.TextInput(attrs={'placeholder': 'Email'}),
}
def clean(self):
cleaned_data = super(ProfileForm,self).clean()
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
| true | true |
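ProfileForm above extends UserCreationForm with placeholder widgets. A hedged sketch of a registration view that uses it follows; the URL name and template path are illustrative, not taken from the donnellan0007/blog repository:

```python
# Illustrative view; the "login" URL name and the template path are assumptions.
from django.shortcuts import redirect, render

from core.forms import ProfileForm

def register(request):
    if request.method == "POST":
        form = ProfileForm(request.POST)
        if form.is_valid():
            form.save()  # UserCreationForm.save() creates the User
            return redirect("login")
    else:
        form = ProfileForm()
    return render(request, "registration/register.html", {"form": form})
```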
f7156c4bf5fa7bbe5bdf31d0caf7e0a157cf1469 | 4,015 | py | Python |
tests/parser/functions/test_concat.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | ["MIT"] | 1 | 2021-04-23T21:48:20.000Z | 2021-04-23T21:48:20.000Z |
tests/parser/functions/test_concat.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | ["MIT"] | null | null | null |
tests/parser/functions/test_concat.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | ["MIT"] | 1 | 2020-01-27T05:21:46.000Z | 2020-01-27T05:21:46.000Z |
from vyper.exceptions import (
TypeMismatchException,
)
def test_concat(get_contract_with_gas_estimation):
test_concat = """
@public
def foo2(input1: bytes[50], input2: bytes[50]) -> bytes[1000]:
return concat(input1, input2)
@public
def foo3(input1: bytes[50], input2: bytes[50], input3: bytes[50]) -> bytes[1000]:
return concat(input1, input2, input3)
"""
c = get_contract_with_gas_estimation(test_concat)
assert c.foo2(b"h", b"orse") == b"horse"
assert c.foo2(b"h", b"") == b"h"
assert c.foo2(b"", b"") == b""
assert c.foo2(b"", b"orse") == b"orse"
assert c.foo3(b"Buffalo", b" ", b"buffalo") == b"Buffalo buffalo"
assert c.foo2(b"\x36", b"\x35" * 32) == b"\x36" + b"\x35" * 32
assert c.foo2(b"\x36" * 48, b"\x35" * 32) == b"\x36" * 48 + b"\x35" * 32
assert c.foo3(b"horses" * 4, b"mice" * 7, b"crows" * 10) == b"horses" * 4 + b"mice" * 7 + b"crows" * 10 # noqa: E501
print('Passed simple concat test')
def test_concat2(get_contract_with_gas_estimation):
test_concat2 = """
@public
def foo(inp: bytes[50]) -> bytes[1000]:
x: bytes[50] = inp
return concat(x, inp, x, inp, x, inp, x, inp, x, inp)
"""
c = get_contract_with_gas_estimation(test_concat2)
assert c.foo(b"horse" * 9 + b"vyper") == (b"horse" * 9 + b"vyper") * 10
print('Passed second concat test')
def test_crazy_concat_code(get_contract_with_gas_estimation):
crazy_concat_code = """
y: bytes[10]
@public
def krazykonkat(z: bytes[10]) -> bytes[25]:
x: bytes[3] = "cow"
self.y = "horse"
return concat(x, b" ", self.y, b" ", z)
"""
c = get_contract_with_gas_estimation(crazy_concat_code)
assert c.krazykonkat(b"moose") == b'cow horse moose'
print('Passed third concat test')
def test_concat_bytes32(get_contract_with_gas_estimation):
test_concat_bytes32 = """
@public
def sandwich(inp: bytes[100], inp2: bytes32) -> bytes[164]:
return concat(inp2, inp, inp2)
@public
def fivetimes(inp: bytes32) -> bytes[160]:
return concat(inp, inp, inp, inp, inp)
"""
c = get_contract_with_gas_estimation(test_concat_bytes32)
assert c.sandwich(b"cow", b"\x35" * 32) == b"\x35" * 32 + b"cow" + b"\x35" * 32, c.sandwich(b"cow", b"\x35" * 32) # noqa: E501
assert c.sandwich(b"", b"\x46" * 32) == b"\x46" * 64
assert c.sandwich(b"\x57" * 95, b"\x57" * 32) == b"\x57" * 159
assert c.sandwich(b"\x57" * 96, b"\x57" * 32) == b"\x57" * 160
assert c.sandwich(b"\x57" * 97, b"\x57" * 32) == b"\x57" * 161
assert c.fivetimes(b"mongoose" * 4) == b"mongoose" * 20
print("Passed concat bytes32 test")
def test_konkat_code(get_contract_with_gas_estimation):
konkat_code = """
ecks: bytes32
@public
def foo(x: bytes32, y: bytes32) -> bytes[64]:
self.ecks = x
return concat(self.ecks, y)
@public
def goo(x: bytes32, y: bytes32) -> bytes[64]:
self.ecks = x
return concat(self.ecks, y)
@public
def hoo(x: bytes32, y: bytes32) -> bytes[64]:
return concat(x, y)
"""
c = get_contract_with_gas_estimation(konkat_code)
assert c.foo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.goo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.hoo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
print('Passed second concat tests')
def test_small_output(get_contract_with_gas_estimation):
code = """
@public
def small_output(a: string[5], b: string[4]) -> string[9]:
c: string[9] = concat(a, b)
return c
"""
c = get_contract_with_gas_estimation(code)
assert c.small_output('abcde', 'fghi') == 'abcdefghi'
assert c.small_output('', '') == ''
def test_large_output(get_contract_with_gas_estimation, assert_compile_failed):
code = """
@public
def large_output(a: string[33], b: string[33]) -> string[64]:
c: string[64] = concat(a, b)
return c
"""
assert_compile_failed(
lambda: get_contract_with_gas_estimation(code),
TypeMismatchException
)
| 30.18797 | 131 | 0.62142 |
from vyper.exceptions import (
TypeMismatchException,
)
def test_concat(get_contract_with_gas_estimation):
test_concat = """
@public
def foo2(input1: bytes[50], input2: bytes[50]) -> bytes[1000]:
return concat(input1, input2)
@public
def foo3(input1: bytes[50], input2: bytes[50], input3: bytes[50]) -> bytes[1000]:
return concat(input1, input2, input3)
"""
c = get_contract_with_gas_estimation(test_concat)
assert c.foo2(b"h", b"orse") == b"horse"
assert c.foo2(b"h", b"") == b"h"
assert c.foo2(b"", b"") == b""
assert c.foo2(b"", b"orse") == b"orse"
assert c.foo3(b"Buffalo", b" ", b"buffalo") == b"Buffalo buffalo"
assert c.foo2(b"\x36", b"\x35" * 32) == b"\x36" + b"\x35" * 32
assert c.foo2(b"\x36" * 48, b"\x35" * 32) == b"\x36" * 48 + b"\x35" * 32
assert c.foo3(b"horses" * 4, b"mice" * 7, b"crows" * 10) == b"horses" * 4 + b"mice" * 7 + b"crows" * 10
print('Passed simple concat test')
def test_concat2(get_contract_with_gas_estimation):
test_concat2 = """
@public
def foo(inp: bytes[50]) -> bytes[1000]:
x: bytes[50] = inp
return concat(x, inp, x, inp, x, inp, x, inp, x, inp)
"""
c = get_contract_with_gas_estimation(test_concat2)
assert c.foo(b"horse" * 9 + b"vyper") == (b"horse" * 9 + b"vyper") * 10
print('Passed second concat test')
def test_crazy_concat_code(get_contract_with_gas_estimation):
crazy_concat_code = """
y: bytes[10]
@public
def krazykonkat(z: bytes[10]) -> bytes[25]:
x: bytes[3] = "cow"
self.y = "horse"
return concat(x, b" ", self.y, b" ", z)
"""
c = get_contract_with_gas_estimation(crazy_concat_code)
assert c.krazykonkat(b"moose") == b'cow horse moose'
print('Passed third concat test')
def test_concat_bytes32(get_contract_with_gas_estimation):
test_concat_bytes32 = """
@public
def sandwich(inp: bytes[100], inp2: bytes32) -> bytes[164]:
return concat(inp2, inp, inp2)
@public
def fivetimes(inp: bytes32) -> bytes[160]:
return concat(inp, inp, inp, inp, inp)
"""
c = get_contract_with_gas_estimation(test_concat_bytes32)
assert c.sandwich(b"cow", b"\x35" * 32) == b"\x35" * 32 + b"cow" + b"\x35" * 32, c.sandwich(b"cow", b"\x35" * 32)
assert c.sandwich(b"", b"\x46" * 32) == b"\x46" * 64
assert c.sandwich(b"\x57" * 95, b"\x57" * 32) == b"\x57" * 159
assert c.sandwich(b"\x57" * 96, b"\x57" * 32) == b"\x57" * 160
assert c.sandwich(b"\x57" * 97, b"\x57" * 32) == b"\x57" * 161
assert c.fivetimes(b"mongoose" * 4) == b"mongoose" * 20
print("Passed concat bytes32 test")
def test_konkat_code(get_contract_with_gas_estimation):
konkat_code = """
ecks: bytes32
@public
def foo(x: bytes32, y: bytes32) -> bytes[64]:
self.ecks = x
return concat(self.ecks, y)
@public
def goo(x: bytes32, y: bytes32) -> bytes[64]:
self.ecks = x
return concat(self.ecks, y)
@public
def hoo(x: bytes32, y: bytes32) -> bytes[64]:
return concat(x, y)
"""
c = get_contract_with_gas_estimation(konkat_code)
assert c.foo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.goo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.hoo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
print('Passed second concat tests')
def test_small_output(get_contract_with_gas_estimation):
code = """
@public
def small_output(a: string[5], b: string[4]) -> string[9]:
c: string[9] = concat(a, b)
return c
"""
c = get_contract_with_gas_estimation(code)
assert c.small_output('abcde', 'fghi') == 'abcdefghi'
assert c.small_output('', '') == ''
def test_large_output(get_contract_with_gas_estimation, assert_compile_failed):
code = """
@public
def large_output(a: string[33], b: string[33]) -> string[64]:
c: string[64] = concat(a, b)
return c
"""
assert_compile_failed(
lambda: get_contract_with_gas_estimation(code),
TypeMismatchException
)
| true | true |
f7156ce7fd453c52f14385b72fc6a38950f75874 | 5,307 | py | Python |
nicos_mlz/biodiff/setups/motor.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | null | null | null |
nicos_mlz/biodiff/setups/motor.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z |
nicos_mlz/biodiff/setups/motor.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | null | null | null |
# -*- coding: utf-8 -*-
description = 'Axes setup'
group = 'lowlevel'
tango_base = 'tango://phys.biodiff.frm2:10000/biodiff/'
devices = dict(
omega_samplestepper = device('nicos.devices.tango.Motor',
description = 'Sample stepper omega variant',
tangodevice = tango_base + 'fzjs7/omega_samplestepper',
unit = 'deg',
precision = 0.001,
),
omega_sampletable = device('nicos.devices.tango.Motor',
description = 'Sample table omega variant',
tangodevice = tango_base + 'fzjs7/omega_sampletable',
unit = 'deg',
precision = 0.001,
),
x_sampletable = device('nicos.devices.tango.Motor',
description = 'Sample table x axis',
tangodevice = tango_base + 'fzjs7/x_sampletable',
unit = 'mm',
precision = 0.005,
),
y_sampletable = device('nicos.devices.tango.Motor',
description = 'Sample table y axis',
tangodevice = tango_base + 'fzjs7/y_sampletable',
unit = 'mm',
precision = 0.005,
),
z_sampletable = device('nicos.devices.tango.Motor',
description = 'Sample table x axis',
tangodevice = tango_base + 'fzjs7/z_sampletable',
unit = 'mm',
precision = 0.005,
),
theta_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator theta variant',
tangodevice = tango_base + 'fzjs7/theta_monochromator',
unit = 'deg',
precision = 0.001,
),
tilt_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator tilt',
tangodevice = tango_base + 'fzjs7/tilt_monochromator',
unit = 'deg',
precision = 0.005,
),
x_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator x axis',
tangodevice = tango_base + 'fzjs7/x_monochromator',
unit = 'mm',
precision = 0.002,
),
y_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator y axis',
tangodevice = tango_base + 'fzjs7/y_monochromator',
unit = 'mm',
precision = 0.002,
),
z_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator z axis',
tangodevice = tango_base + 'fzjs7/z_monochromator',
unit = 'mm',
precision = 0.002,
),
theta2_selectorarm = device('nicos.devices.tango.Motor',
description = 'Selector arm 2theta variant',
tangodevice = tango_base + 'fzjs7/2theta_selectorarm',
unit = 'deg',
precision = 0.005,
),
d_diaphragm1 = device('nicos.devices.tango.Motor',
description = 'Slit 1',
tangodevice = tango_base + 'fzjs7/d_diaphragm1',
unit = 'mm',
precision = 0.05,
),
d_diaphragm2 = device('nicos.devices.tango.Motor',
description = 'Slit 2',
tangodevice = tango_base + 'fzjs7/d_diaphragm2',
unit = 'mm',
precision = 0.05,
),
theta2_detectorunit = device('nicos.devices.tango.Motor',
description = 'Detector unit 2theta variant',
tangodevice = tango_base + 'fzjs7/2theta_detectorunit',
unit = 'deg',
precision = 0.005,
),
z_imageplate = device('nicos.devices.tango.Motor',
description = 'Neutron image plate z axis',
tangodevice = tango_base + 'fzjs7/z_neutronimageplate',
unit = 'mm',
precision = 0.01,
),
z_CCD = device('nicos.devices.tango.Motor',
description = 'CCD z axis',
tangodevice = tango_base + 'fzjs7/z_CCD',
unit = 'mm',
precision = 0.01,
),
z_CCDcamera = device('nicos.devices.tango.Motor',
description = 'CCD camera z axis',
tangodevice = tango_base + 'fzjs7/z_CCDcamera',
unit = 'mm',
precision = 0.01,
),
# theta2_CCDcamera = device('nicos.devices.tango.Motor',
# description = 'CCD camera 2theta variant',
# tangodevice = tango_base + 'fzjs7/2theta_CCDcamera',
# unit = 'deg',
# precision = 0.01,
# ),
rot_scintillatorhead = device('nicos_mlz.biodiff.devices.motor.S7InterlockMotor',
description = 'Scintillator head rotation',
tangodevice = tango_base + 'fzjs7/rot_scintillatorhead',
unit = 'deg',
precision = 0.5,
),
# omega_samplegoniometer = device('nicos.devices.tango.Motor',
# description = 'Sample goniometer omega variant',
# tangodevice = tango_base + 'fzjs7/omega_samplegoniometer',
# ),
# x_samplegoniometer = device('nicos.devices.tango.Motor',
# description = 'Sample goniometer x axis',
# tangodevice = tango_base + 'fzjs7/x_samplegoniometer',
# ),
# y_samplegoniometer = device('nicos.devices.tango.Motor',
# description = 'Sample goniometer y axis',
# tangodevice = tango_base + 'fzjs7/y_samplegoniometer',
# ),
# rot_diaphragm3 = device('nicos.devices.tango.Motor',
# description = 'Slit 3',
# tangodevice = tango_base + 'fzjs7/rot_diaphragm3',
# unit = 'deg',
# ),
# rot_diaphragm4 = device('nicos.devices.tango.Motor',
# description = 'Slit 4',
# tangodevice = tango_base + 'fzjs7/rot_diaphragm4',
# unit = 'deg',
# ),
)
| 36.349315
| 85
| 0.606181
|
description = 'Axes setup'
group = 'lowlevel'
tango_base = 'tango://phys.biodiff.frm2:10000/biodiff/'
devices = dict(
omega_samplestepper = device('nicos.devices.tango.Motor',
description = 'Sample stepper omega variant',
tangodevice = tango_base + 'fzjs7/omega_samplestepper',
unit = 'deg',
precision = 0.001,
),
omega_sampletable = device('nicos.devices.tango.Motor',
description = 'Sample table omega variant',
tangodevice = tango_base + 'fzjs7/omega_sampletable',
unit = 'deg',
precision = 0.001,
),
x_sampletable = device('nicos.devices.tango.Motor',
description = 'Sample table x axis',
tangodevice = tango_base + 'fzjs7/x_sampletable',
unit = 'mm',
precision = 0.005,
),
y_sampletable = device('nicos.devices.tango.Motor',
description = 'Sample table y axis',
tangodevice = tango_base + 'fzjs7/y_sampletable',
unit = 'mm',
precision = 0.005,
),
z_sampletable = device('nicos.devices.tango.Motor',
        description = 'Sample table z axis',
tangodevice = tango_base + 'fzjs7/z_sampletable',
unit = 'mm',
precision = 0.005,
),
theta_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator theta variant',
tangodevice = tango_base + 'fzjs7/theta_monochromator',
unit = 'deg',
precision = 0.001,
),
tilt_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator tilt',
tangodevice = tango_base + 'fzjs7/tilt_monochromator',
unit = 'deg',
precision = 0.005,
),
x_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator x axis',
tangodevice = tango_base + 'fzjs7/x_monochromator',
unit = 'mm',
precision = 0.002,
),
y_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator y axis',
tangodevice = tango_base + 'fzjs7/y_monochromator',
unit = 'mm',
precision = 0.002,
),
z_monochromator = device('nicos.devices.tango.Motor',
description = 'Monochromator z axis',
tangodevice = tango_base + 'fzjs7/z_monochromator',
unit = 'mm',
precision = 0.002,
),
theta2_selectorarm = device('nicos.devices.tango.Motor',
description = 'Selector arm 2theta variant',
tangodevice = tango_base + 'fzjs7/2theta_selectorarm',
unit = 'deg',
precision = 0.005,
),
d_diaphragm1 = device('nicos.devices.tango.Motor',
description = 'Slit 1',
tangodevice = tango_base + 'fzjs7/d_diaphragm1',
unit = 'mm',
precision = 0.05,
),
d_diaphragm2 = device('nicos.devices.tango.Motor',
description = 'Slit 2',
tangodevice = tango_base + 'fzjs7/d_diaphragm2',
unit = 'mm',
precision = 0.05,
),
theta2_detectorunit = device('nicos.devices.tango.Motor',
description = 'Detector unit 2theta variant',
tangodevice = tango_base + 'fzjs7/2theta_detectorunit',
unit = 'deg',
precision = 0.005,
),
z_imageplate = device('nicos.devices.tango.Motor',
description = 'Neutron image plate z axis',
tangodevice = tango_base + 'fzjs7/z_neutronimageplate',
unit = 'mm',
precision = 0.01,
),
z_CCD = device('nicos.devices.tango.Motor',
description = 'CCD z axis',
tangodevice = tango_base + 'fzjs7/z_CCD',
unit = 'mm',
precision = 0.01,
),
z_CCDcamera = device('nicos.devices.tango.Motor',
description = 'CCD camera z axis',
tangodevice = tango_base + 'fzjs7/z_CCDcamera',
unit = 'mm',
precision = 0.01,
),
rot_scintillatorhead = device('nicos_mlz.biodiff.devices.motor.S7InterlockMotor',
description = 'Scintillator head rotation',
tangodevice = tango_base + 'fzjs7/rot_scintillatorhead',
unit = 'deg',
precision = 0.5,
),
)
| true
| true
|
f7156d0af1e9a61d01bcad558cbe5b0d3ec055db
| 15,159
|
py
|
Python
|
src/config/api-server/tests/test_logical_router.py
|
amitkg29/contrail-controller
|
be71b50f185a68338ea54d6f8088623ab88c2bf6
|
[
"Apache-2.0"
] | null | null | null |
src/config/api-server/tests/test_logical_router.py
|
amitkg29/contrail-controller
|
be71b50f185a68338ea54d6f8088623ab88c2bf6
|
[
"Apache-2.0"
] | null | null | null |
src/config/api-server/tests/test_logical_router.py
|
amitkg29/contrail-controller
|
be71b50f185a68338ea54d6f8088623ab88c2bf6
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013,2014 Juniper Networks, Inc. All rights reserved.
#
import gevent
import os
import sys
import socket
import errno
import uuid
import logging
import coverage
import cgitb
cgitb.enable(format='text')
import testtools
from testtools.matchers import Equals, MismatchError, Not, Contains
from testtools import content, content_type, ExpectedException
import unittest
import re
import json
import copy
import inspect
import pycassa
import kombu
import requests
import netaddr
from vnc_api.vnc_api import *
from vnc_api.common import exceptions as vnc_exceptions
import vnc_api.gen.vnc_api_test_gen
from vnc_api.gen.resource_test import *
from netaddr import IPNetwork, IPAddress
import cfgm_common
sys.path.append('../common/tests')
from test_utils import *
import test_common
import test_case
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TestLogicalRouter(test_case.ApiServerTestCase):
def test_lr_v4_subnets(self):
print '*** test logical router creation and interface-add of v4 subnets ***'
# Create Domain
domain = Domain('my-lr-domain')
self._vnc_lib.domain_create(domain)
print 'Created domain '
# Create Project
project = Project('my-lr-proj', domain)
self._vnc_lib.project_create(project)
print 'Created Project'
# Create NetworkIpam
ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
self._vnc_lib.network_ipam_create(ipam)
print 'Created network ipam'
ipam = self._vnc_lib.network_ipam_read(fq_name=['my-lr-domain', 'my-lr-proj',
'default-network-ipam'])
print 'Read network ipam'
# Create subnets
ipam_sn_v4_vn1 = IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))
ipam_sn_v6_vn1 = IpamSubnetType(subnet=SubnetType('fd11::', 120))
ipam_sn_v4_vn2 = IpamSubnetType(subnet=SubnetType('11.1.2.0', 24))
ipam_sn_v6_vn2 = IpamSubnetType(subnet=SubnetType('fd12::', 120))
# Create VN my-vn-1
vn1 = VirtualNetwork('my-vn-1', project)
vn1.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn1, ipam_sn_v6_vn1]))
self._vnc_lib.virtual_network_create(vn1)
print 'Created Virtual Network object for my-vn-1 ', vn1.uuid
net_obj1 = self._vnc_lib.virtual_network_read(id = vn1.uuid)
# Create VN my-vn-2
vn2 = VirtualNetwork('my-vn-2', project)
vn2.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn2, ipam_sn_v6_vn2]))
self._vnc_lib.virtual_network_create(vn2)
print 'Created Virtual Network object for my-vn-2 ', vn2.uuid
net_obj2 = self._vnc_lib.virtual_network_read(id = vn2.uuid)
# Create Logical Router
lr = LogicalRouter('router-test-v4', project)
lr_uuid = self._vnc_lib.logical_router_create(lr)
print 'Created Logical Router '
# Create a Virtual Machine Interface belonging to my-vn-1
id_perms = IdPermsType(enable=True)
port_obj1 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj1.uuid = port_obj1.name
port_obj1.set_virtual_network(vn1)
port_obj1.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
#Assign gateway ip
ipam_refs = net_obj1.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 4:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
print 'Created Virtual Machine Interface'
# Create v4 Ip object
ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v4')
ip_obj1.uuid = ip_obj1.name
ip_obj1.set_virtual_machine_interface(port_obj1)
ip_obj1.set_virtual_network(net_obj1)
ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
# Add Router Interface (test being subnet)
lr.add_virtual_machine_interface(port_obj1)
self._vnc_lib.logical_router_update(lr)
print 'Linked VMI object (VN1) and LR object'
# Create a Virtual Machine Interface belonging to my-vn-2
port_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj2.uuid = port_obj2.name
port_obj2.set_virtual_network(vn2)
port_obj2.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
#Assign gateway ip
ipam_refs = net_obj2.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 4:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id2 = self._vnc_lib.virtual_machine_interface_create(port_obj2)
print 'Created Virtual Machine Interface'
# Create v4 Ip object
ip_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v4')
ip_obj2.uuid = ip_obj2.name
ip_obj2.set_virtual_machine_interface(port_obj2)
ip_obj2.set_virtual_network(net_obj2)
ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
# Add Router Interface (test being subnet)
lr.add_virtual_machine_interface(port_obj2)
self._vnc_lib.logical_router_update(lr)
print 'Linked VMI object (VN2) and LR object'
# Verify logical-router dumps
lr.dump()
# TODO: Schema transformer not integrated in the tests,
# hence route-target refs not set yet
# Verify Route Target Creation
rt_refs = lr.get_route_target_refs()
if not rt_refs:
print ' !!! Schema Transformer not integrated in test yet !!!'
print ' !!! route-target not associated to Logical Router'
else:
for rt_ref in rt_refs:
print ' Route Target (%s)' %(rt_ref['to'])
rt_obj = self._vnc_lib.route_target_read(id=rt_ref['uuid'])
ri_refs = rt_obj.get_routing_instance_back_refs()
for ri_ref in ri_refs:
                    ri_obj = self._vnc_lib.routing_instance_read(id=ri_ref['uuid'])
ri_name = ri_obj.get_display_name()
print ' Routing Instance (%s)' %(ri_name)
if ((ri_name != 'my-vn-1') and (ri_name != 'my-vn-2')):
print ' Failure, Logical-Router not associated to expected VN'
#cleanup
print 'Cleaning up'
self._vnc_lib.instance_ip_delete(id=ip_id1)
self._vnc_lib.instance_ip_delete(id=ip_id2)
self._vnc_lib.logical_router_delete(id=lr_uuid)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj2.uuid)
self._vnc_lib.virtual_network_delete(id=vn1.uuid)
self._vnc_lib.virtual_network_delete(id=vn2.uuid)
self._vnc_lib.network_ipam_delete(id=ipam.uuid)
self._vnc_lib.project_delete(id=project.uuid)
self._vnc_lib.domain_delete(id=domain.uuid)
#end
def test_lr_v6_subnets(self):
print '*** test logical router creation and interface-add of v6 subnets ***'
# Create Domain
domain = Domain('my-lr-domain')
self._vnc_lib.domain_create(domain)
print 'Created domain '
# Create Project
project = Project('my-lr-proj', domain)
self._vnc_lib.project_create(project)
print 'Created Project'
# Create NetworkIpam
ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
self._vnc_lib.network_ipam_create(ipam)
print 'Created network ipam'
ipam = self._vnc_lib.network_ipam_read(fq_name=['my-lr-domain', 'my-lr-proj',
'default-network-ipam'])
print 'Read network ipam'
# Create subnets
ipam_sn_v4_vn1 = IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))
ipam_sn_v6_vn1 = IpamSubnetType(subnet=SubnetType('fd11::', 120))
ipam_sn_v4_vn2 = IpamSubnetType(subnet=SubnetType('11.1.2.0', 24))
ipam_sn_v6_vn2 = IpamSubnetType(subnet=SubnetType('fd12::', 120))
# Create VN my-vn-1
vn1 = VirtualNetwork('my-vn-1', project)
vn1.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn1, ipam_sn_v6_vn1]))
self._vnc_lib.virtual_network_create(vn1)
print 'Created Virtual Network object for my-vn-1 ', vn1.uuid
net_obj1 = self._vnc_lib.virtual_network_read(id = vn1.uuid)
# Create VN my-vn-2
vn2 = VirtualNetwork('my-vn-2', project)
vn2.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn2, ipam_sn_v6_vn2]))
self._vnc_lib.virtual_network_create(vn2)
print 'Created Virtual Network object for my-vn-2 ', vn2.uuid
net_obj2 = self._vnc_lib.virtual_network_read(id = vn2.uuid)
# Create Logical Router
lr = LogicalRouter('router-test-v6', project)
lr_uuid = self._vnc_lib.logical_router_create(lr)
print 'Created Logical Router '
# Create a Virtual Machine Interface belonging to my-vn-1
id_perms = IdPermsType(enable=True)
port_obj1 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj1.uuid = port_obj1.name
port_obj1.set_virtual_network(vn1)
port_obj1.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
#Assign gateway ip
ipam_refs = net_obj1.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 6:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
print 'Created Virtual Machine Interface'
# Create v6 Ip object
ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v6')
ip_obj1.uuid = ip_obj1.name
ip_obj1.set_virtual_machine_interface(port_obj1)
ip_obj1.set_virtual_network(net_obj1)
ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
# Add Router Interface (test being subnet)
lr.add_virtual_machine_interface(port_obj1)
lr_obj = self._vnc_lib.logical_router_read(id=lr_uuid)
self._vnc_lib.logical_router_update(lr_obj)
print 'Linked VMI object (VN1) and LR object'
# Create a Virtual Machine Interface belonging to my-vn-2
port_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj2.uuid = port_obj2.name
port_obj2.set_virtual_network(vn2)
port_obj2.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
#Assign gateway ip
ipam_refs = net_obj2.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 6:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id2 = self._vnc_lib.virtual_machine_interface_create(port_obj2)
print 'Created Virtual Machine Interface'
# Create v6 Ip object
ip_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v6')
ip_obj2.uuid = ip_obj2.name
ip_obj2.set_virtual_machine_interface(port_obj2)
ip_obj2.set_virtual_network(net_obj2)
ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
# Add Router Interface (test being subnet)
lr.add_virtual_machine_interface(port_obj2)
lr_obj = self._vnc_lib.logical_router_read(id=lr_uuid)
self._vnc_lib.logical_router_update(lr_obj)
print 'Linked VMI object (VN2) and LR object'
# Verify logical-router dumps
lr.dump()
# TODO: Schema transformer not integrated in the tests,
# hence route-target refs not set yet
# Verify Route Target Creation
rt_refs = lr.get_route_target_refs()
if not rt_refs:
print ' !!! Schema Transformer not integrated in test yet !!!'
print ' !!! route-target not associated to Logical Router'
else:
for rt_ref in rt_refs:
print ' Route Target (%s)' %(rt_ref['to'])
rt_obj = self._vnc_lib.route_target_read(id=rt_ref['uuid'])
ri_refs = rt_obj.get_routing_instance_back_refs()
for ri_ref in ri_refs:
                    ri_obj = self._vnc_lib.routing_instance_read(id=ri_ref['uuid'])
ri_name = ri_obj.get_display_name()
print ' Routing Instance (%s)' %(ri_name)
                    if ((ri_name != 'my-vn-1') and (ri_name != 'my-vn-2')):
print ' Failure, Logical-Router not associated to expected VN'
#cleanup
print 'Cleaning up'
self._vnc_lib.instance_ip_delete(id=ip_id1)
self._vnc_lib.instance_ip_delete(id=ip_id2)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj2.uuid)
self._vnc_lib.logical_router_delete(id=lr_uuid)
self._vnc_lib.virtual_network_delete(id=vn1.uuid)
self._vnc_lib.virtual_network_delete(id=vn2.uuid)
self._vnc_lib.network_ipam_delete(id=ipam.uuid)
self._vnc_lib.project_delete(id=project.uuid)
self._vnc_lib.domain_delete(id=domain.uuid)
#end
#end
#end class TestLogicalRouter
if __name__ == '__main__':
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
unittest.main()
| 42.343575
| 88
| 0.647734
|
import gevent
import os
import sys
import socket
import errno
import uuid
import logging
import coverage
import cgitb
cgitb.enable(format='text')
import testtools
from testtools.matchers import Equals, MismatchError, Not, Contains
from testtools import content, content_type, ExpectedException
import unittest
import re
import json
import copy
import inspect
import pycassa
import kombu
import requests
import netaddr
from vnc_api.vnc_api import *
from vnc_api.common import exceptions as vnc_exceptions
import vnc_api.gen.vnc_api_test_gen
from vnc_api.gen.resource_test import *
from netaddr import IPNetwork, IPAddress
import cfgm_common
sys.path.append('../common/tests')
from test_utils import *
import test_common
import test_case
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TestLogicalRouter(test_case.ApiServerTestCase):
def test_lr_v4_subnets(self):
print '*** test logical router creation and interface-add of v4 subnets ***'
domain = Domain('my-lr-domain')
self._vnc_lib.domain_create(domain)
print 'Created domain '
project = Project('my-lr-proj', domain)
self._vnc_lib.project_create(project)
print 'Created Project'
ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
self._vnc_lib.network_ipam_create(ipam)
print 'Created network ipam'
ipam = self._vnc_lib.network_ipam_read(fq_name=['my-lr-domain', 'my-lr-proj',
'default-network-ipam'])
print 'Read network ipam'
ipam_sn_v4_vn1 = IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))
ipam_sn_v6_vn1 = IpamSubnetType(subnet=SubnetType('fd11::', 120))
ipam_sn_v4_vn2 = IpamSubnetType(subnet=SubnetType('11.1.2.0', 24))
ipam_sn_v6_vn2 = IpamSubnetType(subnet=SubnetType('fd12::', 120))
vn1 = VirtualNetwork('my-vn-1', project)
vn1.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn1, ipam_sn_v6_vn1]))
self._vnc_lib.virtual_network_create(vn1)
print 'Created Virtual Network object for my-vn-1 ', vn1.uuid
net_obj1 = self._vnc_lib.virtual_network_read(id = vn1.uuid)
vn2 = VirtualNetwork('my-vn-2', project)
vn2.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn2, ipam_sn_v6_vn2]))
self._vnc_lib.virtual_network_create(vn2)
print 'Created Virtual Network object for my-vn-2 ', vn2.uuid
net_obj2 = self._vnc_lib.virtual_network_read(id = vn2.uuid)
lr = LogicalRouter('router-test-v4', project)
lr_uuid = self._vnc_lib.logical_router_create(lr)
print 'Created Logical Router '
id_perms = IdPermsType(enable=True)
port_obj1 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj1.uuid = port_obj1.name
port_obj1.set_virtual_network(vn1)
port_obj1.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
ipam_refs = net_obj1.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 4:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
print 'Created Virtual Machine Interface'
ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v4')
ip_obj1.uuid = ip_obj1.name
ip_obj1.set_virtual_machine_interface(port_obj1)
ip_obj1.set_virtual_network(net_obj1)
ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
lr.add_virtual_machine_interface(port_obj1)
self._vnc_lib.logical_router_update(lr)
print 'Linked VMI object (VN1) and LR object'
port_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj2.uuid = port_obj2.name
port_obj2.set_virtual_network(vn2)
port_obj2.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
ipam_refs = net_obj2.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 4:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id2 = self._vnc_lib.virtual_machine_interface_create(port_obj2)
print 'Created Virtual Machine Interface'
ip_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v4')
ip_obj2.uuid = ip_obj2.name
ip_obj2.set_virtual_machine_interface(port_obj2)
ip_obj2.set_virtual_network(net_obj2)
ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
lr.add_virtual_machine_interface(port_obj2)
self._vnc_lib.logical_router_update(lr)
print 'Linked VMI object (VN2) and LR object'
lr.dump()
rt_refs = lr.get_route_target_refs()
if not rt_refs:
print ' !!! Schema Transformer not integrated in test yet !!!'
print ' !!! route-target not associated to Logical Router'
else:
for rt_ref in rt_refs:
print ' Route Target (%s)' %(rt_ref['to'])
rt_obj = self._vnc_lib.route_target_read(id=rt_ref['uuid'])
ri_refs = rt_obj.get_routing_instance_back_refs()
for ri_ref in ri_refs:
                ri_obj = self._vnc_lib.routing_instance_read(id=ri_ref['uuid'])
ri_name = ri_obj.get_display_name()
print ' Routing Instance (%s)' %(ri_name)
if ((ri_name != 'my-vn-1') and (ri_name != 'my-vn-2')):
print ' Failure, Logical-Router not associated to expected VN'
print 'Cleaning up'
self._vnc_lib.instance_ip_delete(id=ip_id1)
self._vnc_lib.instance_ip_delete(id=ip_id2)
self._vnc_lib.logical_router_delete(id=lr_uuid)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj2.uuid)
self._vnc_lib.virtual_network_delete(id=vn1.uuid)
self._vnc_lib.virtual_network_delete(id=vn2.uuid)
self._vnc_lib.network_ipam_delete(id=ipam.uuid)
self._vnc_lib.project_delete(id=project.uuid)
self._vnc_lib.domain_delete(id=domain.uuid)
def test_lr_v6_subnets(self):
print '*** test logical router creation and interface-add of v6 subnets ***'
domain = Domain('my-lr-domain')
self._vnc_lib.domain_create(domain)
print 'Created domain '
project = Project('my-lr-proj', domain)
self._vnc_lib.project_create(project)
print 'Created Project'
ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
self._vnc_lib.network_ipam_create(ipam)
print 'Created network ipam'
ipam = self._vnc_lib.network_ipam_read(fq_name=['my-lr-domain', 'my-lr-proj',
'default-network-ipam'])
print 'Read network ipam'
ipam_sn_v4_vn1 = IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))
ipam_sn_v6_vn1 = IpamSubnetType(subnet=SubnetType('fd11::', 120))
ipam_sn_v4_vn2 = IpamSubnetType(subnet=SubnetType('11.1.2.0', 24))
ipam_sn_v6_vn2 = IpamSubnetType(subnet=SubnetType('fd12::', 120))
vn1 = VirtualNetwork('my-vn-1', project)
vn1.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn1, ipam_sn_v6_vn1]))
self._vnc_lib.virtual_network_create(vn1)
print 'Created Virtual Network object for my-vn-1 ', vn1.uuid
net_obj1 = self._vnc_lib.virtual_network_read(id = vn1.uuid)
vn2 = VirtualNetwork('my-vn-2', project)
vn2.add_network_ipam(ipam, VnSubnetsType([ipam_sn_v4_vn2, ipam_sn_v6_vn2]))
self._vnc_lib.virtual_network_create(vn2)
print 'Created Virtual Network object for my-vn-2 ', vn2.uuid
net_obj2 = self._vnc_lib.virtual_network_read(id = vn2.uuid)
lr = LogicalRouter('router-test-v6', project)
lr_uuid = self._vnc_lib.logical_router_create(lr)
print 'Created Logical Router '
id_perms = IdPermsType(enable=True)
port_obj1 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj1.uuid = port_obj1.name
port_obj1.set_virtual_network(vn1)
port_obj1.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
ipam_refs = net_obj1.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 6:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
print 'Created Virtual Machine Interface'
ip_obj1 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v6')
ip_obj1.uuid = ip_obj1.name
ip_obj1.set_virtual_machine_interface(port_obj1)
ip_obj1.set_virtual_network(net_obj1)
ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
lr.add_virtual_machine_interface(port_obj1)
lr_obj = self._vnc_lib.logical_router_read(id=lr_uuid)
self._vnc_lib.logical_router_update(lr_obj)
print 'Linked VMI object (VN1) and LR object'
port_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project, id_perms=id_perms)
port_obj2.uuid = port_obj2.name
port_obj2.set_virtual_network(vn2)
port_obj2.set_virtual_machine_interface_device_owner('DEVICE_OWNER_ROUTER_INTF')
ipam_refs = net_obj2.get_network_ipam_refs()
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
                if IPNetwork(cidr).version == 6:
gateway_ip = subnet.get_default_gateway()
print ' *** subnet gateway (%s)' %(gateway_ip)
port_id2 = self._vnc_lib.virtual_machine_interface_create(port_obj2)
print 'Created Virtual Machine Interface'
ip_obj2 = InstanceIp(name=str(uuid.uuid4()), instance_ip_address=gateway_ip,
instance_ip_family='v6')
ip_obj2.uuid = ip_obj2.name
ip_obj2.set_virtual_machine_interface(port_obj2)
ip_obj2.set_virtual_network(net_obj2)
ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
lr.add_virtual_machine_interface(port_obj2)
lr_obj = self._vnc_lib.logical_router_read(id=lr_uuid)
self._vnc_lib.logical_router_update(lr_obj)
print 'Linked VMI object (VN2) and LR object'
lr.dump()
rt_refs = lr.get_route_target_refs()
if not rt_refs:
print ' !!! Schema Transformer not integrated in test yet !!!'
print ' !!! route-target not associated to Logical Router'
else:
for rt_ref in rt_refs:
print ' Route Target (%s)' %(rt_ref['to'])
rt_obj = self._vnc_lib.route_target_read(id=rt_ref['uuid'])
ri_refs = rt_obj.get_routing_instance_back_refs()
for ri_ref in ri_refs:
                ri_obj = self._vnc_lib.routing_instance_read(id=ri_ref['uuid'])
ri_name = ri_obj.get_display_name()
print ' Routing Instance (%s)' %(ri_name)
                if ((ri_name != 'my-vn-1') and (ri_name != 'my-vn-2')):
print ' Failure, Logical-Router not associated to expected VN'
print 'Cleaning up'
self._vnc_lib.instance_ip_delete(id=ip_id1)
self._vnc_lib.instance_ip_delete(id=ip_id2)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
self._vnc_lib.virtual_machine_interface_delete(id=port_obj2.uuid)
self._vnc_lib.logical_router_delete(id=lr_uuid)
self._vnc_lib.virtual_network_delete(id=vn1.uuid)
self._vnc_lib.virtual_network_delete(id=vn2.uuid)
self._vnc_lib.network_ipam_delete(id=ipam.uuid)
self._vnc_lib.project_delete(id=project.uuid)
self._vnc_lib.domain_delete(id=domain.uuid)
if __name__ == '__main__':
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
unittest.main()
| false
| true
|
f7156e71c027a040fc6f4c1727ecee29015afce9
| 303
|
py
|
Python
|
listing_8-5.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
listing_8-5.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
listing_8-5.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
# Listing_8-5.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Printing the 8 times table using range()
for looper in range(1, 11):
print looper, "times 8 =", looper * 8
| 30.3
| 81
| 0.643564
|
for looper in range(1, 11):
print looper, "times 8 =", looper * 8
| false
| true
|
f7156edb72ba4944c07e754e6e68e17a3a4c0c87
| 648
|
py
|
Python
|
trade_remedies_api/organisations/migrations/0011_organisation_merged_from.py
|
uktrade/trade-remedies-api
|
fbe2d142ef099c7244788a0f72dd1003eaa7edce
|
[
"MIT"
] | 1
|
2020-08-13T10:37:15.000Z
|
2020-08-13T10:37:15.000Z
|
trade_remedies_api/organisations/migrations/0011_organisation_merged_from.py
|
uktrade/trade-remedies-api
|
fbe2d142ef099c7244788a0f72dd1003eaa7edce
|
[
"MIT"
] | 4
|
2020-09-10T13:41:52.000Z
|
2020-12-16T09:00:21.000Z
|
trade_remedies_api/organisations/migrations/0011_organisation_merged_from.py
|
uktrade/trade-remedies-api
|
fbe2d142ef099c7244788a0f72dd1003eaa7edce
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-11-06 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("organisations", "0010_auto_20191024_1353"),
]
operations = [
migrations.AddField(
model_name="organisation",
name="merged_from",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="merged_from_org",
to="organisations.Organisation",
),
),
]
| 24.923077
| 60
| 0.574074
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("organisations", "0010_auto_20191024_1353"),
]
operations = [
migrations.AddField(
model_name="organisation",
name="merged_from",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="merged_from_org",
to="organisations.Organisation",
),
),
]
| true
| true
|
f7156f2a19e53f51807d2c9be830a384fe50f7d0
| 4,368
|
py
|
Python
|
tests/unit/test_clearmot.py
|
traffic-ai/EvalDeT
|
3b52698e1b03fb9066e3203c2f36aebfa0030aba
|
[
"Apache-2.0"
] | 2
|
2021-12-19T21:55:12.000Z
|
2021-12-19T21:55:19.000Z
|
tests/unit/test_clearmot.py
|
sasp-ai/EvalDeT
|
3b52698e1b03fb9066e3203c2f36aebfa0030aba
|
[
"Apache-2.0"
] | 10
|
2021-08-07T09:51:27.000Z
|
2021-08-29T07:26:07.000Z
|
tests/unit/test_clearmot.py
|
traffic-ai/EvalDeT
|
3b52698e1b03fb9066e3203c2f36aebfa0030aba
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
from evaldet import Tracks
from evaldet.mot_metrics.clearmot import calculate_clearmot_metrics
def test_missing_frame_hyp():
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 1
assert metrics["FP_CLEAR"] == 0
assert metrics["IDS"] == 0
def test_missing_frame_gt():
gt = Tracks()
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["IDS"] == 0
assert metrics["FN_CLEAR"] == 0
assert metrics["FP_CLEAR"] == 1
def test_no_association_made():
gt = Tracks()
gt.add_frame(0, [0], np.array([[10, 10, 11, 11]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["IDS"] == 0
assert metrics["FN_CLEAR"] == 1
assert metrics["FP_CLEAR"] == 1
assert metrics["MOTA"] == -1 # Stange but ok
assert np.isnan(metrics["MOTP"])
@pytest.mark.parametrize("threshold", [0.3, 0.5, 0.7])
def test_dist_threshold(threshold: float):
gt = Tracks()
gt.add_frame(
0,
[0, 1, 2, 3],
np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1]]),
)
hyp = Tracks()
hyp.add_frame(
0,
[0, 1, 2, 3],
np.array([[0, 0, 1, 0.2], [0, 0, 1, 0.4], [0, 0, 1, 0.6], [0, 0, 1, 0.8]]),
)
fn_res = {0.3: 3, 0.5: 2, 0.7: 1}
metrics = calculate_clearmot_metrics(gt, hyp, dist_threshold=threshold)
assert fn_res[threshold] == metrics["FN_CLEAR"]
def test_sticky_association():
"""Test that as long as distance is below threshold, the association does
not switch, even if a detection with better IoU score appears.
"""
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 0
assert metrics["IDS"] == 0
assert metrics["FP_CLEAR"] == 1
def test_mismatch():
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(1, [1], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 0
assert metrics["IDS"] == 1
assert metrics["FP_CLEAR"] == 0
def test_persistent_mismatch():
"""Test that association (and therefore mismatch) persists even
when the first matched hypothesis is gone, as long as another one
is not assigned."""
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(2, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(2, [1], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 1
assert metrics["IDS"] == 1
assert metrics["FP_CLEAR"] == 0
def test_simple_case():
"""Test a simple case with 3 frames and 2 detections/gts per frame."""
gt = Tracks()
gt.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
gt.add_frame(1, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
gt.add_frame(2, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
hyp = Tracks()
hyp.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [1, 1, 2, 2]]))
hyp.add_frame(2, [2, 1], np.array([[0.05, 0.05, 1.05, 1.05], [2, 2, 3, 3]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 1
assert metrics["IDS"] == 1
assert metrics["FP_CLEAR"] == 1
assert metrics["MOTA"] == 0.5
assert metrics["MOTP"] == 0.0994008537355717
| 30.545455
| 83
| 0.565476
|
import numpy as np
import pytest
from evaldet import Tracks
from evaldet.mot_metrics.clearmot import calculate_clearmot_metrics
def test_missing_frame_hyp():
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 1
assert metrics["FP_CLEAR"] == 0
assert metrics["IDS"] == 0
def test_missing_frame_gt():
gt = Tracks()
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["IDS"] == 0
assert metrics["FN_CLEAR"] == 0
assert metrics["FP_CLEAR"] == 1
def test_no_association_made():
gt = Tracks()
gt.add_frame(0, [0], np.array([[10, 10, 11, 11]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["IDS"] == 0
assert metrics["FN_CLEAR"] == 1
assert metrics["FP_CLEAR"] == 1
assert metrics["MOTA"] == -1
assert np.isnan(metrics["MOTP"])
@pytest.mark.parametrize("threshold", [0.3, 0.5, 0.7])
def test_dist_threshold(threshold: float):
gt = Tracks()
gt.add_frame(
0,
[0, 1, 2, 3],
np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1]]),
)
hyp = Tracks()
hyp.add_frame(
0,
[0, 1, 2, 3],
np.array([[0, 0, 1, 0.2], [0, 0, 1, 0.4], [0, 0, 1, 0.6], [0, 0, 1, 0.8]]),
)
fn_res = {0.3: 3, 0.5: 2, 0.7: 1}
metrics = calculate_clearmot_metrics(gt, hyp, dist_threshold=threshold)
assert fn_res[threshold] == metrics["FN_CLEAR"]
def test_sticky_association():
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 0
assert metrics["IDS"] == 0
assert metrics["FP_CLEAR"] == 1
def test_mismatch():
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(1, [1], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 0
assert metrics["IDS"] == 1
assert metrics["FP_CLEAR"] == 0
def test_persistent_mismatch():
gt = Tracks()
gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))
gt.add_frame(2, [0], np.array([[0, 0, 1, 1]]))
hyp = Tracks()
hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))
hyp.add_frame(2, [1], np.array([[0, 0, 1, 1]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 1
assert metrics["IDS"] == 1
assert metrics["FP_CLEAR"] == 0
def test_simple_case():
gt = Tracks()
gt.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
gt.add_frame(1, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
gt.add_frame(2, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))
hyp = Tracks()
hyp.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))
hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [1, 1, 2, 2]]))
hyp.add_frame(2, [2, 1], np.array([[0.05, 0.05, 1.05, 1.05], [2, 2, 3, 3]]))
metrics = calculate_clearmot_metrics(gt, hyp)
assert metrics["FN_CLEAR"] == 1
assert metrics["IDS"] == 1
assert metrics["FP_CLEAR"] == 1
assert metrics["MOTA"] == 0.5
assert metrics["MOTP"] == 0.0994008537355717
| true
| true
|
f7157086f3990ba862350c2dc2e8610185bd0247
| 1,377
|
py
|
Python
|
transcript/transcript/urls.py
|
Harrymissi/transcript-system
|
c7c3a8e505e4e8e5ca6ab5f934338bb8ff314260
|
[
"Apache-2.0"
] | 1
|
2019-02-25T23:17:18.000Z
|
2019-02-25T23:17:18.000Z
|
transcript/transcript/urls.py
|
Harrymissi/transcript-system
|
c7c3a8e505e4e8e5ca6ab5f934338bb8ff314260
|
[
"Apache-2.0"
] | null | null | null |
transcript/transcript/urls.py
|
Harrymissi/transcript-system
|
c7c3a8e505e4e8e5ca6ab5f934338bb8ff314260
|
[
"Apache-2.0"
] | null | null | null |
"""transcript URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import xadmin
from django.views.generic import TemplateView
from trans_sys.views import user_login, user_info, user_course, user_GPA, user_transcript, changeProfile, change_password
urlpatterns = [
path('xadmin/', xadmin.site.urls),
path('index/',TemplateView.as_view(template_name="index.html"),name = "index"),
path('login/', user_login, name="login"),
path('info/',user_info,name="info"),
path('course/',user_course,name="course"),
path('gpa/', user_GPA, name="gpa"),
path('transcript/', user_transcript, name="transcript"),
path('changeProfile', changeProfile, name="changeProfile"),
path('changePWD', change_password, name="changePWD" ),
]
| 43.03125
| 121
| 0.718228
|
from django.contrib import admin
from django.urls import path
import xadmin
from django.views.generic import TemplateView
from trans_sys.views import user_login, user_info, user_course, user_GPA, user_transcript, changeProfile, change_password
urlpatterns = [
path('xadmin/', xadmin.site.urls),
path('index/',TemplateView.as_view(template_name="index.html"),name = "index"),
path('login/', user_login, name="login"),
path('info/',user_info,name="info"),
path('course/',user_course,name="course"),
path('gpa/', user_GPA, name="gpa"),
path('transcript/', user_transcript, name="transcript"),
path('changeProfile', changeProfile, name="changeProfile"),
path('changePWD', change_password, name="changePWD" ),
]
| true
| true
|
f7157154f136fad7994d2221db333cf67ad7e9d1
| 6,774
|
py
|
Python
|
samples/client/petstore/python-experimental/petstore_api/models/grandparent_animal.py
|
jonnii/openapi-generator
|
b828860614df0b5207761c2a34c6a002fb56419b
|
[
"Apache-2.0"
] | 1
|
2021-01-26T15:23:10.000Z
|
2021-01-26T15:23:10.000Z
|
samples/client/petstore/python-experimental/petstore_api/models/grandparent_animal.py
|
jonnii/openapi-generator
|
b828860614df0b5207761c2a34c6a002fb56419b
|
[
"Apache-2.0"
] | 5
|
2021-03-10T19:39:24.000Z
|
2022-02-27T05:24:35.000Z
|
samples/client/petstore/python-experimental/petstore_api/models/grandparent_animal.py
|
jonnii/openapi-generator
|
b828860614df0b5207761c2a34c6a002fb56419b
|
[
"Apache-2.0"
] | 2
|
2020-08-06T08:52:02.000Z
|
2021-05-06T09:22:11.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from petstore_api.models import child_cat
except ImportError:
child_cat = sys.modules[
'petstore_api.models.child_cat']
try:
from petstore_api.models import child_dog
except ImportError:
child_dog = sys.modules[
'petstore_api.models.child_dog']
try:
from petstore_api.models import child_lizard
except ImportError:
child_lizard = sys.modules[
'petstore_api.models.child_lizard']
try:
from petstore_api.models import parent_pet
except ImportError:
parent_pet = sys.modules[
'petstore_api.models.parent_pet']
class GrandparentAnimal(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'pet_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
'ChildCat': child_cat.ChildCat,
'ChildDog': child_dog.ChildDog,
'ChildLizard': child_lizard.ChildLizard,
'ParentPet': parent_pet.ParentPet,
}
if not val:
return None
return {'pet_type': val}
attribute_map = {
'pet_type': 'pet_type', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_from_server',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, pet_type, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, _visited_composed_classes=(), **kwargs): # noqa: E501
"""grandparent_animal.GrandparentAnimal - a model defined in OpenAPI
Args:
pet_type (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.pet_type = pet_type
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 36.815217
| 174
| 0.602303
|
from __future__ import absolute_import
import re
import sys
import six
import nulltype
from petstore_api.model_utils import (
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from petstore_api.models import child_cat
except ImportError:
child_cat = sys.modules[
'petstore_api.models.child_cat']
try:
from petstore_api.models import child_dog
except ImportError:
child_dog = sys.modules[
'petstore_api.models.child_dog']
try:
from petstore_api.models import child_lizard
except ImportError:
child_lizard = sys.modules[
'petstore_api.models.child_lizard']
try:
from petstore_api.models import parent_pet
except ImportError:
parent_pet = sys.modules[
'petstore_api.models.parent_pet']
class GrandparentAnimal(ModelNormal):
allowed_values = {
}
validations = {
}
additional_properties_type = None
@cached_property
def openapi_types():
return {
'pet_type': (str,),
}
@cached_property
def discriminator():
val = {
'ChildCat': child_cat.ChildCat,
'ChildDog': child_dog.ChildDog,
'ChildLizard': child_lizard.ChildLizard,
'ParentPet': parent_pet.ParentPet,
}
if not val:
return None
return {'pet_type': val}
attribute_map = {
'pet_type': 'pet_type',
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_from_server',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, pet_type, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, _visited_composed_classes=(), **kwargs):
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.pet_type = pet_type
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true
| true
|
f715723718b976ccc5b5a3dc7091ae07ddbd3d22
| 3,657
|
py
|
Python
|
plex/utils/datasource/thetvdb.py
|
spuriousdata/plex-cli
|
f2561d1a68917edbc9bbcce39a9548da86d2d7ef
|
[
"MIT"
] | 1
|
2018-03-26T20:06:57.000Z
|
2018-03-26T20:06:57.000Z
|
plex/utils/datasource/thetvdb.py
|
spuriousdata/plex-cli
|
f2561d1a68917edbc9bbcce39a9548da86d2d7ef
|
[
"MIT"
] | null | null | null |
plex/utils/datasource/thetvdb.py
|
spuriousdata/plex-cli
|
f2561d1a68917edbc9bbcce39a9548da86d2d7ef
|
[
"MIT"
] | null | null | null |
import json
import requests
class TVDBHttpException(Exception):
pass
class TVDB(object):
base = 'https://api.thetvdb.com'
def __init__(self, apikey=None, username=None, userkey=None):
self.username = username
self.userkey = userkey
self.apikey = apikey
self.authenticate()
def __get_url(self, path):
return self.base + '/' + path
def authenticate(self):
data = {
'apikey': self.apikey,
}
if self.username and self.userkey:
data.update({
'username': self.username,
'userkey': self.userkey,
})
response = requests.post(self.__get_url('login'),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
},
data=json.dumps(data))
rdata = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on login: %s" % rdata.get('Error', 'Unknown Error'))
self.__authtok = rdata['token']
def search(self, **kwargs):
response = requests.get(self.__get_url('search/series'),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
},
params=kwargs
)
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
def series_query(self, series=0, season=0):
response = requests.get(self.__get_url('series/{id}/episodes/query'.format(id=series)),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
},
params={'airedSeason': season}
)
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
def episode(self, id=0):
response = requests.get(self.__get_url('episodes/{id}'.format(id=id)),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
})
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
if __name__ == '__main__':
import sys
from pprint import pprint as pp
from argparse import ArgumentParser
from plex.utils.utils import s2d
parser = ArgumentParser()
parser.add_argument('-u', '--username', help='username')
parser.add_argument('-k', '--userkey', help='userkey')
parser.add_argument('-a', '--apikey', help='apikey', required=True)
parser.add_argument('ACTION', help='what to do')
parser.add_argument('ACTION_ARGS', help='key=val,key2=val2')
args = parser.parse_args(sys.argv[1:])
t = TVDB(args.apikey, args.username, args.userkey)
pp(getattr(t, args.ACTION)(**s2d(args.ACTION_ARGS)))
| 38.904255
| 106
| 0.508067
|
import json
import requests
class TVDBHttpException(Exception):
pass
class TVDB(object):
base = 'https://api.thetvdb.com'
def __init__(self, apikey=None, username=None, userkey=None):
self.username = username
self.userkey = userkey
self.apikey = apikey
self.authenticate()
def __get_url(self, path):
return self.base + '/' + path
def authenticate(self):
data = {
'apikey': self.apikey,
}
if self.username and self.userkey:
data.update({
'username': self.username,
'userkey': self.userkey,
})
response = requests.post(self.__get_url('login'),
headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
},
data=json.dumps(data))
rdata = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on login: %s" % rdata.get('Error', 'Unknown Error'))
self.__authtok = rdata['token']
def search(self, **kwargs):
response = requests.get(self.__get_url('search/series'),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
},
params=kwargs
)
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
def series_query(self, series=0, season=0):
response = requests.get(self.__get_url('series/{id}/episodes/query'.format(id=series)),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
},
params={'airedSeason': season}
)
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
def episode(self, id=0):
response = requests.get(self.__get_url('episodes/{id}'.format(id=id)),
headers={
'Accept': 'application/json',
'Authorization': 'Bearer %s' % self.__authtok,
})
data = response.json()
if response.status_code != 200:
raise TVDBHttpException("non 200 response on search: %s" % data.get('Error', 'Unknown Error'))
return data
if __name__ == '__main__':
import sys
from pprint import pprint as pp
from argparse import ArgumentParser
from plex.utils.utils import s2d
parser = ArgumentParser()
parser.add_argument('-u', '--username', help='username')
parser.add_argument('-k', '--userkey', help='userkey')
parser.add_argument('-a', '--apikey', help='apikey', required=True)
parser.add_argument('ACTION', help='what to do')
parser.add_argument('ACTION_ARGS', help='key=val,key2=val2')
args = parser.parse_args(sys.argv[1:])
t = TVDB(args.apikey, args.username, args.userkey)
pp(getattr(t, args.ACTION)(**s2d(args.ACTION_ARGS)))
| true
| true
|
f71572afcd687fc4a51638572448889091aac7fe
| 615
|
py
|
Python
|
wework/migrations/0001_initial.py
|
edsion1107/pytest_backend
|
59caf69226b821497ee19673630226df24d34391
|
[
"BSD-3-Clause"
] | null | null | null |
wework/migrations/0001_initial.py
|
edsion1107/pytest_backend
|
59caf69226b821497ee19673630226df24d34391
|
[
"BSD-3-Clause"
] | 3
|
2020-02-11T23:52:19.000Z
|
2021-06-10T21:19:50.000Z
|
wework/migrations/0001_initial.py
|
edsion1107/pytest_backend
|
59caf69226b821497ee19673630226df24d34391
|
[
"BSD-3-Clause"
] | 1
|
2020-11-28T15:25:03.000Z
|
2020-11-28T15:25:03.000Z
|
# Generated by Django 2.1.7 on 2019-02-26 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create', models.DateTimeField(auto_created=True)),
('key', models.CharField(max_length=512)),
('expires_in', models.DateTimeField()),
],
),
]
| 25.625
| 114
| 0.573984
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create', models.DateTimeField(auto_created=True)),
('key', models.CharField(max_length=512)),
('expires_in', models.DateTimeField()),
],
),
]
| true
| true
|
f71573d18019e66119ed0720c4b4edddc4c1a5eb
| 987
|
py
|
Python
|
atom/nucleus/python/test/test_order_reconcile_return_object.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/test/test_order_reconcile_return_object.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/test/test_order_reconcile_return_object.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.order_reconcile_return_object import OrderReconcileReturnObject # noqa: E501
from nucleus_api.rest import ApiException
class TestOrderReconcileReturnObject(unittest.TestCase):
"""OrderReconcileReturnObject unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOrderReconcileReturnObject(self):
"""Test OrderReconcileReturnObject"""
# FIXME: construct object with mandatory attributes with example values
# model = nucleus_api.models.order_reconcile_return_object.OrderReconcileReturnObject() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.073171
| 109
| 0.733536
|
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.order_reconcile_return_object import OrderReconcileReturnObject
from nucleus_api.rest import ApiException
class TestOrderReconcileReturnObject(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testOrderReconcileReturnObject(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f71573ed4848e9b29e823af2889eea2f2d9b5fc1
| 822
|
py
|
Python
|
inventory/home/view.py
|
Rohitkuru/Smart-Linux-Box
|
0cc1b0c4ebc9edb35b2ba64b51f451d36af87304
|
[
"MIT"
] | null | null | null |
inventory/home/view.py
|
Rohitkuru/Smart-Linux-Box
|
0cc1b0c4ebc9edb35b2ba64b51f451d36af87304
|
[
"MIT"
] | 1
|
2021-03-07T07:59:47.000Z
|
2021-03-07T07:59:47.000Z
|
inventory/home/view.py
|
Rohitkuru/dynamic-linux-inventory
|
0cc1b0c4ebc9edb35b2ba64b51f451d36af87304
|
[
"MIT"
] | null | null | null |
from flask import Blueprint,render_template,request,flash
from inventory.backend.scripts import scan
from inventory.Crud.operation import add_record
from inventory.models import *
home = Blueprint("home_view",__name__)
@home.route("/",methods = ['GET','POST'])
def home_view():
if request.method == "POST":
find_result = scan(request.form['range'])
if type(find_result) == list:
flash("Search completed and Inventory updated")
add_record(find_result,request.form['range'])
return render_template("home.html",find_result=Linux_inventory.query.all())
else:
flash(find_result)
return render_template("home.html",find_result=Linux_inventory.query.all())
return render_template("home.html",find_result=Linux_inventory.query.all())
| 37.363636
| 87
| 0.70073
|
from flask import Blueprint,render_template,request,flash
from inventory.backend.scripts import scan
from inventory.Crud.operation import add_record
from inventory.models import *
home = Blueprint("home_view",__name__)
@home.route("/",methods = ['GET','POST'])
def home_view():
if request.method == "POST":
find_result = scan(request.form['range'])
if type(find_result) == list:
flash("Search completed and Inventory updated")
add_record(find_result,request.form['range'])
return render_template("home.html",find_result=Linux_inventory.query.all())
else:
flash(find_result)
return render_template("home.html",find_result=Linux_inventory.query.all())
return render_template("home.html",find_result=Linux_inventory.query.all())
| true
| true
|
f7157414e7e3ec2bdef8398e48beb4165dba07b9
| 16,669
|
py
|
Python
|
MAML-ADML/meta.py
|
robustmetalearning/robust-meta-learning
|
08fc3e9302c9fbd1fcfc3e001e0b080a3c783c81
|
[
"MIT"
] | null | null | null |
MAML-ADML/meta.py
|
robustmetalearning/robust-meta-learning
|
08fc3e9302c9fbd1fcfc3e001e0b080a3c783c81
|
[
"MIT"
] | null | null | null |
MAML-ADML/meta.py
|
robustmetalearning/robust-meta-learning
|
08fc3e9302c9fbd1fcfc3e001e0b080a3c783c81
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from learner import Learner
from copy import deepcopy
def zero_nontrainable_grads(grads, trainable_layers=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]):
    # Zero the gradients of the frozen layers in place. torch.autograd.grad returns a
    # tuple, and rebinding the loop variable (as the original code did) never touched
    # the caller's tensors, so the frozen layers were still being updated.
    for index, grad_tensor in enumerate(grads):
        if index not in trainable_layers:
            grad_tensor.zero_()
def inputsPGD(metalearner, net, inputs, targets, params = False, evaluate = False):
if evaluate:
attack_steps = metalearner.eval_attack_steps
else:
attack_steps = metalearner.attack_steps
x = inputs.detach()
if not metalearner.no_random_start:
x = x + torch.zeros_like(x).uniform_(-metalearner.attack_epsilon, metalearner.attack_epsilon)
for i in range(attack_steps):
x.requires_grad_()
with torch.enable_grad():
if params:
loss = F.cross_entropy(net(x, params), targets, size_average=False)
else:
loss = F.cross_entropy(net(x), targets, size_average=False)
grad = torch.autograd.grad(loss, [x])[0]
if metalearner.targeted:
x = x.detach() - metalearner.attack_step_size*torch.sign(grad.detach())
else:
x = x.detach() + metalearner.attack_step_size*torch.sign(grad.detach())
x = torch.min(torch.max(x, inputs - metalearner.attack_epsilon), inputs + metalearner.attack_epsilon)
x = torch.clamp(x, 0.0, 1.0)
return x
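# Usage sketch (illustrative; `meta` stands for any object exposing the attack_*
# attributes read above, and `net` for a callable model):
#   x_adv = inputsPGD(meta, meta.net, x_batch, y_batch, evaluate=True)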
class Meta(nn.Module):
"""
Meta Learner
"""
def __init__(self, args, config):
"""
:param args:
"""
super(Meta, self).__init__()
self.finetune_trainable = args.finetune_trainable
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.attack_query = args.attack_query
self.attack_support = args.attack_support
self.no_attack_validation = args.no_attack_validation
self.attack_epsilon = args.attack_epsilon
self.attack_step_size = args.attack_step_size
self.attack_steps = args.attack_steps
self.eval_attack_steps = args.eval_attack_steps
self.net = Learner(config, args.imgc, args.imgsz)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
self.no_random_start = args.no_random_start
self.targeted = args.targeted
def clip_grad_by_norm_(self, grad, max_norm):
"""
in-place gradient clipping.
:param grad: list of gradients
:param max_norm: maximum norm allowable
:return:
"""
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
def forward(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [b, setsz, c_, h, w]
:param y_spt: [b, setsz]
:param x_qry: [b, querysz, c_, h, w]
:param y_qry: [b, querysz]
:return:
"""
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
losses_q = [0 for _ in range(self.update_step + 1)] # losses_q[i] is the loss on step i
corrects = [0 for _ in range(self.update_step + 1)]
for i in range(task_num):
# 1. run the i-th task and compute loss for k=0
if self.attack_support:
logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i]), vars=None, bn_training=True)
else:
logits = self.net(x_spt[i], vars=None, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, self.net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[0] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[1] += loss_q
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step):
# 1. run the i-th task and compute loss for k=1~K-1
if self.attack_support:
logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i], params = fast_weights), fast_weights, bn_training=True)
else:
logits = self.net(x_spt[i], fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
if self.attack_query:
logits_q = self.net(inputsPGD(self, self.net, x_qry[i], y_qry[i], params = fast_weights), fast_weights, bn_training=True)
else:
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item() # convert to numpy
corrects[k + 1] = corrects[k + 1] + correct
# end of all tasks
# sum over all losses on query set across all tasks
loss_q = losses_q[-1] / task_num
# optimize theta parameters
self.meta_optim.zero_grad()
loss_q.backward()
# print('meta update')
# for p in self.net.parameters()[:5]:
# print(torch.norm(p).item())
self.meta_optim.step()
accs = np.array(corrects) / (querysz * task_num)
return accs
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
:return:
"""
assert len(x_spt.shape) == 4
print('Validating...')
querysz = x_qry.size(0)
natural_corrects = [0 for _ in range(self.update_step_test + 1)]
robust_corrects = [0 for _ in range(self.update_step_test + 1)]
# in order to not ruin the state of running_mean/variance and bn_weight/bias
        # we fine-tune a copy of the model instead of self.net
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, net.parameters(), bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[0] = natural_corrects[0] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[0] = robust_corrects[0] + robust_correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, fast_weights, bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
            natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[1] = natural_corrects[1] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[1] = robust_corrects[1] + robust_correct
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = net(x_qry, fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
natural_correct = torch.eq(pred_q, y_qry).sum().item() # convert to numpy
natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
with torch.no_grad():
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item() # convert to numpy
robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
del net
natural_accs = np.array(natural_corrects) / querysz
robust_accs = np.array(robust_corrects) / querysz
########################### DO THE SAME THING BUT ADVERSARIALLY TRAINED ON SUPPORT ########################
natural_corrects = [0 for _ in range(self.update_step_test + 1)]
robust_corrects = [0 for _ in range(self.update_step_test + 1)]
# in order to not ruin the state of running_mean/variance and bn_weight/bias
        # we fine-tune a copy of the model instead of self.net
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits = net(inputsPGD(self, net, x_spt, y_spt), bn_training=True)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, net.parameters(), bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[0] = natural_corrects[0] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[0] = robust_corrects[0] + robust_correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, fast_weights, bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
            natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[1] = natural_corrects[1] + natural_correct
# [setsz, nway]
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# [setsz]
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
# scalar
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[1] = robust_corrects[1] + robust_correct
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits = net(inputsPGD(self, net, x_spt, y_spt, params = fast_weights), fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = net(x_qry, fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
natural_correct = torch.eq(pred_q, y_qry).sum().item() # convert to numpy
natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
# loss_q will be overwritten and just keep the loss_q on last update step.
robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
with torch.no_grad():
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item() # convert to numpy
robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
del net
natural_accs_advTrained = np.array(natural_corrects) / querysz
robust_accs_advTrained = np.array(robust_corrects) / querysz
return natural_accs, robust_accs, natural_accs_advTrained, robust_accs_advTrained
def main():
pass
if __name__ == '__main__':
main()
| 43.183938
| 141
| 0.599736
|
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from learner import Learner
from copy import deepcopy
def zero_nontrainable_grads(grads, trainable_layers=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]):
    for index, grad_tensor in enumerate(grads):
        if index not in trainable_layers:
            grad_tensor.zero_()
def inputsPGD(metalearner, net, inputs, targets, params = False, evaluate = False):
if evaluate:
attack_steps = metalearner.eval_attack_steps
else:
attack_steps = metalearner.attack_steps
x = inputs.detach()
if not metalearner.no_random_start:
x = x + torch.zeros_like(x).uniform_(-metalearner.attack_epsilon, metalearner.attack_epsilon)
for i in range(attack_steps):
x.requires_grad_()
with torch.enable_grad():
if params:
loss = F.cross_entropy(net(x, params), targets, size_average=False)
else:
loss = F.cross_entropy(net(x), targets, size_average=False)
grad = torch.autograd.grad(loss, [x])[0]
if metalearner.targeted:
x = x.detach() - metalearner.attack_step_size*torch.sign(grad.detach())
else:
x = x.detach() + metalearner.attack_step_size*torch.sign(grad.detach())
x = torch.min(torch.max(x, inputs - metalearner.attack_epsilon), inputs + metalearner.attack_epsilon)
x = torch.clamp(x, 0.0, 1.0)
return x
class Meta(nn.Module):
def __init__(self, args, config):
super(Meta, self).__init__()
self.finetune_trainable = args.finetune_trainable
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.attack_query = args.attack_query
self.attack_support = args.attack_support
self.no_attack_validation = args.no_attack_validation
self.attack_epsilon = args.attack_epsilon
self.attack_step_size = args.attack_step_size
self.attack_steps = args.attack_steps
self.eval_attack_steps = args.eval_attack_steps
self.net = Learner(config, args.imgc, args.imgsz)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
self.no_random_start = args.no_random_start
self.targeted = args.targeted
def clip_grad_by_norm_(self, grad, max_norm):
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
def forward(self, x_spt, y_spt, x_qry, y_qry):
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
losses_q = [0 for _ in range(self.update_step + 1)]
corrects = [0 for _ in range(self.update_step + 1)]
for i in range(task_num):
if self.attack_support:
logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i]), vars=None, bn_training=True)
else:
logits = self.net(x_spt[i], vars=None, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, self.net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))
with torch.no_grad():
logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[0] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[0] = corrects[0] + correct
with torch.no_grad():
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[1] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step):
if self.attack_support:
logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i], params = fast_weights), fast_weights, bn_training=True)
else:
logits = self.net(x_spt[i], fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
if self.attack_query:
logits_q = self.net(inputsPGD(self, self.net, x_qry[i], y_qry[i], params = fast_weights), fast_weights, bn_training=True)
else:
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[k + 1] = corrects[k + 1] + correct
loss_q = losses_q[-1] / task_num
self.meta_optim.zero_grad()
loss_q.backward()
self.meta_optim.step()
accs = np.array(corrects) / (querysz * task_num)
return accs
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
assert len(x_spt.shape) == 4
print('Validating...')
querysz = x_qry.size(0)
natural_corrects = [0 for _ in range(self.update_step_test + 1)]
robust_corrects = [0 for _ in range(self.update_step_test + 1)]
net = deepcopy(self.net)
logits = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
with torch.no_grad():
logits_q = net(x_qry, net.parameters(), bn_training=True)
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[0] = natural_corrects[0] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[0] = robust_corrects[0] + robust_correct
with torch.no_grad():
logits_q = net(x_qry, fast_weights, bn_training=True)
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
            natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[1] = natural_corrects[1] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[1] = robust_corrects[1] + robust_correct
for k in range(1, self.update_step_test):
logits = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, fast_weights)
zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = net(x_qry, fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
with torch.no_grad():
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
del net
natural_accs = np.array(natural_corrects) / querysz
robust_accs = np.array(robust_corrects) / querysz
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
natural_correct = torch.eq(pred_q, y_qry).sum().item()
natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct
robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)
robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)
with torch.no_grad():
robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)
robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()
robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct
del net
natural_accs_advTrained = np.array(natural_corrects) / querysz
robust_accs_advTrained = np.array(robust_corrects) / querysz
return natural_accs, robust_accs, natural_accs_advTrained, robust_accs_advTrained
def main():
pass
if __name__ == '__main__':
main()
| true
| true
|
f715745078d64aff302f2395177ab959a49111ab
| 3,310
|
py
|
Python
|
steps/nnet3/train.py
|
ondrejklejch/learning_to_adapt
|
6de0b98370769596da16a1688582925ea2e1fa29
|
[
"Apache-2.0"
] | 18
|
2019-10-24T04:42:16.000Z
|
2021-11-24T03:07:59.000Z
|
steps/nnet3/train.py
|
choko/learning_to_adapt
|
6de0b98370769596da16a1688582925ea2e1fa29
|
[
"Apache-2.0"
] | null | null | null |
steps/nnet3/train.py
|
choko/learning_to_adapt
|
6de0b98370769596da16a1688582925ea2e1fa29
|
[
"Apache-2.0"
] | 4
|
2018-08-31T01:08:50.000Z
|
2019-05-10T12:12:57.000Z
|
import sys
import numpy as np
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from keras.models import Model
from keras.layers import Input, Activation, Conv1D, BatchNormalization
from keras.optimizers import Adam
from learning_to_adapt.model import LHUC, Renorm
from learning_to_adapt.utils import load_dataset, load_utt_to_spk, load_utt_to_pdfs, load_lda
import keras
import tensorflow as tf
config = tf.ConfigProto()
config.intra_op_parallelism_threads=1
config.inter_op_parallelism_threads=1
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
def create_model(hidden_dim=350, lda_path=None):
lda, bias = load_lda(lda_path)
lda = lda.reshape((5, 40, 200))
feats = Input(shape=(None, 40))
x = Conv1D(200, kernel_size=5, name="lda", trainable=False, weights=[lda, bias])(feats)
layers = [(1, 1), (2, 3), (2, 6), (2, 9), (2, 6), (1, 1)]
for i, (kernel_size, dilation_rate) in enumerate(layers):
name = "tdnn%d" % (i + 1)
x = Conv1D(hidden_dim, kernel_size=kernel_size, dilation_rate=dilation_rate, activation="relu", name="%s.affine" % name)(x)
x = BatchNormalization(name="%s.batchnorm" % name)(x)
x = LHUC(name="lhuc.%s" % name, trainable=False)(x)
y = Conv1D(4208, kernel_size=1, activation="softmax", name="output.affine")(x)
return Model(inputs=[feats], outputs=[y])
if __name__ == '__main__':
train_data = sys.argv[1]
val_data = sys.argv[2]
utt2spk = sys.argv[3]
pdfs = sys.argv[4]
left_context = int(sys.argv[5])
right_context = int(sys.argv[6])
lda_path = sys.argv[7]
output_path = sys.argv[8]
num_epochs = 400
batch_size = 256
learning_rate = 0.0015
utt_to_spk = load_utt_to_spk(utt2spk)
utt_to_pdfs = load_utt_to_pdfs(pdfs)
train_dataset = load_dataset(train_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.prefetch(1024)
x, _, y = train_dataset.make_one_shot_iterator().get_next()
val_dataset = load_dataset(val_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
val_dataset = val_dataset.take(512).cache().repeat()
val_x, _, val_y = val_dataset.make_one_shot_iterator().get_next()
model = create_model(600, lda_path)
model.compile(
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
optimizer=Adam(lr=learning_rate, amsgrad=True, clipvalue=1.)
)
callbacks = [
CSVLogger(output_path + "model.csv"),
ModelCheckpoint(filepath=output_path + "model.{epoch:02d}.h5", save_best_only=False, period=10),
ModelCheckpoint(filepath=output_path + "model.best.h5", save_best_only=True),
LearningRateScheduler(lambda epoch, lr: learning_rate - epoch * (learning_rate - learning_rate / 10) / num_epochs, verbose=0)
]
model.fit(x, y,
steps_per_epoch=2000,
epochs=num_epochs,
validation_data=(val_x, val_y),
validation_steps=512,
callbacks=callbacks
)
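# Invocation sketch (argument order follows the sys.argv reads above; every path and
# context value below is a placeholder, not taken from the original recipe):
#   python steps/nnet3/train.py train.scp dev.scp utt2spk pdfs.txt 16 12 lda.mat exp/model/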
| 37.613636
| 161
| 0.710876
|
import sys
import numpy as np
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from keras.models import Model
from keras.layers import Input, Activation, Conv1D, BatchNormalization
from keras.optimizers import Adam
from learning_to_adapt.model import LHUC, Renorm
from learning_to_adapt.utils import load_dataset, load_utt_to_spk, load_utt_to_pdfs, load_lda
import keras
import tensorflow as tf
config = tf.ConfigProto()
config.intra_op_parallelism_threads=1
config.inter_op_parallelism_threads=1
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
def create_model(hidden_dim=350, lda_path=None):
lda, bias = load_lda(lda_path)
lda = lda.reshape((5, 40, 200))
feats = Input(shape=(None, 40))
x = Conv1D(200, kernel_size=5, name="lda", trainable=False, weights=[lda, bias])(feats)
layers = [(1, 1), (2, 3), (2, 6), (2, 9), (2, 6), (1, 1)]
for i, (kernel_size, dilation_rate) in enumerate(layers):
name = "tdnn%d" % (i + 1)
x = Conv1D(hidden_dim, kernel_size=kernel_size, dilation_rate=dilation_rate, activation="relu", name="%s.affine" % name)(x)
x = BatchNormalization(name="%s.batchnorm" % name)(x)
x = LHUC(name="lhuc.%s" % name, trainable=False)(x)
y = Conv1D(4208, kernel_size=1, activation="softmax", name="output.affine")(x)
return Model(inputs=[feats], outputs=[y])
if __name__ == '__main__':
train_data = sys.argv[1]
val_data = sys.argv[2]
utt2spk = sys.argv[3]
pdfs = sys.argv[4]
left_context = int(sys.argv[5])
right_context = int(sys.argv[6])
lda_path = sys.argv[7]
output_path = sys.argv[8]
num_epochs = 400
batch_size = 256
learning_rate = 0.0015
utt_to_spk = load_utt_to_spk(utt2spk)
utt_to_pdfs = load_utt_to_pdfs(pdfs)
train_dataset = load_dataset(train_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.prefetch(1024)
x, _, y = train_dataset.make_one_shot_iterator().get_next()
val_dataset = load_dataset(val_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
val_dataset = val_dataset.take(512).cache().repeat()
val_x, _, val_y = val_dataset.make_one_shot_iterator().get_next()
model = create_model(600, lda_path)
model.compile(
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
optimizer=Adam(lr=learning_rate, amsgrad=True, clipvalue=1.)
)
callbacks = [
CSVLogger(output_path + "model.csv"),
ModelCheckpoint(filepath=output_path + "model.{epoch:02d}.h5", save_best_only=False, period=10),
ModelCheckpoint(filepath=output_path + "model.best.h5", save_best_only=True),
LearningRateScheduler(lambda epoch, lr: learning_rate - epoch * (learning_rate - learning_rate / 10) / num_epochs, verbose=0)
]
model.fit(x, y,
steps_per_epoch=2000,
epochs=num_epochs,
validation_data=(val_x, val_y),
validation_steps=512,
callbacks=callbacks
)
| true
| true
|
f71574ae5ca34081f8ffb4a0fb83b14cf338b46f
| 2,364
|
py
|
Python
|
tests/test_euromil.py
|
rse01/pyeuromil
|
17f7c800f6f10289d3211bd9d783d1f516594f6c
|
[
"MIT"
] | null | null | null |
tests/test_euromil.py
|
rse01/pyeuromil
|
17f7c800f6f10289d3211bd9d783d1f516594f6c
|
[
"MIT"
] | null | null | null |
tests/test_euromil.py
|
rse01/pyeuromil
|
17f7c800f6f10289d3211bd9d783d1f516594f6c
|
[
"MIT"
] | null | null | null |
""" Unit tests for euromil.py """
from datetime import date
import pytest
from pyeuromil import euro_results, euro_draw_dates, euro_stats
def test_euromil_results_year_not_exist():
""" results of year test (year does not exists) """
with pytest.raises(ValueError):
results = euro_results("abcd")
assert results is None
results = euro_results(1920)
assert results is None
results = euro_results(2999)
assert results is None
def test_euromil_results_invalid_date():
""" results method (invalid date) """
with pytest.raises(ValueError):
results = euro_results("111")
assert results is None
with pytest.raises(ValueError):
results = euro_results(date(2011, 1, 1), "111")
assert results is None
def test_euromil_results_no_param():
""" results method (no param) """
results = euro_results()
assert results[0].date.year == 2011
assert results[-1].date.year == 2020
def test_euromil_results_start_date_only():
""" results method (start_date only) """
results = euro_results(date(2012, 12, 12))
assert results[0].date == date(2012, 12, 28)
assert results[-1].date > date(2018, 1, 1)
def test_euromil_results_both_dates_empty():
""" results method (both dates, no results) """
results = euro_results(date(2012, 12, 12), date(2012, 12, 13))
assert results == []
def test_euromil_results_both_dates_wrong_order():
""" results method (end_date < start_date) """
results = euro_results(date(2018, 12, 12), date(2011, 12, 13))
assert results == []
def test_euromil_results_both_dates_one_result():
""" results method (end_date < start_date) """
results = euro_results(date(2018, 10, 18), date(2018, 10, 20))
assert len(results) == 1
assert results[0].numbers[0] == 1
assert results[0].stars[0] == 3
def test_euromil_draw_dates():
""" test draw_dates method """
assert date(2018, 10, 19) in euro_draw_dates()
assert date(2011, 6, 3) in euro_draw_dates(date(2011, 1, 1), date(2011, 12, 31))
assert date(2013, 11, 15) in euro_draw_dates(date(2013, 10, 30), date(2013, 11, 15))
def test_euromil_stats():
""" test euro_stats method """
stats = euro_stats(date(2017, 10, 27), date(2018, 10, 27))
assert (stats["st4"]) == 25
assert (stats["15"]) == 17
| 29.924051
| 88
| 0.661168
|
from datetime import date
import pytest
from pyeuromil import euro_results, euro_draw_dates, euro_stats
def test_euromil_results_year_not_exist():
with pytest.raises(ValueError):
results = euro_results("abcd")
assert results is None
results = euro_results(1920)
assert results is None
results = euro_results(2999)
assert results is None
def test_euromil_results_invalid_date():
with pytest.raises(ValueError):
results = euro_results("111")
assert results is None
with pytest.raises(ValueError):
results = euro_results(date(2011, 1, 1), "111")
assert results is None
def test_euromil_results_no_param():
results = euro_results()
assert results[0].date.year == 2011
assert results[-1].date.year == 2020
def test_euromil_results_start_date_only():
results = euro_results(date(2012, 12, 12))
assert results[0].date == date(2012, 12, 28)
assert results[-1].date > date(2018, 1, 1)
def test_euromil_results_both_dates_empty():
results = euro_results(date(2012, 12, 12), date(2012, 12, 13))
assert results == []
def test_euromil_results_both_dates_wrong_order():
results = euro_results(date(2018, 12, 12), date(2011, 12, 13))
assert results == []
def test_euromil_results_both_dates_one_result():
results = euro_results(date(2018, 10, 18), date(2018, 10, 20))
assert len(results) == 1
assert results[0].numbers[0] == 1
assert results[0].stars[0] == 3
def test_euromil_draw_dates():
assert date(2018, 10, 19) in euro_draw_dates()
assert date(2011, 6, 3) in euro_draw_dates(date(2011, 1, 1), date(2011, 12, 31))
assert date(2013, 11, 15) in euro_draw_dates(date(2013, 10, 30), date(2013, 11, 15))
def test_euromil_stats():
stats = euro_stats(date(2017, 10, 27), date(2018, 10, 27))
assert (stats["st4"]) == 25
assert (stats["15"]) == 17
| true
| true
|
f71575f5748372d7306937a1f31ad94c872397b7
| 16,014
|
py
|
Python
|
nemo/collections/nlp/data/data_utils/data_preprocessing.py
|
madhukarkm/NeMo
|
648c97f076147684bee6aaada209f2f20adcaf5d
|
[
"Apache-2.0"
] | 4,145
|
2019-09-13T08:29:43.000Z
|
2022-03-31T18:31:44.000Z
|
nemo/collections/nlp/data/data_utils/data_preprocessing.py
|
madhukarkm/NeMo
|
648c97f076147684bee6aaada209f2f20adcaf5d
|
[
"Apache-2.0"
] | 2,031
|
2019-09-17T16:51:39.000Z
|
2022-03-31T23:52:41.000Z
|
nemo/collections/nlp/data/data_utils/data_preprocessing.py
|
madhukarkm/NeMo
|
648c97f076147684bee6aaada209f2f20adcaf5d
|
[
"Apache-2.0"
] | 1,041
|
2019-09-13T10:08:21.000Z
|
2022-03-30T06:37:38.000Z
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import random
import re
import string
from collections import Counter
import numpy as np
import torch
from tqdm.auto import tqdm
from nemo.utils import logging
from nemo.utils.env_var_parsing import get_envint
__all__ = [
'DataProcessor',
'get_label_stats',
'partition_data',
'write_files',
'write_data',
'create_dataset',
'read_csv',
'get_dataset',
'partition',
'map_entities',
'get_entities',
'get_data',
'reverse_dict',
'get_intent_labels',
'get_stats',
'DATABASE_EXISTS_TMP',
'MODE_EXISTS_TMP',
'is_whitespace',
'write_vocab',
'if_exist',
'remove_punctuation_from_sentence',
'dataset_to_ids',
'get_freq_weights',
'fill_class_weights',
'normalize_answer',
'get_labels_to_labels_id_mapping',
'get_vocab',
'find_newlines',
'load_data_indices',
'chinese_punctuation',
'check_chinese_char',
'normalize_chinese_answer',
]
DATABASE_EXISTS_TMP = '{} dataset has already been processed and stored at {}'
MODE_EXISTS_TMP = '{} mode of {} dataset has already been processed and stored at {}'
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
# if sys.version_info[0] == 2:
# line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
chinese_punctuation = {
'——',
'‘',
'’',
'“',
'”',
'…',
'、',
'。',
'〈',
'〉',
'《',
'》',
'「',
'」',
'『',
'』',
'【',
'】',
'〔',
'〕',
'!',
'(',
')',
',',
'.',
':',
';',
'?',
}
def check_chinese_char(ch):
"""Check if a character is in Chinese."""
if u'\u4e00' <= ch <= u'\u9fff' or ch in chinese_punctuation:
return True
else:
return False
def normalize_chinese_answer(text):
"""Remove the Chinese punctuation and separate Chinese answers to char-level"""
def remove_punc(text):
exclude = chinese_punctuation
return ''.join(ch for ch in text if ch not in exclude)
def separate_char(text):
ch_list = []
for ch in text:
ch_list.append(ch)
return ch_list
return separate_char(remove_punc(text))
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
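# Example (illustrative): normalize_answer("The  Cat!") -> "cat"
# (lower-cases, strips punctuation and articles, then collapses whitespace).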
def get_label_stats(labels, outfile='stats.tsv', verbose=True):
'''
Args:
labels: list of all labels
outfile: path to the file where to save label stats
Returns:
total (int): total number of labels
        freq_dict (dict): mapping from each label to its frequency
max id of the labels
'''
labels = Counter(labels)
total = sum(labels.values())
out = open(outfile, 'w')
i = 0
freq_dict = {}
label_frequencies = labels.most_common()
for k, v in label_frequencies:
out.write(f'{k}\t\t{round(v/total,5)}\t\t{v}\n')
if verbose and i < 3:
logging.info(f'label: {k}, {v} out of {total} ({(v / total)*100.0:.2f}%).')
i += 1
freq_dict[k] = v
    out.close()
    return total, freq_dict, max(labels.keys())
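# Worked example (values illustrative): get_label_stats([0, 0, 1], '/tmp/stats.tsv')
# returns total=3, freq_dict={0: 2, 1: 1} and the largest label id 1, and writes one
# tab-separated line per label to the output file.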
def partition_data(intent_queries, slot_tags, split=0.1):
n = len(intent_queries)
n_dev = int(n * split)
dev_idx = set(random.sample(range(n), n_dev))
dev_intents, dev_slots, train_intents, train_slots = [], [], [], []
dev_intents.append('sentence\tlabel\n')
train_intents.append('sentence\tlabel\n')
for i, item in enumerate(intent_queries):
if i in dev_idx:
dev_intents.append(item)
dev_slots.append(slot_tags[i])
else:
train_intents.append(item)
train_slots.append(slot_tags[i])
return train_intents, train_slots, dev_intents, dev_slots
def write_files(data, outfile):
with open(outfile, 'w') as f:
for item in data:
item = f'{item.strip()}\n'
f.write(item)
def write_data(data, slot_dict, intent_dict, outfold, mode, uncased):
intent_file = open(f'{outfold}/{mode}.tsv', 'w')
intent_file.write('sentence\tlabel\n')
slot_file = open(f'{outfold}/{mode}_slots.tsv', 'w')
for tokens, slots, intent in data:
text = ' '.join(tokens)
if uncased:
text = text.lower()
intent_file.write(f'{text}\t{intent_dict[intent]}\n')
slots = [str(slot_dict[slot]) for slot in slots]
slot_file.write(' '.join(slots) + '\n')
intent_file.close()
slot_file.close()
def create_dataset(train, dev, slots, intents, uncased, outfold):
os.makedirs(outfold, exist_ok=True)
if 'O' in slots:
slots.remove('O')
slots = sorted(list(slots)) + ['O']
intents = sorted(list(intents))
slots = write_vocab(slots, f'{outfold}/dict.slots.csv')
intents = write_vocab(intents, f'{outfold}/dict.intents.csv')
write_data(train, slots, intents, outfold, 'train', uncased)
write_data(dev, slots, intents, outfold, 'test', uncased)
def read_csv(file_path):
rows = []
with open(file_path, 'r') as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
rows.append(row)
return rows
def get_dataset(files, dev_split=0.1):
# entity2value, value2entity = get_entities(files)
data, slots, intents = get_data(files)
if len(data) == 1:
train, dev = partition(data[0], split=dev_split)
else:
train, dev = data[0], data[1]
return train, dev, slots, intents
def partition(data, split=0.1):
n = len(data)
n_dev = int(n * split)
dev_idx = set(random.sample(range(n), n_dev))
dev, train = [], []
for i, item in enumerate(data):
if i in dev_idx:
dev.append(item)
else:
train.append(item)
return train, dev
def map_entities(entity2value, entities):
for key in entities:
if 'data' in entities[key]:
if key not in entity2value:
entity2value[key] = set([])
values = []
for value in entities[key]['data']:
values.append(value['value'])
values.extend(value['synonyms'])
entity2value[key] = entity2value[key] | set(values)
return entity2value
def get_entities(files):
entity2value = {}
for file in files:
with open(file, 'r') as json_file:
data = json.load(json_file)
entity2value = map_entities(entity2value, data['entities'])
value2entity = reverse_dict(entity2value)
return entity2value, value2entity
def get_data(files):
all_data, all_slots, all_intents = [], set(['O']), set()
for file in files:
file_data = []
with open(file, 'r') as json_file:
data = json.load(json_file)
for intent in data['intents']:
all_intents.add(intent)
utterances = data['intents'][intent]['utterances']
for utterance in utterances:
tokens, slots = [], []
for frag in utterance['data']:
frag_tokens = frag['text'].strip().split()
tokens.extend(frag_tokens)
if 'slot_name' not in frag:
slot = 'O'
else:
slot = frag['slot_name']
all_slots.add(slot)
slots.extend([slot] * len(frag_tokens))
file_data.append((tokens, slots, intent))
all_data.append(file_data)
return all_data, all_slots, all_intents
def reverse_dict(entity2value):
value2entity = {}
for entity in entity2value:
for value in entity2value[entity]:
value2entity[value] = entity
return value2entity
def get_intent_labels(intent_file):
labels = {}
label = 0
with open(intent_file, 'r') as f:
for line in f:
intent = line.strip()
labels[intent] = label
label += 1
return labels
def get_stats(lengths):
logging.info('Some stats of the lengths of the sequences:')
lengths = np.asarray(lengths)
logging.info(
f'Min: {np.min(lengths)} | \
Max: {np.max(lengths)} | \
Mean: {np.mean(lengths)} | \
Median: {np.median(lengths)}'
)
logging.info(f'75 percentile: {np.percentile(lengths, 75):.2f}')
logging.info(f'99 percentile: {np.percentile(lengths, 99):.2f}')
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def write_vocab(items, outfile):
vocab = {}
idx = 0
with open(outfile, 'w') as f:
for item in items:
f.write(item + '\n')
vocab[item] = idx
idx += 1
return vocab
def get_labels_to_labels_id_mapping(file):
'''
Reads labels from the file and returns labels to id mapping dictionary
Args:
file: path to file
Returns:
labels to id mapping dictionary
'''
lines = open(file, 'r').readlines()
lines = [line.strip() for line in lines if line.strip()]
label_ids = {lines[i]: i for i in range(len(lines))}
return label_ids
def if_exist(outfold, files):
if not os.path.exists(outfold):
return False
for file in files:
if not os.path.exists(f'{outfold}/{file}'):
return False
return True
def remove_punctuation_from_sentence(sentence):
sentence = re.sub('[' + string.punctuation + ']', '', sentence)
sentence = sentence.lower()
return sentence
def dataset_to_ids(dataset, tokenizer, cache_ids=False, add_bos_eos=True, cache_data_per_node=False, use_cache=False):
"""
Reads dataset from file line by line, tokenizes each line with tokenizer,
and returns list of lists which corresponds to ids of tokenized strings.
Args:
dataset (str): path to dataset
tokenizer: tokenizer to convert text into ids
cache_ids (bool): if True, ids are saved to disk as pickle file
with similar name (e.g., data.txt --> data.txt.pkl)
add_bos_eos (bool): whether to add <s> and </s> symbols (e.g., for NMT)
        cache_data_per_node (bool): Cache data on local_rank 0. Use when there is no shared filesystem.
use_cache (bool): Use cached ids if they exist.
Returns:
ids: list of ids which correspond to tokenized strings of the dataset
"""
cached_ids_dataset = dataset + str(".pkl")
if use_cache and os.path.isfile(cached_ids_dataset):
logging.info("Loading cached tokenized dataset ...")
ids = pickle.load(open(cached_ids_dataset, "rb"))
else:
logging.info(f"Tokenizing dataset {dataset}...")
data = open(dataset, "rb").readlines()
ids = []
for sentence in tqdm(data, desc='Tokenizing sentence'):
sent_ids = tokenizer.text_to_ids(sentence.decode("utf-8"))
if add_bos_eos:
sent_ids = [tokenizer.bos_id] + sent_ids + [tokenizer.eos_id]
ids.append(sent_ids)
if cache_ids and (
not torch.distributed.is_initialized() or (cache_data_per_node and get_envint("LOCAL_RANK", 0) == 0)
):
logging.info("Caching tokenized dataset ...")
pickle.dump(ids, open(cached_ids_dataset, "wb"))
return ids
def get_freq_weights(label_freq):
"""
    The goal is to give more weight to the classes with fewer samples so that they
    match the ones with higher frequencies. We achieve this by dividing the total
    count by (number of labels * frequency of each label) to calculate the weight.
"""
total_size = 0
for lf in label_freq.values():
total_size += lf
weighted_slots = {label: (total_size / (len(label_freq) * freq)) for label, freq in label_freq.items()}
return weighted_slots
def fill_class_weights(weights, max_id=-1):
"""
    Takes a dictionary mapping label ids to weights and builds a list of length max_id + 1 filled with those weights.
Missing labels in the dictionary would get value 1.
Args:
weights: dictionary of weights for labels, labels as keys and weights are their values
max_id: the largest label id in the dataset, default=-1 would consider the largest label in the weights dictionary as max_id
Returns:
weights_list: list of weights for labels
"""
if max_id < 0:
max_id = 0
for l in weights.keys():
max_id = max(max_id, l)
all_weights = [1.0] * (max_id + 1)
for i in range(len(all_weights)):
if i in weights:
all_weights[i] = weights[i]
return all_weights
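# Worked example for the two helpers above (a sketch; the numbers are illustrative and
# this helper function is not part of the original module):
def _example_class_weights():
    label_freq = {0: 10, 1: 90}
    weights = get_freq_weights(label_freq)          # {0: 5.0, 1: 0.5555...}
    return fill_class_weights(weights, max_id=2)    # [5.0, 0.5555..., 1.0]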
def get_vocab(file):
lines = open(file, 'r').readlines()
lines = [line.strip() for line in lines if line.strip()]
labels = {i: lines[i] for i in range(len(lines))}
return labels
def find_newlines(contents):
"""
    Yields the start offset of every non-empty line in the given bytes content.
"""
start = 0
while True:
try:
# index and split are much faster than Python for loops
new_start = contents.index(b"\n", start)
line = (
contents[start:new_start]
.replace(b"\xc2\x99", b" ")
.replace(b"\xc2\xa0", b" ")
.decode("utf-8", errors="ignore")
)
if len(line.split()) > 0:
yield start
start = new_start + 1
except ValueError:
break
def load_data_indices(idx_file: str, data_file: str, savename: str):
"""
    Loads the dataset index file if it exists
"""
data_dir = data_file[: data_file.rfind('/')]
mode = data_file[data_file.rfind('/') + 1 : data_file.rfind('.')]
idx_file = f"{data_dir}/{mode}_{savename}.pkl"
if os.path.isfile(idx_file):
# If the sentence indices file already exists, load from it
with open(idx_file, "rb") as f:
indices = pickle.load(f)
return indices, idx_file, data_dir
return None, idx_file, data_dir
| 29.710575
| 132
| 0.604783
|
import csv
import json
import os
import pickle
import random
import re
import string
from collections import Counter
import numpy as np
import torch
from tqdm.auto import tqdm
from nemo.utils import logging
from nemo.utils.env_var_parsing import get_envint
__all__ = [
'DataProcessor',
'get_label_stats',
'partition_data',
'write_files',
'write_data',
'create_dataset',
'read_csv',
'get_dataset',
'partition',
'map_entities',
'get_entities',
'get_data',
'reverse_dict',
'get_intent_labels',
'get_stats',
'DATABASE_EXISTS_TMP',
'MODE_EXISTS_TMP',
'is_whitespace',
'write_vocab',
'if_exist',
'remove_punctuation_from_sentence',
'dataset_to_ids',
'get_freq_weights',
'fill_class_weights',
'normalize_answer',
'get_labels_to_labels_id_mapping',
'get_vocab',
'find_newlines',
'load_data_indices',
'chinese_punctuation',
'check_chinese_char',
'normalize_chinese_answer',
]
DATABASE_EXISTS_TMP = '{} dataset has already been processed and stored at {}'
MODE_EXISTS_TMP = '{} mode of {} dataset has already been processed and stored at {}'
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
chinese_punctuation = {
'——',
'‘',
'’',
'“',
'”',
'…',
'、',
'。',
'〈',
'〉',
'《',
'》',
'「',
'」',
'『',
'』',
'【',
'】',
'〔',
'〕',
'!',
'(',
')',
',',
'.',
':',
';',
'?',
}
def check_chinese_char(ch):
if u'\u4e00' <= ch <= u'\u9fff' or ch in chinese_punctuation:
return True
else:
return False
def normalize_chinese_answer(text):
def remove_punc(text):
exclude = chinese_punctuation
return ''.join(ch for ch in text if ch not in exclude)
def separate_char(text):
ch_list = []
for ch in text:
ch_list.append(ch)
return ch_list
return separate_char(remove_punc(text))
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_label_stats(labels, outfile='stats.tsv', verbose=True):
labels = Counter(labels)
total = sum(labels.values())
out = open(outfile, 'w')
i = 0
freq_dict = {}
label_frequencies = labels.most_common()
for k, v in label_frequencies:
out.write(f'{k}\t\t{round(v/total,5)}\t\t{v}\n')
if verbose and i < 3:
logging.info(f'label: {k}, {v} out of {total} ({(v / total)*100.0:.2f}%).')
i += 1
freq_dict[k] = v
return total, freq_dict, max(labels.keys())
def partition_data(intent_queries, slot_tags, split=0.1):
n = len(intent_queries)
n_dev = int(n * split)
dev_idx = set(random.sample(range(n), n_dev))
dev_intents, dev_slots, train_intents, train_slots = [], [], [], []
dev_intents.append('sentence\tlabel\n')
train_intents.append('sentence\tlabel\n')
for i, item in enumerate(intent_queries):
if i in dev_idx:
dev_intents.append(item)
dev_slots.append(slot_tags[i])
else:
train_intents.append(item)
train_slots.append(slot_tags[i])
return train_intents, train_slots, dev_intents, dev_slots
def write_files(data, outfile):
with open(outfile, 'w') as f:
for item in data:
item = f'{item.strip()}\n'
f.write(item)
def write_data(data, slot_dict, intent_dict, outfold, mode, uncased):
intent_file = open(f'{outfold}/{mode}.tsv', 'w')
intent_file.write('sentence\tlabel\n')
slot_file = open(f'{outfold}/{mode}_slots.tsv', 'w')
for tokens, slots, intent in data:
text = ' '.join(tokens)
if uncased:
text = text.lower()
intent_file.write(f'{text}\t{intent_dict[intent]}\n')
slots = [str(slot_dict[slot]) for slot in slots]
slot_file.write(' '.join(slots) + '\n')
intent_file.close()
slot_file.close()
def create_dataset(train, dev, slots, intents, uncased, outfold):
os.makedirs(outfold, exist_ok=True)
if 'O' in slots:
slots.remove('O')
slots = sorted(list(slots)) + ['O']
intents = sorted(list(intents))
slots = write_vocab(slots, f'{outfold}/dict.slots.csv')
intents = write_vocab(intents, f'{outfold}/dict.intents.csv')
write_data(train, slots, intents, outfold, 'train', uncased)
write_data(dev, slots, intents, outfold, 'test', uncased)
def read_csv(file_path):
rows = []
with open(file_path, 'r') as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
rows.append(row)
return rows
def get_dataset(files, dev_split=0.1):
data, slots, intents = get_data(files)
if len(data) == 1:
train, dev = partition(data[0], split=dev_split)
else:
train, dev = data[0], data[1]
return train, dev, slots, intents
def partition(data, split=0.1):
n = len(data)
n_dev = int(n * split)
dev_idx = set(random.sample(range(n), n_dev))
dev, train = [], []
for i, item in enumerate(data):
if i in dev_idx:
dev.append(item)
else:
train.append(item)
return train, dev
def map_entities(entity2value, entities):
for key in entities:
if 'data' in entities[key]:
if key not in entity2value:
entity2value[key] = set([])
values = []
for value in entities[key]['data']:
values.append(value['value'])
values.extend(value['synonyms'])
entity2value[key] = entity2value[key] | set(values)
return entity2value
def get_entities(files):
entity2value = {}
for file in files:
with open(file, 'r') as json_file:
data = json.load(json_file)
entity2value = map_entities(entity2value, data['entities'])
value2entity = reverse_dict(entity2value)
return entity2value, value2entity
def get_data(files):
all_data, all_slots, all_intents = [], set(['O']), set()
for file in files:
file_data = []
with open(file, 'r') as json_file:
data = json.load(json_file)
for intent in data['intents']:
all_intents.add(intent)
utterances = data['intents'][intent]['utterances']
for utterance in utterances:
tokens, slots = [], []
for frag in utterance['data']:
frag_tokens = frag['text'].strip().split()
tokens.extend(frag_tokens)
if 'slot_name' not in frag:
slot = 'O'
else:
slot = frag['slot_name']
all_slots.add(slot)
slots.extend([slot] * len(frag_tokens))
file_data.append((tokens, slots, intent))
all_data.append(file_data)
return all_data, all_slots, all_intents
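# Sketch of the JSON layout this parser expects (field values are hypothetical):
#   {"intents": {"GetWeather": {"utterances": [{"data": [
#       {"text": "weather in "}, {"text": "paris", "slot_name": "city"}]}]}}}
# Tokens coming from fragments without a "slot_name" are tagged 'O'; the others inherit the
# fragment's slot name, one tag per whitespace-separated token.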
def reverse_dict(entity2value):
value2entity = {}
for entity in entity2value:
for value in entity2value[entity]:
value2entity[value] = entity
return value2entity
def get_intent_labels(intent_file):
labels = {}
label = 0
with open(intent_file, 'r') as f:
for line in f:
intent = line.strip()
labels[intent] = label
label += 1
return labels
def get_stats(lengths):
logging.info('Some stats of the lengths of the sequences:')
lengths = np.asarray(lengths)
logging.info(
f'Min: {np.min(lengths)} | \
Max: {np.max(lengths)} | \
Mean: {np.mean(lengths)} | \
Median: {np.median(lengths)}'
)
logging.info(f'75 percentile: {np.percentile(lengths, 75):.2f}')
logging.info(f'99 percentile: {np.percentile(lengths, 99):.2f}')
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def write_vocab(items, outfile):
vocab = {}
idx = 0
with open(outfile, 'w') as f:
for item in items:
f.write(item + '\n')
vocab[item] = idx
idx += 1
return vocab
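# Example (illustrative): write_vocab(['B-LOC', 'I-LOC', 'O'], 'dict.slots.csv') writes one
# label per line and returns {'B-LOC': 0, 'I-LOC': 1, 'O': 2}.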
def get_labels_to_labels_id_mapping(file):
lines = open(file, 'r').readlines()
lines = [line.strip() for line in lines if line.strip()]
label_ids = {lines[i]: i for i in range(len(lines))}
return label_ids
def if_exist(outfold, files):
if not os.path.exists(outfold):
return False
for file in files:
if not os.path.exists(f'{outfold}/{file}'):
return False
return True
def remove_punctuation_from_sentence(sentence):
sentence = re.sub('[' + string.punctuation + ']', '', sentence)
sentence = sentence.lower()
return sentence
def dataset_to_ids(dataset, tokenizer, cache_ids=False, add_bos_eos=True, cache_data_per_node=False, use_cache=False):
    cached_ids_dataset = dataset + ".pkl"
if use_cache and os.path.isfile(cached_ids_dataset):
logging.info("Loading cached tokenized dataset ...")
ids = pickle.load(open(cached_ids_dataset, "rb"))
else:
logging.info(f"Tokenizing dataset {dataset}...")
data = open(dataset, "rb").readlines()
ids = []
for sentence in tqdm(data, desc='Tokenizing sentence'):
sent_ids = tokenizer.text_to_ids(sentence.decode("utf-8"))
if add_bos_eos:
sent_ids = [tokenizer.bos_id] + sent_ids + [tokenizer.eos_id]
ids.append(sent_ids)
if cache_ids and (
not torch.distributed.is_initialized() or (cache_data_per_node and get_envint("LOCAL_RANK", 0) == 0)
):
logging.info("Caching tokenized dataset ...")
pickle.dump(ids, open(cached_ids_dataset, "wb"))
return ids
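# Usage sketch (the tokenizer object and file path are assumptions, not part of this file):
#   ids = dataset_to_ids('train.en', tokenizer, cache_ids=True, add_bos_eos=True)
# Each entry of ids is [tokenizer.bos_id, tok_1, ..., tok_n, tokenizer.eos_id] for one line of
# the input file; with cache_ids=True the result may also be pickled to '<dataset>.pkl',
# subject to the distributed-rank check above.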
def get_freq_weights(label_freq):
total_size = 0
for lf in label_freq.values():
total_size += lf
weighted_slots = {label: (total_size / (len(label_freq) * freq)) for label, freq in label_freq.items()}
return weighted_slots
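# Worked example (illustrative): get_freq_weights({0: 8, 1: 2}) gives total_size = 10 and
# weights {0: 10 / (2 * 8) = 0.625, 1: 10 / (2 * 2) = 2.5}, so rarer labels get larger weights.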
def fill_class_weights(weights, max_id=-1):
if max_id < 0:
max_id = 0
for l in weights.keys():
max_id = max(max_id, l)
all_weights = [1.0] * (max_id + 1)
for i in range(len(all_weights)):
if i in weights:
all_weights[i] = weights[i]
return all_weights
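# Worked example (illustrative): fill_class_weights({0: 0.625, 2: 2.5}, max_id=3) returns
# [0.625, 1.0, 2.5, 1.0]; ids absent from the dict keep the default weight of 1.0.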
def get_vocab(file):
lines = open(file, 'r').readlines()
lines = [line.strip() for line in lines if line.strip()]
labels = {i: lines[i] for i in range(len(lines))}
return labels
def find_newlines(contents):
start = 0
while True:
try:
new_start = contents.index(b"\n", start)
line = (
contents[start:new_start]
.replace(b"\xc2\x99", b" ")
.replace(b"\xc2\xa0", b" ")
.decode("utf-8", errors="ignore")
)
if len(line.split()) > 0:
yield start
start = new_start + 1
except ValueError:
break
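# Illustrative behaviour: for contents = b"foo\n\nbar\n" the generator yields 0 and 5, the
# byte offsets at which the non-empty lines "foo" and "bar" start; the empty line is skipped.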
def load_data_indices(idx_file: str, data_file: str, savename: str):
data_dir = data_file[: data_file.rfind('/')]
mode = data_file[data_file.rfind('/') + 1 : data_file.rfind('.')]
idx_file = f"{data_dir}/{mode}_{savename}.pkl"
if os.path.isfile(idx_file):
with open(idx_file, "rb") as f:
indices = pickle.load(f)
return indices, idx_file, data_dir
return None, idx_file, data_dir
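# Usage sketch (paths are hypothetical): load_data_indices('unused.pkl', '/data/train.json',
# 'sentence_indices') looks for /data/train_sentence_indices.pkl and returns
# (indices, idx_file, data_dir) when that pickle exists, otherwise (None, idx_file, data_dir);
# note that the idx_file argument is recomputed from data_file and savename.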
| true
| true
|
f7157672a7aaadbb0f7dae37f20ea58ef3e5d0da
| 12,933
|
py
|
Python
|
lib/model/config.py
|
Kenneth-Wong/tf-faster-rcnn
|
a6bd798df1b9075ebdfeb7744fffc13226c3a65e
|
[
"MIT"
] | null | null | null |
lib/model/config.py
|
Kenneth-Wong/tf-faster-rcnn
|
a6bd798df1b9075ebdfeb7744fffc13226c3a65e
|
[
"MIT"
] | null | null | null |
lib/model/config.py
|
Kenneth-Wong/tf-faster-rcnn
|
a6bd798df1b9075ebdfeb7744fffc13226c3a65e
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Memory options
#
__C.MEM = edict()
# Number of memory iterations
__C.MEM.ITER = 2
# Height of the memory
__C.MEM.INIT_H = 20
# Width of the memory
__C.MEM.INIT_W = 20
# Channel of the memory
__C.MEM.C = 512
# Basic stds in the memory
__C.MEM.STD = 0.01
# Base stds in the memory update function for input features
__C.MEM.U_STD = 0.01
# Region classification
__C.MEM.C_STD = 0.01
# Feature to memory ratio
__C.MEM.FM_R = 1.
# Value to gate ratio
__C.MEM.VG_R = 1.
# FC to Pool ratio when combing the input
__C.MEM.FP_R = 1.
# Conv kernel size for memory
__C.MEM.CONV = 3
# Canonical region size
__C.MEM.CROP_SIZE = 7
# Context aggregation
__C.MEM.CT_L = 3
__C.MEM.CT_CONV = 3
__C.MEM.CT_FCONV = 3
# Input feature
__C.MEM.IN_L = 2
__C.MEM.IN_CONV = 3
# Memory final fc layer channels
__C.MEM.FC_C = 4096
__C.MEM.FC_L = 2
# The weight for the memory based prediction
__C.MEM.WEIGHT = 1.
__C.MEM.REL_WEIGHT = 1.
# Final supervision weight
__C.MEM.WEIGHT_FINAL = 1.
# The threshold to control the entropy of the distribution
__C.MEM.BETA = .5
# The dimension of predicted tag
__C.MEM.TAG_D = 16
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.RATE = 0.0005
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0001
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_ITERS = 500
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
__C.TRAIN.REL_BATCH_SIZE = 128
__C.TRAIN.POS_REL_FRACTION = 0.5
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
__C.TRAIN.BBOX_TARGET_NORMALIZATION_FILE = 'bbox_distribution.npy'
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
__C.TRAIN.USE_RPN_DB = True
__C.TRAIN.NUM_NEG_RELS = 128
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
__C.VG_DIR = osp.abspath(osp.join(__C.DATA_DIR, 'vg'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Use an end-to-end tensorflow model.
# Note: models in E2E tensorflow mode have only been tested in feed-forward mode,
# but these models are exportable to other tensorflow instances as GraphDef files.
__C.USE_E2E_TF = True
# Default pooling mode, only 'crop' is available
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8, 16, 32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5, 1, 2]
# Number of filters for the RPN layer
__C.RPN_CHANNELS = 512
__C.BOX_SCALE = 1024
__C.IMG_SCALE = 1024
cfg.BOTTLE_SCALE = 16.0
# EPS, a small number for numerical issue
__C.EPS = 1e-14
__C.GROUP_DIST_THRESH = 20.
__C.PUSH_WEIGHT = 0.1
__C.PULL_WEIGHT = 0.1
def get_output_dir(imdb, weights_filename):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def get_output_tb_dir(imdb, weights_filename):
"""Return the directory where tensorflow summaries are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))
_merge_a_into_b(yaml_cfg, __C)
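# Usage sketch (the YAML path is hypothetical): cfg_from_file('experiments/cfgs/res101.yml')
# parses the file into an EasyDict and merges it over the defaults defined above, raising on
# unknown keys or type mismatches via _merge_a_into_b.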
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
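# Usage sketch (keys shown already exist in __C): cfg_from_list(['TRAIN.SCALES', '(400,)',
# 'TRAIN.IMS_PER_BATCH', '2']) walks each dotted key, parses the paired value with
# literal_eval and overrides the matching cfg entry in place, asserting the value type is
# unchanged.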
| 27
| 91
| 0.710044
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Memory options
#
__C.MEM = edict()
# Number of memory iterations
__C.MEM.ITER = 2
# Height of the memory
__C.MEM.INIT_H = 20
# Width of the memory
__C.MEM.INIT_W = 20
# Channel of the memory
__C.MEM.C = 512
# Basic stds in the memory
__C.MEM.STD = 0.01
# Base stds in the memory update function for input features
__C.MEM.U_STD = 0.01
# Region classification
__C.MEM.C_STD = 0.01
# Feature to memory ratio
__C.MEM.FM_R = 1.
# Value to gate ratio
__C.MEM.VG_R = 1.
# FC to Pool ratio when combing the input
__C.MEM.FP_R = 1.
# Conv kernel size for memory
__C.MEM.CONV = 3
# Canonical region size
__C.MEM.CROP_SIZE = 7
# Context aggregation
__C.MEM.CT_L = 3
__C.MEM.CT_CONV = 3
__C.MEM.CT_FCONV = 3
# Input feature
__C.MEM.IN_L = 2
__C.MEM.IN_CONV = 3
# Memory final fc layer channels
__C.MEM.FC_C = 4096
__C.MEM.FC_L = 2
# The weight for the memory based prediction
__C.MEM.WEIGHT = 1.
__C.MEM.REL_WEIGHT = 1.
# Final supervision weight
__C.MEM.WEIGHT_FINAL = 1.
# The threshold to control the entropy of the distribution
__C.MEM.BETA = .5
# The dimension of predicted tag
__C.MEM.TAG_D = 16
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.RATE = 0.0005
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0001
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_ITERS = 500
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
__C.TRAIN.MAX_SIZE = 1000
__C.TRAIN.IMS_PER_BATCH = 1
__C.TRAIN.BATCH_SIZE = 128
__C.TRAIN.REL_BATCH_SIZE = 128
__C.TRAIN.POS_REL_FRACTION = 0.5
__C.TRAIN.FG_FRACTION = 0.25
__C.TRAIN.FG_THRESH = 0.5
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
__C.TRAIN.USE_FLIPPED = True
__C.TRAIN.BBOX_REG = True
__C.TRAIN.BBOX_THRESH = 0.5
__C.TRAIN.SNAPSHOT_ITERS = 5000
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
__C.TRAIN.BBOX_TARGET_NORMALIZATION_FILE = 'bbox_distribution.npy'
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.PROPOSAL_METHOD = 'gt'
__C.TRAIN.HAS_RPN = True
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
__C.TRAIN.RPN_FG_FRACTION = 0.5
__C.TRAIN.RPN_BATCHSIZE = 256
__C.TRAIN.RPN_NMS_THRESH = 0.7
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
__C.TRAIN.USE_ALL_GT = True
__C.TRAIN.USE_RPN_DB = True
__C.TRAIN.NUM_NEG_RELS = 128
__C.TEST = edict()
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
__C.RNG_SEED = 3
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
__C.VG_DIR = osp.abspath(osp.join(__C.DATA_DIR, 'vg'))
__C.MATLAB = 'matlab'
__C.EXP_DIR = 'default'
__C.USE_GPU_NMS = True
__C.USE_E2E_TF = True
__C.POOLING_MODE = 'crop'
__C.POOLING_SIZE = 7
__C.ANCHOR_SCALES = [8, 16, 32]
__C.ANCHOR_RATIOS = [0.5, 1, 2]
__C.RPN_CHANNELS = 512
__C.BOX_SCALE = 1024
__C.IMG_SCALE = 1024
cfg.BOTTLE_SCALE = 16.0
__C.EPS = 1e-14
__C.GROUP_DIST_THRESH = 20.
__C.PUSH_WEIGHT = 0.1
__C.PULL_WEIGHT = 0.1
def get_output_dir(imdb, weights_filename):
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def get_output_tb_dir(imdb, weights_filename):
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
if type(a) is not edict:
return
for k, v in a.items():
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
def cfg_from_file(filename):
import yaml
with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
try:
value = literal_eval(v)
except:
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
| true
| true
|
f715773b79dedecb2423d1c8a82ee28a03b25ac1
| 2,009
|
py
|
Python
|
tools.py
|
VieVie31/face_detection
|
fea010faedcad038f908bdab559eeb0f18ee5063
|
[
"MIT"
] | 4
|
2017-10-19T07:41:25.000Z
|
2018-11-03T16:10:16.000Z
|
tools.py
|
VieVie31/face_detection
|
fea010faedcad038f908bdab559eeb0f18ee5063
|
[
"MIT"
] | null | null | null |
tools.py
|
VieVie31/face_detection
|
fea010faedcad038f908bdab559eeb0f18ee5063
|
[
"MIT"
] | null | null | null |
import os
import re
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
def read_pgm(filename, byteorder='>'):
"""Return image data from a raw PGM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
return np.frombuffer(buffer,
dtype='u1' if int(maxval) < 256 else byteorder+'u2',
count=int(width)*int(height),
offset=len(header)
).reshape((int(height), int(width)))
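# Usage sketch (the file name is hypothetical): read_pgm('face.pgm') returns a
# (height, width) numpy array, dtype uint8 when maxval < 256 and big-endian uint16 otherwise.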
def imread(filename):
    if filename.endswith('.pgm'):
return read_pgm(filename)
else:
return cv2.imread(filename, 0)
def normalize(t):
return (t - t.mean()) / t.std()
def sliding_window(image, stepSize, windowSize):
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
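# Illustrative usage: sliding_window(img, stepSize=32, windowSize=(64, 64)) yields
# (x, y, window) tuples scanning left-to-right, top-to-bottom in 32-pixel steps; windows at
# the right and bottom borders may be smaller than 64x64 because of plain slicing.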
def pyramid(image, min_size=64, step=0.75):
w, h = image.shape
yield image
while min(w, h) > min_size:
w, h = image.shape
image = cv2.resize(image, (int(h * step), int(w * step)))
yield image
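# Illustrative behaviour: for a 256x256 input with min_size=64 and step=0.75 the generator
# yields sides of roughly 256, 192, 144, 108, 81 and 60 pixels, stopping once the smaller
# side is no longer above min_size.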
def distance(a, b):
return sum((a - b)**2) ** .5
def random_split(dataset, training_proportion):
random.shuffle(dataset)
return (
dataset[:int(training_proportion * len(dataset))],
dataset[int(training_proportion * len(dataset)):])
def hist_256(t):
hist = [0] * 256
for v in t:
hist[int(v)] += 1
return hist
def shuffled(lst):
random.shuffle(lst)
return lst
| 28.295775
| 77
| 0.558487
|
import os
import re
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
def read_pgm(filename, byteorder='>'):
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
return np.frombuffer(buffer,
dtype='u1' if int(maxval) < 256 else byteorder+'u2',
count=int(width)*int(height),
offset=len(header)
).reshape((int(height), int(width)))
def imread(filename):
    if filename.endswith('.pgm'):
return read_pgm(filename)
else:
return cv2.imread(filename, 0)
def normalize(t):
return (t - t.mean()) / t.std()
def sliding_window(image, stepSize, windowSize):
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
def pyramid(image, min_size=64, step=0.75):
w, h = image.shape
yield image
while min(w, h) > min_size:
w, h = image.shape
image = cv2.resize(image, (int(h * step), int(w * step)))
yield image
def distance(a, b):
return sum((a - b)**2) ** .5
def random_split(dataset, training_proportion):
random.shuffle(dataset)
return (
dataset[:int(training_proportion * len(dataset))],
dataset[int(training_proportion * len(dataset)):])
def hist_256(t):
hist = [0] * 256
for v in t:
hist[int(v)] += 1
return hist
def shuffled(lst):
random.shuffle(lst)
return lst
| true
| true
|
f71578c338458c847d71d9fa063b9ac9dfebe6cd
| 5,541
|
py
|
Python
|
Sporter/test_leeftijdsklassen.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-12-22T13:11:12.000Z
|
2021-12-22T13:11:12.000Z
|
Sporter/test_leeftijdsklassen.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | 9
|
2020-10-28T07:07:05.000Z
|
2021-06-28T20:05:37.000Z
|
Sporter/test_leeftijdsklassen.py
|
RamonvdW/nhb-apps
|
5a9f840bfe066cd964174515c06b806a7b170c69
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.test import TestCase
from django.utils import timezone
from NhbStructuur.models import NhbRegio, NhbVereniging
from .leeftijdsklassen import bereken_leeftijdsklassen
from .models import Sporter
from TestHelpers.e2ehelpers import E2EHelpers
import datetime
class TestSporterLeeftijdsklassen(E2EHelpers, TestCase):
""" unit tests voor de Schutter applicatie, module Leeftijdsklassen """
def setUp(self):
""" initialisatie van de test case """
self.account_admin = self.e2e_create_account_admin()
self.account_normaal = self.e2e_create_account('normaal', 'normaal@test.com', 'Normaal')
self.account_geenlid = self.e2e_create_account('geenlid', 'geenlid@test.com', 'Geen')
        # create a test club
ver = NhbVereniging()
ver.naam = "Grote Club"
ver.ver_nr = "1000"
ver.regio = NhbRegio.objects.get(pk=111)
        # the secretary cannot be filled in yet
ver.save()
        # create a test member
sporter = Sporter()
sporter.lid_nr = 100001
sporter.geslacht = "M"
sporter.voornaam = "Ramon"
sporter.achternaam = "de Tester"
sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
sporter.bij_vereniging = ver
sporter.account = self.account_normaal
sporter.email = sporter.account.email
sporter.save()
self.sporter1 = sporter
        # create a test member
sporter = Sporter()
sporter.lid_nr = 100002
sporter.geslacht = "V"
sporter.voornaam = "Ramona"
sporter.achternaam = "de Testerin"
sporter.email = ""
sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
sporter.bij_vereniging = ver
sporter.save()
def test_leeftijdsklassen(self):
now = timezone.now() # is in UTC
now = timezone.localtime(now) # convert to active timezone (say Europe/Amsterdam)
huidige_jaar = now.year
# aspirant
tup = bereken_leeftijdsklassen(huidige_jaar - 9)
self.assertEqual(tup, (huidige_jaar,
9,
['Aspirant', 'Aspirant', 'Aspirant', 'Aspirant', 'Aspirant'],
['Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten 11-12 jaar', 'Aspiranten 11-12 jaar'],
'Aspirant'))
# cadet (14..17)
tup = bereken_leeftijdsklassen(huidige_jaar - 13)
self.assertEqual(tup, (huidige_jaar,
13,
['Aspirant', 'Aspirant', 'Cadet', 'Cadet', 'Cadet'],
['Aspiranten 11-12 jaar', 'Cadetten', 'Cadetten', 'Cadetten', 'Cadetten'],
'Cadet'))
# junior (18..20)
tup = bereken_leeftijdsklassen(huidige_jaar - 18)
self.assertEqual(tup, (huidige_jaar,
18,
['Cadet', 'Junior', 'Junior', 'Junior', 'Senior'],
['Junioren', 'Junioren', 'Junioren', 'Senioren', 'Senioren'],
'Junior'))
# senior
tup = bereken_leeftijdsklassen(huidige_jaar - 21)
self.assertEqual(tup, (huidige_jaar,
21,
['Junior', 'Senior', 'Senior', 'Senior', 'Senior'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
# master
tup = bereken_leeftijdsklassen(huidige_jaar - 50)
self.assertEqual(tup, (huidige_jaar,
50,
['Senior', 'Master', 'Master', 'Master', 'Master'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
# veteraan
tup = bereken_leeftijdsklassen(huidige_jaar - 60)
self.assertEqual(tup, (huidige_jaar,
60,
['Master', 'Veteraan', 'Veteraan', 'Veteraan', 'Veteraan'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
def test_view(self):
        # without login
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/', follow=True)
self.assert403(resp)
        # logged in, not an NHB member
self.e2e_login(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/')
self.assert403(resp)
        # archer
self.e2e_login(self.account_normaal)
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/')
self.assertEqual(resp.status_code, 200) # 200 = OK
self.assert_html_ok(resp)
self.assert_template_used(resp, ('sporter/leeftijdsklassen.dtl', 'plein/site_layout.dtl'))
self.e2e_assert_other_http_commands_not_supported('/sporter/leeftijdsklassen/')
# end of file
| 41.044444
| 151
| 0.562714
|
from django.test import TestCase
from django.utils import timezone
from NhbStructuur.models import NhbRegio, NhbVereniging
from .leeftijdsklassen import bereken_leeftijdsklassen
from .models import Sporter
from TestHelpers.e2ehelpers import E2EHelpers
import datetime
class TestSporterLeeftijdsklassen(E2EHelpers, TestCase):
def setUp(self):
self.account_admin = self.e2e_create_account_admin()
self.account_normaal = self.e2e_create_account('normaal', 'normaal@test.com', 'Normaal')
self.account_geenlid = self.e2e_create_account('geenlid', 'geenlid@test.com', 'Geen')
ver = NhbVereniging()
ver.naam = "Grote Club"
ver.ver_nr = "1000"
ver.regio = NhbRegio.objects.get(pk=111)
ver.save()
sporter = Sporter()
sporter.lid_nr = 100001
sporter.geslacht = "M"
sporter.voornaam = "Ramon"
sporter.achternaam = "de Tester"
sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
sporter.bij_vereniging = ver
sporter.account = self.account_normaal
sporter.email = sporter.account.email
sporter.save()
self.sporter1 = sporter
sporter = Sporter()
sporter.lid_nr = 100002
sporter.geslacht = "V"
sporter.voornaam = "Ramona"
sporter.achternaam = "de Testerin"
sporter.email = ""
sporter.geboorte_datum = datetime.date(year=1972, month=3, day=4)
sporter.sinds_datum = datetime.date(year=2010, month=11, day=12)
sporter.bij_vereniging = ver
sporter.save()
def test_leeftijdsklassen(self):
now = timezone.now()
now = timezone.localtime(now)
huidige_jaar = now.year
tup = bereken_leeftijdsklassen(huidige_jaar - 9)
self.assertEqual(tup, (huidige_jaar,
9,
['Aspirant', 'Aspirant', 'Aspirant', 'Aspirant', 'Aspirant'],
['Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten <11 jaar', 'Aspiranten 11-12 jaar', 'Aspiranten 11-12 jaar'],
'Aspirant'))
tup = bereken_leeftijdsklassen(huidige_jaar - 13)
self.assertEqual(tup, (huidige_jaar,
13,
['Aspirant', 'Aspirant', 'Cadet', 'Cadet', 'Cadet'],
['Aspiranten 11-12 jaar', 'Cadetten', 'Cadetten', 'Cadetten', 'Cadetten'],
'Cadet'))
tup = bereken_leeftijdsklassen(huidige_jaar - 18)
self.assertEqual(tup, (huidige_jaar,
18,
['Cadet', 'Junior', 'Junior', 'Junior', 'Senior'],
['Junioren', 'Junioren', 'Junioren', 'Senioren', 'Senioren'],
'Junior'))
tup = bereken_leeftijdsklassen(huidige_jaar - 21)
self.assertEqual(tup, (huidige_jaar,
21,
['Junior', 'Senior', 'Senior', 'Senior', 'Senior'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
tup = bereken_leeftijdsklassen(huidige_jaar - 50)
self.assertEqual(tup, (huidige_jaar,
50,
['Senior', 'Master', 'Master', 'Master', 'Master'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
tup = bereken_leeftijdsklassen(huidige_jaar - 60)
self.assertEqual(tup, (huidige_jaar,
60,
['Master', 'Veteraan', 'Veteraan', 'Veteraan', 'Veteraan'],
['Senioren', 'Senioren', 'Senioren', 'Senioren', 'Senioren'],
'Senior'))
def test_view(self):
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/', follow=True)
self.assert403(resp)
self.e2e_login(self.account_admin)
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/')
self.assert403(resp)
self.e2e_login(self.account_normaal)
with self.assert_max_queries(20):
resp = self.client.get('/sporter/leeftijdsklassen/')
self.assertEqual(resp.status_code, 200)
self.assert_html_ok(resp)
self.assert_template_used(resp, ('sporter/leeftijdsklassen.dtl', 'plein/site_layout.dtl'))
self.e2e_assert_other_http_commands_not_supported('/sporter/leeftijdsklassen/')
| true
| true
|
f71579a7221e41b1b3740a7e825aa1b7bae7267b
| 6,193
|
py
|
Python
|
test/test_addressspace.py
|
dendisuhubdy/coriander
|
7df182981e5c4a8e043fea25d272d025a953f06d
|
[
"Apache-2.0"
] | 644
|
2017-05-21T05:25:20.000Z
|
2022-03-25T04:18:14.000Z
|
test/test_addressspace.py
|
hughperkins/cuda-ir-to-opencl
|
7c6b65bc08a25a6bce21efe7b86be8fa985597af
|
[
"Apache-2.0"
] | 82
|
2017-05-21T15:19:24.000Z
|
2022-01-30T01:41:44.000Z
|
test/test_addressspace.py
|
hughperkins/cuda-ir-to-opencl
|
7c6b65bc08a25a6bce21efe7b86be8fa985597af
|
[
"Apache-2.0"
] | 88
|
2017-05-21T01:31:16.000Z
|
2022-01-31T09:28:17.000Z
|
# Copyright Hugh Perkins 2016
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pyopencl as cl
import os
import subprocess
from test import test_common
from test.test_common import offset_type
def test_getelementptr_struct_local(context, q, float_data, float_data_gpu):
cu_source = """
struct MyStruct {
float* f0;
float* f1;
};
__global__ void foo(float *data) {
struct MyStruct astruct;
float *floats = astruct.f0;
}
"""
kernelName = test_common.mangle('foo', ['float *'])
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
# float_data_orig = np.copy(float_data)
# kernel(q, (32,), (32,), float_data_gpu, offset_type(0), cl.LocalMemory(4))
# cl.enqueue_copy(q, float_data, float_data_gpu)
# q.finish()
# # print('before', float_data_orig[:5])
# print('after', float_data[:5])
# assert np.abs(float_data_orig[1:32] - float_data[0:31]).max() <= 1e-4
def test_getelementptr_struct_global(context, q, float_data, float_data_gpu):
cu_source = """
struct MyStruct {
float* f0;
float* f1;
};
__global__ void foo(struct MyStruct mystruct) {
float *floats = mystruct.f0;
}
"""
# kernelName = test_common.mangle('foo', ['float *'])
kernelName = '_Z3foo8MyStruct'
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=3)
print('cl_sourcecode', cl_sourcecode)
kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
# float_data_orig = np.copy(float_data)
# kernel(q, (32,), (32,), float_data_gpu, offset_type(0), cl.LocalMemory(4))
# cl.enqueue_copy(q, float_data, float_data_gpu)
# q.finish()
# # print('before', float_data_orig[:5])
# print('after', float_data[:5])
# # assert np.abs(float_data_orig[1:32] - float_data[0:31]).max() <= 1e-4
def test_kernelparam_ll(context, q, float_data, float_data_gpu):
ll_code = """define void @mykernel(float * %p1) {
ret void
}
"""
cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
assert len([l for l in cl_sourcecode.split('\n') if l.strip().startswith('global float* p1')]) == 1
def test_load_globalfloatstar(context, q, float_data, float_data_gpu):
ll_code = """define void @mykernel(float * %p1) {
%1 = load float, float* %p1
ret void
}
"""
cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
assert len([l for l in cl_sourcecode.split('\n') if l.strip() == 'float v2;']) == 1
def x_test_play(context, q, float_data, float_data_gpu):
cu_source = """
__device__ void process(float *data) {
*data = 5.0f;
}
__device__ float process2(float value) {
process(&value);
return value;
}
__global__ void mykernel(float *data) {
float v = data[0];
float *v1 = &v;
*v1 = 5.0f;
data[0] = v;
data[0] = process2(data[0]);
}
"""
kernelName = test_common.mangle('mykernel', ['float *'])
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
# kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
# %1 = load float, float* %p1
# %2 = getelementptr
# def test_addr_of_float(context, q, float_data, float_data_gpu):
# ll_code = """define void @mykernel(float * %p1) {
# %1 = alloca float
# %2 = getelementptr float, float* %1, i64 0
# %3 = load float, float* %2
# %4 = getelementptr float, float* %1
# %5 = load float, float* %4
# ret void
# }
# """
# cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
# print('cl_sourcecode', cl_sourcecode)
# assert len([l for l in cl_sourcecode.split('\n') if l.strip() == 'float v2;']) == 1
def test_addr_of_float(context, q, float_data, float_data_gpu):
cu_code = """
__attribute__((global)) void mykernel(float *data) {
float v = data[0];
float *v1 = &v;
*v1 = 5.0f;
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
print('cl_code', cl_code)
def test_struct_byval(context, q, float_data, float_data_gpu):
cu_code = """
struct MyStruct {
float afloat;
int anint;
float *floatpointer;
float **floatstarstar;
};
__attribute__((global)) void mykernel(struct MyStruct myStruct) {
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernel8MyStruct', num_clmems=1)
print('cl_code', cl_code)
def test_internal_struct(context, q, float_data, float_data_gpu):
cu_code = """
struct MyStruct {
float afloat;
int anint;
float *floatpointer;
// float **floatstarstart;
};
__attribute__((device)) void processStruct(MyStruct *myStruct) {
myStruct->afloat = myStruct->floatpointer[0];
}
__attribute__((global)) void mykernel(float *data) {
float afloat = data[0];
float float2 = data[1];
struct MyStruct myStruct = { afloat, 3, &float2 };
processStruct(&myStruct);
data[2] = myStruct.afloat;
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
print('cl_code', cl_code)
| 32.088083
| 103
| 0.676893
|
import numpy as np
import pyopencl as cl
import os
import subprocess
from test import test_common
from test.test_common import offset_type
def test_getelementptr_struct_local(context, q, float_data, float_data_gpu):
cu_source = """
struct MyStruct {
float* f0;
float* f1;
};
__global__ void foo(float *data) {
struct MyStruct astruct;
float *floats = astruct.f0;
}
"""
kernelName = test_common.mangle('foo', ['float *'])
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
def test_getelementptr_struct_global(context, q, float_data, float_data_gpu):
cu_source = """
struct MyStruct {
float* f0;
float* f1;
};
__global__ void foo(struct MyStruct mystruct) {
float *floats = mystruct.f0;
}
"""
kernelName = '_Z3foo8MyStruct'
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=3)
print('cl_sourcecode', cl_sourcecode)
kernel = test_common.build_kernel(context, cl_sourcecode, kernelName)
def test_kernelparam_ll(context, q, float_data, float_data_gpu):
    ll_code = """define void @mykernel(float * %p1) {
ret void
}
"""
cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
assert len([l for l in cl_sourcecode.split('\n') if l.strip().startswith('global float* p1')]) == 1
def test_load_globalfloatstar(context, q, float_data, float_data_gpu):
ll_code = """define void @mykernel(float * %p1) {
%1 = load float, float* %p1
ret void
}
"""
cl_sourcecode = test_common.ll_to_cl(ll_code, "mykernel", num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
assert len([l for l in cl_sourcecode.split('\n') if l.strip() == 'float v2;']) == 1
def x_test_play(context, q, float_data, float_data_gpu):
cu_source = """
__device__ void process(float *data) {
*data = 5.0f;
}
__device__ float process2(float value) {
process(&value);
return value;
}
__global__ void mykernel(float *data) {
float v = data[0];
float *v1 = &v;
*v1 = 5.0f;
data[0] = v;
data[0] = process2(data[0]);
}
"""
kernelName = test_common.mangle('mykernel', ['float *'])
cl_sourcecode = test_common.cu_to_cl(cu_source, kernelName, num_clmems=1)
print('cl_sourcecode', cl_sourcecode)
# %1 = alloca float
# %2 = getelementptr float, float* %1, i64 0
# %3 = load float, float* %2
# %4 = getelementptr float, float* %1
# %5 = load float, float* %4
# ret void
# }
# """
def test_addr_of_float(context, q, float_data, float_data_gpu):
cu_code = """
__attribute__((global)) void mykernel(float *data) {
float v = data[0];
float *v1 = &v;
*v1 = 5.0f;
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
print('cl_code', cl_code)
def test_struct_byval(context, q, float_data, float_data_gpu):
cu_code = """
struct MyStruct {
float afloat;
int anint;
float *floatpointer;
float **floatstarstar;
};
__attribute__((global)) void mykernel(struct MyStruct myStruct) {
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernel8MyStruct', num_clmems=1)
print('cl_code', cl_code)
def test_internal_struct(context, q, float_data, float_data_gpu):
cu_code = """
struct MyStruct {
float afloat;
int anint;
float *floatpointer;
// float **floatstarstart;
};
__attribute__((device)) void processStruct(MyStruct *myStruct) {
myStruct->afloat = myStruct->floatpointer[0];
}
__attribute__((global)) void mykernel(float *data) {
float afloat = data[0];
float float2 = data[1];
struct MyStruct myStruct = { afloat, 3, &float2 };
processStruct(&myStruct);
data[2] = myStruct.afloat;
}
"""
ll_code = test_common.cu_to_devicell_noopt(cu_code)
print('ll_code', 'define ' + ll_code.split('define ')[1].split('}')[0] + '}')
cl_code = test_common.ll_to_cl(ll_code, '_Z8mykernelPf', num_clmems=1)
print('cl_code', cl_code)
| true
| true
|
f7157a1710b2e208b523118567fc8e95d752447c
| 16,786
|
py
|
Python
|
RL_TD3/src/pe_model.py
|
Crazy-Jack/RL4GRN
|
e683e17758eb468bd42e0ea0020e2246051c258c
|
[
"MIT"
] | null | null | null |
RL_TD3/src/pe_model.py
|
Crazy-Jack/RL4GRN
|
e683e17758eb468bd42e0ea0020e2246051c258c
|
[
"MIT"
] | null | null | null |
RL_TD3/src/pe_model.py
|
Crazy-Jack/RL4GRN
|
e683e17758eb468bd42e0ea0020e2246051c258c
|
[
"MIT"
] | 1
|
2020-12-14T09:32:36.000Z
|
2020-12-14T09:32:36.000Z
|
'''
The probabilistic ensemble dynamics model
'''
# pylint: disable=C0103, R0902, R0913, W0201, E0401, E1120
import time
import itertools
import numpy as np
import tensorflow as tf
from tensorflow import keras
from collections import defaultdict
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class PEModel(keras.Model):
'''
An individual Probabilistic Neural Network.
Multiple Networks with identical structure form the Probabilistic Ensemble.
Notice that each PEModel network predicts the mean and variance of
reward, done, delta_state in order.
    Therefore, the output layer has (state_dim + 1 + 1) * 2 units.
'''
def __init__(self, state_dim, action_dim):
super().__init__()
self.l1 = keras.layers.Dense(256, activation="relu")
self.l2 = keras.layers.Dense(256, activation="relu")
self.l3 = keras.layers.Dense(256, activation="relu")
# mean and variance for reward, done, delta_state (in this order)
# Note: we change done to not_done
self.l4 = keras.layers.Dense((state_dim + 2) * 2)
# this step to populate trainable_weights. Without this step,
# PE.trainable_weights will be empty.
self.forward(np.zeros((1, state_dim + action_dim)))
def forward(self, net_input):
'''
Calls the network on a batch of inputs.
net_input should have size (batch_size, state_dim+action_dim)
'''
out = self.l1(net_input)
out = self.l2(out)
out = self.l3(out)
out = self.l4(out)
return out
class PE():
'''
The probabilistic ensemble dynamics model class.
Contains code to initialize, train and then predict with the ensemble.
You will implement part of this class.
'''
def __init__(
self,
state_dim,
action_dim,
num_networks = 7,
num_elites = 5,
learning_rate = 1e-3,
):
self.num_networks = num_networks
self.num_elites = num_elites
self.networks = [PEModel(state_dim, action_dim) for i in range(num_networks)]
self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
self.state_dim = state_dim
self.action_dim = action_dim
self.output_dim = state_dim + 2
# For smoothing the log-variance output
self.max_logvar = tf.convert_to_tensor(-3 * np.ones([1, self.state_dim + 2]), \
dtype=tf.float32)
self.min_logvar = tf.convert_to_tensor(-7 * np.ones([1, self.state_dim + 2]), \
dtype=tf.float32)
self.total_it = 0
self._model_inds = list(range(self.num_networks)) # for choosing elite models in inference!
def get_output(self, output, ret_logvar=False):
"""
output: tf tensor, shape (batch_size, (state_dim+2) * 2)
Given network outputs, returns mean and log variance tf tensors if ret_logvar = True.
mean: shape (batch_size, state_dim + 2)
logvar: shape (batch_size, state_dim + 2)
Do not modify
"""
mean = output[:, 0:self.output_dim]
raw_v = output[:, self.output_dim:]
# Log variance smoothing
logvar = self.max_logvar - tf.math.softplus(self.max_logvar - raw_v)
logvar = self.min_logvar + tf.math.softplus(logvar - self.min_logvar)
if ret_logvar: # for training
return mean, logvar
return mean, tf.math.exp(logvar) # for testing
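    # Note on the smoothing above: the two softplus terms softly clamp the raw log-variance
    # into (min_logvar, max_logvar) = (-7, -3), which keeps exp(logvar) in a numerically safe
    # range for both training and sampling.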
def _train_loss_one(self, network, train_in, train_targ):
'''
Compute the MLE Training Loss for a given Probabilistic Neural Network.
train_in: tf tensor, shape (batch_size, state_dim + action_dim)
tarin_targ: tf tensor, shape (batch_size, state_dim + 2), target output
This function should compute the Gaussian MLE loss, summed across the entire batch.
        Note: the targets contain the not_done signal rather than done.
'''
# raise NotImplementedError
        pred_mean, pred_logvar = self.get_output(network.forward(train_in), ret_logvar=True)
        train_loss = (pred_mean - train_targ) ** 2 / tf.math.exp(pred_logvar) + pred_logvar  # [batch_size, state_dim + 2]
train_loss = tf.math.reduce_sum(train_loss)
# regularization step. populate train_loss with correct Gaussian MLE loss
train_loss += 0.01 * (tf.math.reduce_sum(self.max_logvar) - \
tf.math.reduce_sum(self.min_logvar))
return train_loss
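    # For reference, the per-dimension loss above is the Gaussian negative log-likelihood up
    # to constants: (mean - target)^2 / exp(logvar) + logvar, summed over the batch and over
    # all state_dim + 2 output dimensions, plus the small logvar-bound regulariser.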
def _MSE_loss(self, valid_in, valid_targ, final=False):
"""
Computes the MSE loss for each Probabilistic Neural Network, for validation only.
valid_in: tf tensor, shape (batch_size, state_dim + action_dim), validation input
valid_targ: tf tensor, shape (batch_size, state_dim + 2), validation target
Do not modify.
"""
mse_losses = np.zeros(self.num_networks)
rew_losses = np.zeros(self.num_networks)
not_done_losses = np.zeros(self.num_networks)
dynamics_losses = np.zeros(self.num_networks)
for i, network in enumerate(self.networks):
mean, _ = self.get_output(network.forward(valid_in), ret_logvar=True)
if final:
mse_loss = tf.reduce_mean(((mean - valid_targ) ** 2), 0)
rew_loss = mse_loss[0]
not_done_loss = mse_loss[1]
dynamics_loss = tf.reduce_mean(mse_loss[2:], 0)
mse_losses[i] = tf.reduce_mean(mse_loss, 0)
rew_losses[i] = rew_loss
not_done_losses[i] = not_done_loss
dynamics_losses[i] = dynamics_loss
else:
mse_loss = tf.reduce_mean((mean - valid_targ) ** 2, 0)
mse_losses[i] = tf.reduce_mean(mse_loss, 0)
if final:
return mse_losses, rew_losses, not_done_losses, dynamics_losses
return mse_losses
def _prepare_dataset(self, buffer):
'''
Given a replay buffer containing real environment transitions,
prepare a dataset for training the PE of neural networks.
The dataset contains ALL transitions in the replay buffer.
Do not modify.
inputs: tf tensor, shape (buffer_size, state_dim + action_dim)
targets: tf tensor, shape (buffer_size, state_dim + 2)
'''
state, action, next_state, reward, not_done = buffer.sample_all() # already shuffled
delta_state = next_state - state
inputs = tf.concat((state, action), -1)
targets = tf.concat((reward, not_done, delta_state), -1)
# Both TF tensors
return inputs, targets
def _start_train(self, max_epochs_since_update):
'''
Setup some internal bookkeeping variables to determine convergence.
Do not modify.
'''
self._snapshots = np.array([1e10 for i in range(self.num_networks)])
self._epochs_since_update = 0
self._max_epochs_since_update = max_epochs_since_update
def _end_train(self):
'''
Book keeping and console output. Do not modify.
'''
sorted_inds = np.argsort(self._snapshots)
self._model_inds = sorted_inds[:self.num_elites].tolist() # first elite models
print('Final holdout_losses: ', self._snapshots)
print('Model MSE', np.mean(self._snapshots[self._model_inds]))
print('Rew MSE', np.mean(self._reward_mse[self._model_inds]))
print('Not Done MSE', np.mean(self._not_done_mse[self._model_inds]))
print('Dyn MSE', np.mean(self._dynamics_mse[self._model_inds]))
def _save_best(self, epoch, holdout_losses):
'''
Determines the stopping condition for PE model training.
The training is determined to have converged if for max_epochs_since_update epochs,
no network in the ensemble has improved for more than 1%.
Do not modify.
'''
updated = False
for i in range(len(holdout_losses)):
current = holdout_losses[i]
best = self._snapshots[i]
improvement = (best - current) / best
if improvement > 0.01: # if decrease over 1%, save
self._snapshots[i] = current
#self._save_model(i)
updated = True
# improvement = (best - current) / best
print('epoch {} | updated {} | improvement: {:.4f} | best: {:.4f} | current: {:.4f}'.format(\
epoch, i, improvement, best, current))
if updated:
self._epochs_since_update = 0
else:
self._epochs_since_update += 1
if self._epochs_since_update > self._max_epochs_since_update:
print('[ PE ] Breaking at epoch {}: {} epochs since update ({} max)'.format(epoch,
self._epochs_since_update, self._max_epochs_since_update))
return True
else:
return False
def train(self, buffer, batch_size=256, holdout_ratio=0.2, max_logging=5000,
max_grad_updates=None, max_t=None, max_epochs_since_update=5):
'''
For model training, uses all transitions in real buffer, and train to convergence
in valid set. You will implement part of this training function.
'''
self._start_train(max_epochs_since_update)
inputs, targets = self._prepare_dataset(buffer)
# Split into training and holdout sets
num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
inputs, holdout_inputs = inputs[num_holdout:], inputs[:num_holdout]
targets, holdout_targets = targets[num_holdout:], targets[:num_holdout]
print('[ Euler PE ] Training {} | Target {} | Holdout: {}'.format(inputs.shape, targets.shape,
holdout_inputs.shape))
idxs = tf.convert_to_tensor(np.random.randint(inputs.shape[0], size=(inputs.shape[0],)))
num_batch = int(np.ceil(idxs.shape[-1] / batch_size))
# global counter
t0 = time.time()
grad_updates = 0
for epoch in itertools.count(): # infinite loop
for batch_num in range(num_batch):
batch_idxs = idxs[batch_num * batch_size:(batch_num + 1) * batch_size]
# (N, <=B): will include the remainder batch even if out of bounds!
train_in = tf.gather(inputs, batch_idxs)
train_targ = tf.gather(targets, batch_idxs)
# For each network, get loss, compute gradient of loss
# And apply optimizer step.
# raise NotImplementedError
for network in self.networks:
with tf.GradientTape() as tape:
train_loss = self._train_loss_one(network, train_in, train_targ)
network_grad = tape.gradient(train_loss, network.trainable_variables)
self.optimizer.apply_gradients(zip(network_grad, network.trainable_variables))
grad_updates += 1
idxs = tf.random.shuffle(idxs) # shuffle its dataset for each model
# validate each model using same valid set
holdout_losses = self._MSE_loss(holdout_inputs, holdout_targets) # (N,)
break_train = self._save_best(epoch, holdout_losses)
print("[ PE ] holdout_losses: ", f"Epoch {epoch}", holdout_losses) # write to log.txt
t = time.time() - t0
if break_train or (max_grad_updates and grad_updates > max_grad_updates):
break
if max_t and t > max_t:
print('Breaking because of timeout: {}! (max: {})'.format(t, max_t))
break
self._snapshots, self._reward_mse, self._not_done_mse, self._dynamics_mse \
= self._MSE_loss(holdout_inputs, holdout_targets, final=True)
self._end_train()
print(f"End of Model training {epoch} epochs and time {t:.0f}s")
print('Model training epoch', epoch)
print('Model training time', int(t))
return grad_updates
### Rollout / Inference Code
def _prepare_input(self, state, action):
'''
Prepares inputs for inference.
state: tf tensor, size (batch_size, state_dim) or (state_dim, )
action: tf tensor, size (batch_size, action_dim) or (action_dim, )
inputs: tf tensor, size (batch_size, state_dim + action_dim)
Do not modify.
'''
if state.ndim == 1:
state = tf.expand_dims(state, 0)
if action.ndim == 1:
action = tf.expand_dims(action, 0) \
if action.shape[0] == self.action_dim else tf.expand_dims(action, 1)
inputs = tf.concat((state, action), -1)
assert inputs.ndim == 2
return inputs
def _random_inds(self, batch_size):
'''
Uniformly randomly pick one *elite* model for each (state, action) in batch.
This may help you implement predict.
'''
inds = np.random.choice(self._model_inds, size=batch_size)
return inds
def predict(self, state, action, deterministic=False):
'''
Predicts next states, rewards and not_done using the probabilistic ensemble
        For each (state, action) pair, pick an elite model uniformly at random, then
        use that elite model to predict the next state, reward and not_done. The model
        can be different for each sample in the batch.
If deterministic=True, then the prediction should simply be the predicted mean.
If deterministic=False, then the prediction should be sampled from N(mean, var),
where mean is the predicted mean and var is the predicted variance.
state: tf tensor, shape (batch_size, state_dim) or (state_dim, )
action: tf tensor, shape (batch_size, action_dim) or (action_dim, )
samples (return value): np array, shape (batch_size, state_dim+2)
samples[:, 0] should be the rewards, samples[:, 1] should be the not-done signals,
and samples[:, 2:] should be the next states.
'''
inputs = self._prepare_input(state, action)
# raise NotImplementedError
batch_size = state.shape[0] if len(state.shape) > 1 else 1
inds = self._random_inds(batch_size) # get random idx
# group idx by network number -> network_number: list(random idx)
network_2_batch_mapping = defaultdict(list)
for batch_number, model_idx in enumerate(inds):
network_2_batch_mapping[model_idx].append(batch_number)
# model forward (for loop by network)
samples = [0] * batch_size
for model_idx, batch_numbers in network_2_batch_mapping.items():
model_inputs = tf.gather_nd(inputs, [[i] for i in batch_numbers])
pred_mean, pred_var = self.get_output(self.networks[model_idx].forward(model_inputs), ret_logvar=False)
zeros_padding = tf.zeros([len(batch_numbers), 2])
cur_state = tf.concat([zeros_padding, tf.gather_nd(state, [[i] for i in batch_numbers])], 1)
pred_mean = pred_mean + cur_state
            if deterministic:
                for idx, bi in enumerate(batch_numbers):
                    # keep shape (1, state_dim + 2) so the final squeeze below works in both branches
                    samples[bi] = pred_mean[idx:idx + 1, :]
            else:
                for idx, bi in enumerate(batch_numbers):
                    samples[bi] = tf.random.normal(shape=(1, self.state_dim + 2), mean=pred_mean[idx, :], stddev=tf.sqrt(pred_var[idx, :]))
samples = tf.squeeze(tf.convert_to_tensor(samples), 1)
# zeros_padding = tf.zeros([batch_size, 2])
# padded_state_only = tf.concat([zeros_padding, state], 1)
# samples += padded_state_only
return samples
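    # --- Added sketch (illustrative, not part of the original assignment code) ---
    # A short model-based rollout built on top of predict(), in the spirit of Dyna/MBPO-style
    # usage. `policy_fn(states) -> actions` is an assumed caller-provided function returning
    # float32 tensors; early termination on the not_done signal is omitted for brevity.
    def rollout_sketch(self, policy_fn, start_states, horizon=5):
        states = start_states
        trajectory = []
        for _ in range(horizon):
            actions = policy_fn(states)
            samples = self.predict(states, actions)  # (B, state_dim + 2)
            rewards, not_done = samples[:, 0], samples[:, 1]
            next_states = samples[:, 2:]
            trajectory.append((states, actions, rewards, not_done, next_states))
            states = next_states
        return trajectory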
# Sanity Check to test your PE model implementation.
if __name__ == '__main__':
import pybullet_envs
import gym
import utils
env = gym.make("InvertedPendulumBulletEnv-v0")
state_size = env.observation_space.shape[0]
action_size = env.action_space.shape[0]
replay_buffer = utils.ReplayBuffer(state_size, action_size, max_size=int(1e6))
o = env.reset()
total_steps = 25000 # one episode has 1000 steps
step = 0
while step < total_steps:
a = env.action_space.sample()
o2, r, d, info = env.step(a)
step += 1
replay_buffer.add(o, a, o2, r, float(d))
o = o2
if d:
o = env.reset()
model = PE(state_size, action_size)
model.train(replay_buffer)
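    # Added sanity check (illustrative; relies only on sample_all(), which _prepare_dataset
    # above also uses): compare deterministic one-step predictions against the recorded
    # next states for a small slice of the buffer.
    state, action, next_state, reward, not_done = replay_buffer.sample_all()
    preds = model.predict(state[:256], action[:256], deterministic=True)
    next_state_mse = tf.reduce_mean((preds[:, 2:] - tf.cast(next_state[:256], preds.dtype)) ** 2)
    print('One-step next-state MSE (deterministic):', float(next_state_mse))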
| 43.041026
| 143
| 0.614083
|
import time
import itertools
import numpy as np
import tensorflow as tf
from tensorflow import keras
from collections import defaultdict
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
class PEModel(keras.Model):
def __init__(self, state_dim, action_dim):
super().__init__()
self.l1 = keras.layers.Dense(256, activation="relu")
self.l2 = keras.layers.Dense(256, activation="relu")
self.l3 = keras.layers.Dense(256, activation="relu")
self.l4 = keras.layers.Dense((state_dim + 2) * 2)
self.forward(np.zeros((1, state_dim + action_dim)))
def forward(self, net_input):
out = self.l1(net_input)
out = self.l2(out)
out = self.l3(out)
out = self.l4(out)
return out
class PE():
def __init__(
self,
state_dim,
action_dim,
num_networks = 7,
num_elites = 5,
learning_rate = 1e-3,
):
self.num_networks = num_networks
self.num_elites = num_elites
self.networks = [PEModel(state_dim, action_dim) for i in range(num_networks)]
self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
self.state_dim = state_dim
self.action_dim = action_dim
self.output_dim = state_dim + 2
self.max_logvar = tf.convert_to_tensor(-3 * np.ones([1, self.state_dim + 2]), \
dtype=tf.float32)
self.min_logvar = tf.convert_to_tensor(-7 * np.ones([1, self.state_dim + 2]), \
dtype=tf.float32)
self.total_it = 0
self._model_inds = list(range(self.num_networks))
def get_output(self, output, ret_logvar=False):
mean = output[:, 0:self.output_dim]
raw_v = output[:, self.output_dim:]
logvar = self.max_logvar - tf.math.softplus(self.max_logvar - raw_v)
logvar = self.min_logvar + tf.math.softplus(logvar - self.min_logvar)
if ret_logvar:
return mean, logvar
return mean, tf.math.exp(logvar)
def _train_loss_one(self, network, train_in, train_targ):
pred_mean, pred_var = self.get_output(network.forward(train_in), ret_logvar=True)
train_loss = (pred_mean - train_targ) ** 2 / tf.math.exp(pred_var) + pred_var
train_loss = tf.math.reduce_sum(train_loss)
train_loss += 0.01 * (tf.math.reduce_sum(self.max_logvar) - \
tf.math.reduce_sum(self.min_logvar))
return train_loss
def _MSE_loss(self, valid_in, valid_targ, final=False):
mse_losses = np.zeros(self.num_networks)
rew_losses = np.zeros(self.num_networks)
not_done_losses = np.zeros(self.num_networks)
dynamics_losses = np.zeros(self.num_networks)
for i, network in enumerate(self.networks):
mean, _ = self.get_output(network.forward(valid_in), ret_logvar=True)
if final:
mse_loss = tf.reduce_mean(((mean - valid_targ) ** 2), 0)
rew_loss = mse_loss[0]
not_done_loss = mse_loss[1]
dynamics_loss = tf.reduce_mean(mse_loss[2:], 0)
mse_losses[i] = tf.reduce_mean(mse_loss, 0)
rew_losses[i] = rew_loss
not_done_losses[i] = not_done_loss
dynamics_losses[i] = dynamics_loss
else:
mse_loss = tf.reduce_mean((mean - valid_targ) ** 2, 0)
mse_losses[i] = tf.reduce_mean(mse_loss, 0)
if final:
return mse_losses, rew_losses, not_done_losses, dynamics_losses
return mse_losses
def _prepare_dataset(self, buffer):
state, action, next_state, reward, not_done = buffer.sample_all()
delta_state = next_state - state
inputs = tf.concat((state, action), -1)
targets = tf.concat((reward, not_done, delta_state), -1)
return inputs, targets
def _start_train(self, max_epochs_since_update):
self._snapshots = np.array([1e10 for i in range(self.num_networks)])
self._epochs_since_update = 0
self._max_epochs_since_update = max_epochs_since_update
def _end_train(self):
sorted_inds = np.argsort(self._snapshots)
self._model_inds = sorted_inds[:self.num_elites].tolist()
print('Final holdout_losses: ', self._snapshots)
print('Model MSE', np.mean(self._snapshots[self._model_inds]))
print('Rew MSE', np.mean(self._reward_mse[self._model_inds]))
print('Not Done MSE', np.mean(self._not_done_mse[self._model_inds]))
print('Dyn MSE', np.mean(self._dynamics_mse[self._model_inds]))
def _save_best(self, epoch, holdout_losses):
updated = False
for i in range(len(holdout_losses)):
current = holdout_losses[i]
best = self._snapshots[i]
improvement = (best - current) / best
if improvement > 0.01:
self._snapshots[i] = current
updated = True
print('epoch {} | updated {} | improvement: {:.4f} | best: {:.4f} | current: {:.4f}'.format(\
epoch, i, improvement, best, current))
if updated:
self._epochs_since_update = 0
else:
self._epochs_since_update += 1
if self._epochs_since_update > self._max_epochs_since_update:
print('[ PE ] Breaking at epoch {}: {} epochs since update ({} max)'.format(epoch,
self._epochs_since_update, self._max_epochs_since_update))
return True
else:
return False
def train(self, buffer, batch_size=256, holdout_ratio=0.2, max_logging=5000,
max_grad_updates=None, max_t=None, max_epochs_since_update=5):
self._start_train(max_epochs_since_update)
inputs, targets = self._prepare_dataset(buffer)
num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)
inputs, holdout_inputs = inputs[num_holdout:], inputs[:num_holdout]
targets, holdout_targets = targets[num_holdout:], targets[:num_holdout]
print('[ Euler PE ] Training {} | Target {} | Holdout: {}'.format(inputs.shape, targets.shape,
holdout_inputs.shape))
idxs = tf.convert_to_tensor(np.random.randint(inputs.shape[0], size=(inputs.shape[0],)))
num_batch = int(np.ceil(idxs.shape[-1] / batch_size))
t0 = time.time()
grad_updates = 0
for epoch in itertools.count():
for batch_num in range(num_batch):
batch_idxs = idxs[batch_num * batch_size:(batch_num + 1) * batch_size]
train_in = tf.gather(inputs, batch_idxs)
train_targ = tf.gather(targets, batch_idxs)
for network in self.networks:
with tf.GradientTape() as tape:
train_loss = self._train_loss_one(network, train_in, train_targ)
network_grad = tape.gradient(train_loss, network.trainable_variables)
self.optimizer.apply_gradients(zip(network_grad, network.trainable_variables))
grad_updates += 1
idxs = tf.random.shuffle(idxs)
holdout_losses = self._MSE_loss(holdout_inputs, holdout_targets)
break_train = self._save_best(epoch, holdout_losses)
print("[ PE ] holdout_losses: ", f"Epoch {epoch}", holdout_losses)
t = time.time() - t0
if break_train or (max_grad_updates and grad_updates > max_grad_updates):
break
if max_t and t > max_t:
print('Breaking because of timeout: {}! (max: {})'.format(t, max_t))
break
self._snapshots, self._reward_mse, self._not_done_mse, self._dynamics_mse \
= self._MSE_loss(holdout_inputs, holdout_targets, final=True)
self._end_train()
print(f"End of Model training {epoch} epochs and time {t:.0f}s")
print('Model training epoch', epoch)
print('Model training time', int(t))
return grad_updates
    def _prepare_input(self, state, action):
        if state.ndim == 1:
state = tf.expand_dims(state, 0)
if action.ndim == 1:
action = tf.expand_dims(action, 0) \
if action.shape[0] == self.action_dim else tf.expand_dims(action, 1)
inputs = tf.concat((state, action), -1)
assert inputs.ndim == 2
return inputs
def _random_inds(self, batch_size):
inds = np.random.choice(self._model_inds, size=batch_size)
return inds
def predict(self, state, action, deterministic=False):
inputs = self._prepare_input(state, action)
batch_size = state.shape[0] if len(state.shape) > 1 else 1
inds = self._random_inds(batch_size)
network_2_batch_mapping = defaultdict(list)
for batch_number, model_idx in enumerate(inds):
network_2_batch_mapping[model_idx].append(batch_number)
samples = [0] * batch_size
for model_idx, batch_numbers in network_2_batch_mapping.items():
model_inputs = tf.gather_nd(inputs, [[i] for i in batch_numbers])
pred_mean, pred_var = self.get_output(self.networks[model_idx].forward(model_inputs), ret_logvar=False)
zeros_padding = tf.zeros([len(batch_numbers), 2])
cur_state = tf.concat([zeros_padding, tf.gather_nd(state, [[i] for i in batch_numbers])], 1)
pred_mean = pred_mean + cur_state
            if deterministic:
                for idx, bi in enumerate(batch_numbers):
                    samples[bi] = pred_mean[idx:idx + 1, :]
            else:
                for idx, bi in enumerate(batch_numbers):
                    samples[bi] = tf.random.normal(shape=(1, self.state_dim + 2), mean=pred_mean[idx, :], stddev=tf.sqrt(pred_var[idx, :]))
samples = tf.squeeze(tf.convert_to_tensor(samples), 1)
return samples
if __name__ == '__main__':
import pybullet_envs
import gym
import utils
env = gym.make("InvertedPendulumBulletEnv-v0")
state_size = env.observation_space.shape[0]
action_size = env.action_space.shape[0]
replay_buffer = utils.ReplayBuffer(state_size, action_size, max_size=int(1e6))
o = env.reset()
total_steps = 25000
step = 0
while step < total_steps:
a = env.action_space.sample()
o2, r, d, info = env.step(a)
step += 1
replay_buffer.add(o, a, o2, r, float(d))
o = o2
if d:
o = env.reset()
model = PE(state_size, action_size)
model.train(replay_buffer)
| true
| true
|
f7157a7507148ed0eab64630453d5382f6fcb0e0
| 264
|
py
|
Python
|
project/api/migrations/0052_merge_0051_catalog_content_0051_video_resource_group.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | null | null | null |
project/api/migrations/0052_merge_0051_catalog_content_0051_video_resource_group.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-06-07T14:06:05.000Z
|
2021-06-18T16:27:29.000Z
|
project/api/migrations/0052_merge_0051_catalog_content_0051_video_resource_group.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-07-27T20:40:18.000Z
|
2021-09-12T16:48:19.000Z
|
# Generated by Django 3.2.3 on 2021-07-13 14:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0051_catalog_content'),
('api', '0051_video_resource_group'),
]
operations = [
]
| 17.6
| 47
| 0.636364
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0051_catalog_content'),
('api', '0051_video_resource_group'),
]
operations = [
]
| true
| true
|
f7157aa07d402c4517f82d9775f1feb82ec86069
| 1,855
|
py
|
Python
|
repos/system_upgrade/el7toel8/actors/checkbootavailspace/tests/unit_test.py
|
panovotn/leapp-repository
|
e80bdbf65393e68bc2e91b43b46fdd9b9b787878
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el7toel8/actors/checkbootavailspace/tests/unit_test.py
|
panovotn/leapp-repository
|
e80bdbf65393e68bc2e91b43b46fdd9b9b787878
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el7toel8/actors/checkbootavailspace/tests/unit_test.py
|
panovotn/leapp-repository
|
e80bdbf65393e68bc2e91b43b46fdd9b9b787878
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
from leapp.libraries.actor.library import (MIN_AVAIL_BYTES_FOR_BOOT,
check_avail_space_on_boot,
inhibit_upgrade)
from leapp import reporting
from leapp.libraries.common.testutils import create_report_mocked
class fake_get_avail_bytes_on_boot(object):
def __init__(self, size):
self.size = size
def __call__(self, *args):
return self.size
def test_not_enough_space_available(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    # Test 0 bytes available in /boot
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(0)
check_avail_space_on_boot(get_avail_bytes_on_boot)
    # Test 0.1 MiB less than required in /boot
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT - 0.1 * 2**20)
check_avail_space_on_boot(get_avail_bytes_on_boot)
assert reporting.create_report.called == 2
def test_enough_space_available(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT)
check_avail_space_on_boot(get_avail_bytes_on_boot)
assert reporting.create_report.called == 0
def test_inhibit_upgrade(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
# Test 4.2 MiB available on /boot
bytes_available = 4.2 * 2**20
inhibit_upgrade(bytes_available)
assert reporting.create_report.called == 1
assert 'inhibitor' in reporting.create_report.report_fields['flags']
mib_needed = (MIN_AVAIL_BYTES_FOR_BOOT - bytes_available) / 2**20
assert "needs additional {0} MiB".format(mib_needed) in reporting.create_report.report_fields['summary']
| 35.673077
| 108
| 0.750404
|
from __future__ import division
from leapp.libraries.actor.library import (MIN_AVAIL_BYTES_FOR_BOOT,
check_avail_space_on_boot,
inhibit_upgrade)
from leapp import reporting
from leapp.libraries.common.testutils import create_report_mocked
class fake_get_avail_bytes_on_boot(object):
def __init__(self, size):
self.size = size
def __call__(self, *args):
return self.size
def test_not_enough_space_available(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(0)
check_avail_space_on_boot(get_avail_bytes_on_boot)
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT - 0.1 * 2**20)
check_avail_space_on_boot(get_avail_bytes_on_boot)
assert reporting.create_report.called == 2
def test_enough_space_available(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT)
check_avail_space_on_boot(get_avail_bytes_on_boot)
assert reporting.create_report.called == 0
def test_inhibit_upgrade(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
bytes_available = 4.2 * 2**20
inhibit_upgrade(bytes_available)
assert reporting.create_report.called == 1
assert 'inhibitor' in reporting.create_report.report_fields['flags']
mib_needed = (MIN_AVAIL_BYTES_FOR_BOOT - bytes_available) / 2**20
assert "needs additional {0} MiB".format(mib_needed) in reporting.create_report.report_fields['summary']
| true
| true
|
f7157bf8af638e897f07970e2094a05bd644cb21
| 162
|
py
|
Python
|
boa3_test/test_sc/native_test/contractmanagement/DestroyContractTooManyArguments.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/native_test/contractmanagement/DestroyContractTooManyArguments.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/native_test/contractmanagement/DestroyContractTooManyArguments.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from typing import Any
from boa3.builtin.nativecontract.contractmanagement import ContractManagement
def Main(arg0: Any):
ContractManagement.destroy(arg0)
| 20.25
| 77
| 0.820988
|
from typing import Any
from boa3.builtin.nativecontract.contractmanagement import ContractManagement
def Main(arg0: Any):
ContractManagement.destroy(arg0)
| true
| true
|
f7157c2e2f6a53fa18f4f1a00dcbb3a3da29ecfd
| 15,984
|
py
|
Python
|
conta/main/views.py
|
osso73/contabilidad
|
babdedfdb47b2b4fd01a09e2db9db5d21bbc88f0
|
[
"MIT"
] | null | null | null |
conta/main/views.py
|
osso73/contabilidad
|
babdedfdb47b2b4fd01a09e2db9db5d21bbc88f0
|
[
"MIT"
] | 23
|
2021-12-29T21:41:37.000Z
|
2022-03-31T10:01:54.000Z
|
conta/main/views.py
|
osso73/contabilidad
|
babdedfdb47b2b4fd01a09e2db9db5d21bbc88f0
|
[
"MIT"
] | 1
|
2022-02-18T19:58:52.000Z
|
2022-02-18T19:58:52.000Z
|
import datetime
from django.shortcuts import render
from django.views import View
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.db.models.deletion import ProtectedError
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from main.models import Etiqueta, Cuenta, Movimiento, FiltroMovimientos, FiltroCuentas
import main.functions as functions
class IndexView(View):
"""Página principal"""
def get(self, request, *args, **kwargs):
context = { 'tab': 'principal' }
return render(request, 'main/index.html', context)
class CuentasView(LoginRequiredMixin, View):
"""Listado de cuentas. Permite añadir una cuenta nueva."""
def get(self, request, pag=1, *args, **kwargs):
lista_cuentas = Cuenta.objects.all()
lista_etiquetas = Etiqueta.objects.all().order_by('id')
        # If the filter does not exist, create it with default values
filtro = FiltroCuentas.objects.all()
if len(filtro) == 0:
filtro = FiltroCuentas()
filtro.save()
else:
filtro = filtro[0]
        # apply the filter
if filtro.num:
lista_cuentas = lista_cuentas.filter(pk=filtro.num)
if filtro.nombre:
lista_cuentas = lista_cuentas.filter(nombre__contains=filtro.nombre)
if filtro.etiqueta:
lista_cuentas = lista_cuentas.filter(etiqueta=filtro.etiqueta)
        # apply ordering
orden = '-' if not filtro.ascendiente else ''
lista_cuentas = lista_cuentas.order_by(orden+filtro.campo)
        # pagination: 10 results per page
paginacion, num_cuentas, pag, lista_cuentas = functions.get_pagination(pag, lista_cuentas)
context = {
'tab': 'cuentas',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'filtro': filtro,
'paginacion': paginacion,
'pagina_actual': pag,
'num_cuentas': num_cuentas,
}
return render(request, 'main/cuentas.html', context)
def post(self, request, *args, **kwargs):
nueva_cuenta = Cuenta(
num = request.POST['num'].strip(),
nombre = request.POST['nombre']
)
nueva_cuenta.save()
e = request.POST['etiqueta']
if len(e):
nombres_etiquetas = e.split(', ')
nueva_cuenta.etiqueta.set(nombres_etiquetas)
nueva_cuenta.save()
return HttpResponseRedirect(reverse('main:cuentas'))
class AsientosView(LoginRequiredMixin, View):
"""Listado de asientos (o movimientos). Permite añadir un asiento
simple nuevo.
"""
def get(self, request, pag=1, *args, **kwargs):
lista_movimientos = Movimiento.objects.all().order_by('num')
lista_cuentas = Cuenta.objects.all().order_by('num')
        # If the filter does not exist, create it with default values
filtro = FiltroMovimientos.objects.all()
if len(filtro) == 0:
filtro = FiltroMovimientos()
filtro.save()
else:
filtro = filtro[0]
        # apply the filter
if filtro.fecha_inicial:
fecha = datetime.date.fromisoformat(filtro.fecha_inicial)
lista_movimientos = lista_movimientos.filter(fecha__gte=fecha)
if filtro.fecha_final:
fecha = datetime.date.fromisoformat(filtro.fecha_final)
lista_movimientos = lista_movimientos.filter(fecha__lte=fecha)
if filtro.cuenta:
lista_movimientos = lista_movimientos.filter(cuenta=filtro.cuenta)
if filtro.descripcion:
lista_movimientos = lista_movimientos.filter(descripcion__contains=filtro.descripcion)
if filtro.asiento:
lista_movimientos = lista_movimientos.filter(num=int(filtro.asiento))
total_haber = total_debe = 0
for m in lista_movimientos:
total_debe += m.debe
total_haber += m.haber
total = total_haber - total_debe
        # apply ordering
orden = '-' if not filtro.ascendiente else ''
lista_movimientos = lista_movimientos.order_by(orden+filtro.campo)
        # pagination: 25 results per page
paginacion, num_movimientos, pag, lista_movimientos = functions.get_pagination(pag, lista_movimientos)
context = {
'tab': 'asientos',
'lista_movimientos': lista_movimientos,
'lista_cuentas': lista_cuentas,
'filtro': filtro,
'total_debe': total_debe,
'total_haber': total_haber,
'total': total,
'paginacion': paginacion,
'pagina_actual': pag,
'num_movimientos': num_movimientos,
}
return render(request, 'main/asientos.html', context)
def post(self, request, *args, **kwargs):
num = functions.max_num_asiento()
pk_debe = request.POST['debe'].split(':')[0]
pk_haber = request.POST['haber'].split(':')[0]
simple = {
'num': num+1,
'fecha': request.POST['fecha'],
'descripcion': request.POST['descripcion'],
'valor': request.POST['valor'],
'debe': Cuenta.objects.get(pk=pk_debe),
'haber': Cuenta.objects.get(pk=pk_haber)
}
functions.crea_asiento_simple(simple)
return HttpResponseRedirect(reverse('main:asientos'))
class ModificarAsientoView(LoginRequiredMixin, View):
def get(self, request, num):
lista_movimientos = [ a for a in Movimiento.objects.all() if a.num == num ]
lista_cuentas = Cuenta.objects.all()
for movimiento in lista_movimientos:
fecha_movimiento = f'{movimiento.fecha.year}-{movimiento.fecha.month:02d}-{movimiento.fecha.day:02d}'
movimiento.fecha = fecha_movimiento
context = {
'tab': 'asientos',
'num_asiento': num,
'lista_movimientos': lista_movimientos,
'lista_cuentas': lista_cuentas
}
return render(request, 'main/modificar_asiento.html', context)
def post(self, request, *args, **kwargs):
num_items = int((len(request.POST) -1 )/ 7)
for i in range(num_items):
movimiento = Movimiento.objects.get(id=request.POST[f'id{i}'])
movimiento.num = int(request.POST[f'num{i}'])
movimiento.fecha = request.POST[f'fecha{i}']
movimiento.descripcion = request.POST[f'descripcion{i}']
movimiento.debe = float(request.POST[f'debe{i}'])
movimiento.haber = float(request.POST[f'haber{i}'])
num_cuenta = int(request.POST[f'cuenta{i}'].split(':')[0])
cuenta = Cuenta.objects.get(num=num_cuenta)
movimiento.cuenta = cuenta
movimiento.save()
return HttpResponseRedirect(reverse('main:asientos'))
class ModificarCuentaView(LoginRequiredMixin, View):
def get(self, request, num):
context = {
'tab': 'cuentas',
'cuenta': Cuenta.objects.get(pk=num),
}
return render(request, 'main/modificar_cuenta.html', context)
def post(self, request, *args, **kwargs):
cuenta = Cuenta.objects.get(pk=request.POST['num'])
cuenta.nombre = request.POST['nombre']
etiquetas = request.POST['etiqueta'].split(', ')
        # validate tags
lista_etiquetas = Etiqueta.objects.all()
etiquetas_sin_error = list()
for e in etiquetas:
if lista_etiquetas.filter(id=e):
etiquetas_sin_error.append(e)
cuenta.etiqueta.set(etiquetas_sin_error)
cuenta.save()
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_movimiento(request, pk, pagina, num_asiento=None):
movimiento = Movimiento.objects.get(pk=pk)
movimiento.delete()
if num_asiento:
return HttpResponseRedirect(reverse(f'main:{pagina}', args=[num_asiento]))
else:
return HttpResponseRedirect(reverse(f'main:{pagina}'))
@login_required
def anadir_movimiento(request, num, fecha):
movimiento = Movimiento(
num = num,
fecha = fecha,
descripcion = '',
debe = 0,
haber = 0,
cuenta = Cuenta.objects.all()[0]
)
movimiento.save()
return HttpResponseRedirect(reverse(f'main:modificar_asiento', args=[num]))
@login_required
def borrar_cuenta(request, pk):
cuenta = Cuenta.objects.get(pk=pk)
try:
cuenta.delete()
except ProtectedError as e:
aviso = {
'mensaje': "Esta cuenta no se puede borrar, porque tiene movimientos asociados.",
'nuevo_url': reverse('main:cuentas'),
}
context = {
'tab': 'cuentas',
'aviso': aviso,
}
return render(request, 'main/cuentas.html', context)
return HttpResponseRedirect(reverse('main:cuentas'))
class CargarCuentas(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse('main:cuentas'))
def post(self, request, *args, **kwargs):
datos_excel = functions.extraer_cuentas(request.FILES['file'])
sobreescribir = request.POST.get('sobreescribir', False)
cuentas_anadidas, cuentas_error = functions.crear_cuentas(datos_excel, sobreescribir)
context = {
'tab': 'cuentas',
'cuentas_anadidas': cuentas_anadidas,
'cuentas_error': cuentas_error,
}
return render(request, 'main/cargar_cuentas.html', context)
class CargarAsientos(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse('main:asientos'))
def post(self, request, *args, **kwargs):
simple, compleja = functions.extraer_asientos(request.FILES['file'])
movimientos_anadidos, errores_simple, errores_compleja = functions.crear_asientos(simple, compleja)
context = {
'tab': 'asientos',
'movimientos_anadidos': movimientos_anadidos,
'errores_simple': errores_simple,
'errores_compleja': errores_compleja,
'num_errores': len(errores_simple) + len(errores_compleja)
}
return render(request, 'main/cargar_asientos.html', context)
@login_required
def filtro_cuentas(request):
if request.method == 'POST':
filtro = FiltroCuentas.objects.all()[0]
if request.POST['accion_filtro'] == 'aplicar':
filtro.num = request.POST['f_num']
filtro.nombre = request.POST['f_nombre']
filtro.etiqueta = request.POST['f_etiqueta']
filtro.save()
elif request.POST['accion_filtro'] == 'borrar':
filtro.num = ''
filtro.nombre = ''
filtro.etiqueta = ''
filtro.save()
else:
pass
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def filtro_asientos(request):
if request.method == 'POST':
if request.POST['accion_filtro'] == 'aplicar':
filtro = FiltroMovimientos.objects.all()[0]
filtro.fecha_inicial = request.POST['f_fecha_inicial']
filtro.fecha_final = request.POST['f_fecha_final']
filtro.descripcion = request.POST['f_descripcion']
filtro.cuenta = request.POST['f_cuenta'].split(':')[0]
filtro.asiento = request.POST['f_asiento']
filtro.save()
elif request.POST['accion_filtro'] == 'borrar':
filtro = FiltroMovimientos.objects.all()[0]
filtro.fecha_inicial = ''
filtro.fecha_final = ''
filtro.descripcion = ''
filtro.cuenta = ''
filtro.asiento = ''
filtro.save()
else:
pass
return HttpResponseRedirect(reverse('main:asientos'))
@login_required
def cambiar_orden(request, tipo, campo):
if tipo == 'asientos':
filtro = FiltroMovimientos.objects.all()[0]
elif tipo == 'cuentas':
filtro = FiltroCuentas.objects.all()[0]
else:
return HttpResponseRedirect(reverse('main:index'))
if filtro.campo == campo.lower():
filtro.ascendiente = not filtro.ascendiente
else:
filtro.campo = campo.lower()
filtro.ascendiente = True
filtro.save()
return HttpResponseRedirect(reverse('main:'+tipo))
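# Added note (illustrative example, not part of the original views): cambiar_orden toggles
# the sort direction when the same column header is clicked twice. For instance, with
# filtro.campo == 'fecha' and filtro.ascendiente == True, a second click on 'Fecha' flips
# ascendiente to False, and the list views above then build order_by('-fecha') via
# orden = '-' if not filtro.ascendiente else ''.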
@login_required
def gestionar_etiqueta(request):
"""Gestiona el formulario para añadir o borrar etiquetas, dentro de la
vista de cuentas. Solo gestiona peticiones de tipo post.
"""
if request.method == 'POST':
accion = request.POST['accion_etiqueta']
id = request.POST['e_id']
nombre = request.POST['e_nombre']
if accion == 'anadir':
Etiqueta.objects.create(
id = id,
nombre = nombre,
)
elif accion == 'borrar':
e = Etiqueta.objects.filter(id=id)
if len(e):
e[0].delete()
else:
pass
return HttpResponseRedirect(reverse('main:cuentas'))
class InformesView(LoginRequiredMixin, View):
"""Página principal"""
def get(self, request, *args, **kwargs):
lista_cuentas = Cuenta.objects.all().order_by('num')
lista_etiquetas = Etiqueta.objects.all().order_by('id')
context = {
'tab': 'informes',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'df': {'empty': True },
}
return render(request, 'main/informes.html', context)
def post(self, request):
lista_cuentas = Cuenta.objects.all().order_by('num')
lista_etiquetas = Etiqueta.objects.all().order_by('id')
movimientos = Movimiento.objects.all()
movimientos = functions.filtra_movimientos(request.POST, movimientos)
df = functions.genera_informe(request.POST['f_tipo'], movimientos)
titulo, subtitulo = functions.titulo_informe(request.POST)
graph = functions.grafico_informe(df)
context = {
'tab': 'informes',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'titulo': titulo,
'subtitulo': subtitulo,
'df': df,
'filtro': request.POST,
'graph': graph,
}
return render(request, 'main/informes.html', context)
@login_required
def borrar_multiples_cuentas(request):
if request.method == 'POST':
errors = list()
for checked in request.POST.keys():
if not checked.startswith('check'):
continue
cuenta = Cuenta.objects.get(pk=request.POST[checked])
try:
cuenta.delete()
except ProtectedError as e:
errors.append(cuenta)
context = { 'tab': 'cuentas' }
if errors:
nombres = [ c.nombre for c in errors ]
nombres = ", ".join(nombres)
aviso = {
'mensaje': f"La(s) siguiente(s) cuentas no se pueden borrar, porque tienen movimientos asociados: {nombres}.",
'nuevo_url': reverse('main:cuentas'),
}
context['aviso'] = aviso
return render(request, 'main/cuentas.html', context)
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_multiples_movimientos(request):
if request.method == 'POST':
errors = list()
for checked in request.POST.keys():
if not checked.startswith('check'):
continue
movimiento = Movimiento.objects.get(pk=request.POST[checked])
movimiento.delete()
return HttpResponseRedirect(reverse('main:asientos'))
| 34.081023
| 126
| 0.611424
|
import datetime
from django.shortcuts import render
from django.views import View
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.db.models.deletion import ProtectedError
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from main.models import Etiqueta, Cuenta, Movimiento, FiltroMovimientos, FiltroCuentas
import main.functions as functions
class IndexView(View):
def get(self, request, *args, **kwargs):
context = { 'tab': 'principal' }
return render(request, 'main/index.html', context)
class CuentasView(LoginRequiredMixin, View):
def get(self, request, pag=1, *args, **kwargs):
lista_cuentas = Cuenta.objects.all()
lista_etiquetas = Etiqueta.objects.all().order_by('id')
filtro = FiltroCuentas.objects.all()
if len(filtro) == 0:
filtro = FiltroCuentas()
filtro.save()
else:
filtro = filtro[0]
if filtro.num:
lista_cuentas = lista_cuentas.filter(pk=filtro.num)
if filtro.nombre:
lista_cuentas = lista_cuentas.filter(nombre__contains=filtro.nombre)
if filtro.etiqueta:
lista_cuentas = lista_cuentas.filter(etiqueta=filtro.etiqueta)
orden = '-' if not filtro.ascendiente else ''
lista_cuentas = lista_cuentas.order_by(orden+filtro.campo)
paginacion, num_cuentas, pag, lista_cuentas = functions.get_pagination(pag, lista_cuentas)
context = {
'tab': 'cuentas',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'filtro': filtro,
'paginacion': paginacion,
'pagina_actual': pag,
'num_cuentas': num_cuentas,
}
return render(request, 'main/cuentas.html', context)
def post(self, request, *args, **kwargs):
nueva_cuenta = Cuenta(
num = request.POST['num'].strip(),
nombre = request.POST['nombre']
)
nueva_cuenta.save()
e = request.POST['etiqueta']
if len(e):
nombres_etiquetas = e.split(', ')
nueva_cuenta.etiqueta.set(nombres_etiquetas)
nueva_cuenta.save()
return HttpResponseRedirect(reverse('main:cuentas'))
class AsientosView(LoginRequiredMixin, View):
def get(self, request, pag=1, *args, **kwargs):
lista_movimientos = Movimiento.objects.all().order_by('num')
lista_cuentas = Cuenta.objects.all().order_by('num')
filtro = FiltroMovimientos.objects.all()
if len(filtro) == 0:
filtro = FiltroMovimientos()
filtro.save()
else:
filtro = filtro[0]
if filtro.fecha_inicial:
fecha = datetime.date.fromisoformat(filtro.fecha_inicial)
lista_movimientos = lista_movimientos.filter(fecha__gte=fecha)
if filtro.fecha_final:
fecha = datetime.date.fromisoformat(filtro.fecha_final)
lista_movimientos = lista_movimientos.filter(fecha__lte=fecha)
if filtro.cuenta:
lista_movimientos = lista_movimientos.filter(cuenta=filtro.cuenta)
if filtro.descripcion:
lista_movimientos = lista_movimientos.filter(descripcion__contains=filtro.descripcion)
if filtro.asiento:
lista_movimientos = lista_movimientos.filter(num=int(filtro.asiento))
total_haber = total_debe = 0
for m in lista_movimientos:
total_debe += m.debe
total_haber += m.haber
total = total_haber - total_debe
orden = '-' if not filtro.ascendiente else ''
lista_movimientos = lista_movimientos.order_by(orden+filtro.campo)
paginacion, num_movimientos, pag, lista_movimientos = functions.get_pagination(pag, lista_movimientos)
context = {
'tab': 'asientos',
'lista_movimientos': lista_movimientos,
'lista_cuentas': lista_cuentas,
'filtro': filtro,
'total_debe': total_debe,
'total_haber': total_haber,
'total': total,
'paginacion': paginacion,
'pagina_actual': pag,
'num_movimientos': num_movimientos,
}
return render(request, 'main/asientos.html', context)
def post(self, request, *args, **kwargs):
num = functions.max_num_asiento()
pk_debe = request.POST['debe'].split(':')[0]
pk_haber = request.POST['haber'].split(':')[0]
simple = {
'num': num+1,
'fecha': request.POST['fecha'],
'descripcion': request.POST['descripcion'],
'valor': request.POST['valor'],
'debe': Cuenta.objects.get(pk=pk_debe),
'haber': Cuenta.objects.get(pk=pk_haber)
}
functions.crea_asiento_simple(simple)
return HttpResponseRedirect(reverse('main:asientos'))
class ModificarAsientoView(LoginRequiredMixin, View):
def get(self, request, num):
lista_movimientos = [ a for a in Movimiento.objects.all() if a.num == num ]
lista_cuentas = Cuenta.objects.all()
for movimiento in lista_movimientos:
fecha_movimiento = f'{movimiento.fecha.year}-{movimiento.fecha.month:02d}-{movimiento.fecha.day:02d}'
movimiento.fecha = fecha_movimiento
context = {
'tab': 'asientos',
'num_asiento': num,
'lista_movimientos': lista_movimientos,
'lista_cuentas': lista_cuentas
}
return render(request, 'main/modificar_asiento.html', context)
def post(self, request, *args, **kwargs):
num_items = int((len(request.POST) -1 )/ 7)
for i in range(num_items):
movimiento = Movimiento.objects.get(id=request.POST[f'id{i}'])
movimiento.num = int(request.POST[f'num{i}'])
movimiento.fecha = request.POST[f'fecha{i}']
movimiento.descripcion = request.POST[f'descripcion{i}']
movimiento.debe = float(request.POST[f'debe{i}'])
movimiento.haber = float(request.POST[f'haber{i}'])
num_cuenta = int(request.POST[f'cuenta{i}'].split(':')[0])
cuenta = Cuenta.objects.get(num=num_cuenta)
movimiento.cuenta = cuenta
movimiento.save()
return HttpResponseRedirect(reverse('main:asientos'))
class ModificarCuentaView(LoginRequiredMixin, View):
def get(self, request, num):
context = {
'tab': 'cuentas',
'cuenta': Cuenta.objects.get(pk=num),
}
return render(request, 'main/modificar_cuenta.html', context)
def post(self, request, *args, **kwargs):
cuenta = Cuenta.objects.get(pk=request.POST['num'])
cuenta.nombre = request.POST['nombre']
etiquetas = request.POST['etiqueta'].split(', ')
lista_etiquetas = Etiqueta.objects.all()
etiquetas_sin_error = list()
for e in etiquetas:
if lista_etiquetas.filter(id=e):
etiquetas_sin_error.append(e)
cuenta.etiqueta.set(etiquetas_sin_error)
cuenta.save()
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_movimiento(request, pk, pagina, num_asiento=None):
movimiento = Movimiento.objects.get(pk=pk)
movimiento.delete()
if num_asiento:
return HttpResponseRedirect(reverse(f'main:{pagina}', args=[num_asiento]))
else:
return HttpResponseRedirect(reverse(f'main:{pagina}'))
@login_required
def anadir_movimiento(request, num, fecha):
movimiento = Movimiento(
num = num,
fecha = fecha,
descripcion = '',
debe = 0,
haber = 0,
cuenta = Cuenta.objects.all()[0]
)
movimiento.save()
return HttpResponseRedirect(reverse(f'main:modificar_asiento', args=[num]))
@login_required
def borrar_cuenta(request, pk):
cuenta = Cuenta.objects.get(pk=pk)
try:
cuenta.delete()
except ProtectedError as e:
aviso = {
'mensaje': "Esta cuenta no se puede borrar, porque tiene movimientos asociados.",
'nuevo_url': reverse('main:cuentas'),
}
context = {
'tab': 'cuentas',
'aviso': aviso,
}
return render(request, 'main/cuentas.html', context)
return HttpResponseRedirect(reverse('main:cuentas'))
class CargarCuentas(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse('main:cuentas'))
def post(self, request, *args, **kwargs):
datos_excel = functions.extraer_cuentas(request.FILES['file'])
sobreescribir = request.POST.get('sobreescribir', False)
cuentas_anadidas, cuentas_error = functions.crear_cuentas(datos_excel, sobreescribir)
context = {
'tab': 'cuentas',
'cuentas_anadidas': cuentas_anadidas,
'cuentas_error': cuentas_error,
}
return render(request, 'main/cargar_cuentas.html', context)
class CargarAsientos(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse('main:asientos'))
def post(self, request, *args, **kwargs):
simple, compleja = functions.extraer_asientos(request.FILES['file'])
movimientos_anadidos, errores_simple, errores_compleja = functions.crear_asientos(simple, compleja)
context = {
'tab': 'asientos',
'movimientos_anadidos': movimientos_anadidos,
'errores_simple': errores_simple,
'errores_compleja': errores_compleja,
'num_errores': len(errores_simple) + len(errores_compleja)
}
return render(request, 'main/cargar_asientos.html', context)
@login_required
def filtro_cuentas(request):
if request.method == 'POST':
filtro = FiltroCuentas.objects.all()[0]
if request.POST['accion_filtro'] == 'aplicar':
filtro.num = request.POST['f_num']
filtro.nombre = request.POST['f_nombre']
filtro.etiqueta = request.POST['f_etiqueta']
filtro.save()
elif request.POST['accion_filtro'] == 'borrar':
filtro.num = ''
filtro.nombre = ''
filtro.etiqueta = ''
filtro.save()
else:
pass
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def filtro_asientos(request):
if request.method == 'POST':
if request.POST['accion_filtro'] == 'aplicar':
filtro = FiltroMovimientos.objects.all()[0]
filtro.fecha_inicial = request.POST['f_fecha_inicial']
filtro.fecha_final = request.POST['f_fecha_final']
filtro.descripcion = request.POST['f_descripcion']
filtro.cuenta = request.POST['f_cuenta'].split(':')[0]
filtro.asiento = request.POST['f_asiento']
filtro.save()
elif request.POST['accion_filtro'] == 'borrar':
filtro = FiltroMovimientos.objects.all()[0]
filtro.fecha_inicial = ''
filtro.fecha_final = ''
filtro.descripcion = ''
filtro.cuenta = ''
filtro.asiento = ''
filtro.save()
else:
pass
return HttpResponseRedirect(reverse('main:asientos'))
@login_required
def cambiar_orden(request, tipo, campo):
if tipo == 'asientos':
filtro = FiltroMovimientos.objects.all()[0]
elif tipo == 'cuentas':
filtro = FiltroCuentas.objects.all()[0]
else:
return HttpResponseRedirect(reverse('main:index'))
if filtro.campo == campo.lower():
filtro.ascendiente = not filtro.ascendiente
else:
filtro.campo = campo.lower()
filtro.ascendiente = True
filtro.save()
return HttpResponseRedirect(reverse('main:'+tipo))
@login_required
def gestionar_etiqueta(request):
if request.method == 'POST':
accion = request.POST['accion_etiqueta']
id = request.POST['e_id']
nombre = request.POST['e_nombre']
if accion == 'anadir':
Etiqueta.objects.create(
id = id,
nombre = nombre,
)
elif accion == 'borrar':
e = Etiqueta.objects.filter(id=id)
if len(e):
e[0].delete()
else:
pass
return HttpResponseRedirect(reverse('main:cuentas'))
class InformesView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
lista_cuentas = Cuenta.objects.all().order_by('num')
lista_etiquetas = Etiqueta.objects.all().order_by('id')
context = {
'tab': 'informes',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'df': {'empty': True },
}
return render(request, 'main/informes.html', context)
def post(self, request):
lista_cuentas = Cuenta.objects.all().order_by('num')
lista_etiquetas = Etiqueta.objects.all().order_by('id')
movimientos = Movimiento.objects.all()
movimientos = functions.filtra_movimientos(request.POST, movimientos)
df = functions.genera_informe(request.POST['f_tipo'], movimientos)
titulo, subtitulo = functions.titulo_informe(request.POST)
graph = functions.grafico_informe(df)
context = {
'tab': 'informes',
'lista_cuentas': lista_cuentas,
'lista_etiquetas': lista_etiquetas,
'titulo': titulo,
'subtitulo': subtitulo,
'df': df,
'filtro': request.POST,
'graph': graph,
}
return render(request, 'main/informes.html', context)
@login_required
def borrar_multiples_cuentas(request):
if request.method == 'POST':
errors = list()
for checked in request.POST.keys():
if not checked.startswith('check'):
continue
cuenta = Cuenta.objects.get(pk=request.POST[checked])
try:
cuenta.delete()
except ProtectedError as e:
errors.append(cuenta)
context = { 'tab': 'cuentas' }
if errors:
nombres = [ c.nombre for c in errors ]
nombres = ", ".join(nombres)
aviso = {
'mensaje': f"La(s) siguiente(s) cuentas no se pueden borrar, porque tienen movimientos asociados: {nombres}.",
'nuevo_url': reverse('main:cuentas'),
}
context['aviso'] = aviso
return render(request, 'main/cuentas.html', context)
return HttpResponseRedirect(reverse('main:cuentas'))
@login_required
def borrar_multiples_movimientos(request):
if request.method == 'POST':
errors = list()
for checked in request.POST.keys():
if not checked.startswith('check'):
continue
movimiento = Movimiento.objects.get(pk=request.POST[checked])
movimiento.delete()
return HttpResponseRedirect(reverse('main:asientos'))
| true
| true
|
f7157cc2826df7834aa60b3fb11396d26a4e5f5b
| 2,465
|
py
|
Python
|
day_15/second.py
|
Mizux/adventofcode
|
8bca6b5db1b9f2e64b4038d32680d07766d14e2d
|
[
"Apache-2.0"
] | 1
|
2021-12-11T19:41:25.000Z
|
2021-12-11T19:41:25.000Z
|
day_15/second.py
|
Mizux/adventofcode
|
8bca6b5db1b9f2e64b4038d32680d07766d14e2d
|
[
"Apache-2.0"
] | null | null | null |
day_15/second.py
|
Mizux/adventofcode
|
8bca6b5db1b9f2e64b4038d32680d07766d14e2d
|
[
"Apache-2.0"
] | 1
|
2021-12-06T12:09:44.000Z
|
2021-12-06T12:09:44.000Z
|
#!/usr/bin/env python3
from collections import deque
FILE='test.txt' # sol: 40
FILE='input.txt' # sol: 824
def print_board(board):
for row in board:
print(''.join([str(i) for i in row]))
def parse_input(file, repeat):
board = []
for i in range(repeat):
with open(file, 'r') as f:
for line in f:
board.append([int(c) for c in line.strip()] * repeat)
#print_board(board)
return board
def compute_board(board, repeat):
height = len(board) // repeat
width = len(board[0]) // repeat
# for each grid row
for row_repeat in range(repeat):
if row_repeat != 0: # don't touch grid (0,0)
# update first grid column
for row in range(height):
for col in range(width):
if board[height*(row_repeat-1)+row][col] < 9:
board[height*row_repeat+row][col] = board[height*(row_repeat-1)+row][col] + 1
else:
board[height*row_repeat+row][col] = 1
# update remaining grid columns
for col_repeat in range(1, repeat):
for row in range(height):
for col in range(width):
if board[height*row_repeat+row][width*(col_repeat-1)+col] < 9:
board[height*row_repeat+row][width*col_repeat+col] = board[height*row_repeat+row][width*(col_repeat-1)+col] + 1
else:
board[height*row_repeat+row][width*col_repeat+col] = 1
def get_neighbour(board, pos):
out = []
if pos[0] > 0:
out.append((pos[0]-1, pos[1]))
if pos[0] < len(board) - 1:
out.append((pos[0]+1, pos[1]))
if pos[1] > 0:
out.append((pos[0], pos[1] - 1))
if pos[1] < len(board[0]) - 1:
out.append((pos[0], pos[1] + 1))
return out
def dijkstra(board, start):
queue = deque([start])
distance = {start: 0}
while queue:
cur = queue.popleft()
for point in get_neighbour(board, cur):
dst = distance[cur] + board[point[0]][point[1]]
if (point not in distance or dst < distance[point]):
distance[point] = dst
queue.append(point)
return distance
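# Added alternative (illustrative, not part of the original solution): the deque-based
# relaxation above may revisit cells many times; a classic Dijkstra with a priority queue
# is usually faster on this grid. Same inputs and outputs as dijkstra().
import heapq
def dijkstra_heap(board, start):
    distance = {start: 0}
    heap = [(0, start)]
    while heap:
        dist, cur = heapq.heappop(heap)
        if dist > distance.get(cur, float('inf')):
            continue  # stale heap entry; a shorter path was already found
        for point in get_neighbour(board, cur):
            new_dist = dist + board[point[0]][point[1]]
            if point not in distance or new_dist < distance[point]:
                distance[point] = new_dist
                heapq.heappush(heap, (new_dist, point))
    return distance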
repeat = 5
board = parse_input(FILE, repeat)
compute_board(board, repeat)
#print_board(board)
distance = dijkstra(board, (0,0))
end = (len(board)-1, len(board[0])-1)
print(f'result {distance[end]}')
| 31.602564
| 135
| 0.55213
|
from collections import deque
FILE='test.txt'
FILE='input.txt'
def print_board(board):
for row in board:
print(''.join([str(i) for i in row]))
def parse_input(file, repeat):
board = []
for i in range(repeat):
with open(file, 'r') as f:
for line in f:
board.append([int(c) for c in line.strip()] * repeat)
return board
def compute_board(board, repeat):
height = len(board) // repeat
width = len(board[0]) // repeat
for row_repeat in range(repeat):
if row_repeat != 0:
# update first grid column
for row in range(height):
for col in range(width):
if board[height*(row_repeat-1)+row][col] < 9:
board[height*row_repeat+row][col] = board[height*(row_repeat-1)+row][col] + 1
else:
board[height*row_repeat+row][col] = 1
# update remaining grid columns
for col_repeat in range(1, repeat):
for row in range(height):
for col in range(width):
if board[height*row_repeat+row][width*(col_repeat-1)+col] < 9:
board[height*row_repeat+row][width*col_repeat+col] = board[height*row_repeat+row][width*(col_repeat-1)+col] + 1
else:
board[height*row_repeat+row][width*col_repeat+col] = 1
def get_neighbour(board, pos):
out = []
if pos[0] > 0:
out.append((pos[0]-1, pos[1]))
if pos[0] < len(board) - 1:
out.append((pos[0]+1, pos[1]))
if pos[1] > 0:
out.append((pos[0], pos[1] - 1))
if pos[1] < len(board[0]) - 1:
out.append((pos[0], pos[1] + 1))
return out
def dijkstra(board, start):
queue = deque([start])
distance = {start: 0}
while queue:
cur = queue.popleft()
for point in get_neighbour(board, cur):
dst = distance[cur] + board[point[0]][point[1]]
if (point not in distance or dst < distance[point]):
distance[point] = dst
queue.append(point)
return distance
repeat = 5
board = parse_input(FILE, repeat)
compute_board(board, repeat)
#print_board(board)
distance = dijkstra(board, (0,0))
end = (len(board)-1, len(board[0])-1)
print(f'result {distance[end]}')
| true
| true
|
f7157d354d86263b22ff896993d75bae3d71e43b
| 21,644
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_subnets_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_subnets_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/_subnets_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations(object):
"""SubnetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
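    # Added usage sketch (illustrative; assumes an authenticated NetworkManagementClient
    # named `network_client` created elsewhere, which is not part of this generated file):
    #
    #     poller = network_client.subnets.begin_delete(
    #         resource_group_name="my-rg",
    #         virtual_network_name="my-vnet",
    #         subnet_name="my-subnet",
    #     )
    #     poller.result()  # block until the long-running delete completes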
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Subnet"]
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2017_06_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SubnetListResult"]
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
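# Illustrative usage sketch (not part of the generated module above): how the
# subnet operations defined in SubnetsOperations are usually reached through the
# service client. The "subnets" attribute name, the credential class, and every
# resource name below are assumptions based on common azure-mgmt-network usage,
# not taken from this file.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
# begin_create_or_update returns an LROPoller; result() blocks until provisioning
# finishes. A dict (or a Subnet model from the matching models module) is accepted.
poller = client.subnets.begin_create_or_update(
    "example-rg", "example-vnet", "example-subnet",
    {"address_prefix": "10.0.1.0/24"})
subnet = poller.result()
# get() returns a deserialized Subnet; list() returns an ItemPaged iterator.
same_subnet = client.subnets.get("example-rg", "example-vnet", "example-subnet")
subnet_names = [s.name for s in client.subnets.list("example-rg", "example-vnet")]
# Deletion is also a long-running operation.
client.subnets.begin_delete("example-rg", "example-vnet", "example-subnet").result()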
| 48.747748 | 220 | 0.660968 |
| true | true |
f7157de80611bef9e79b6363562de1eb0d53d409 | 14,816 | py | Python
assignment-01/assignment-01.py | ehumss/itu-blu537e-data-analysis-and-visualisation | a401b0d8580d2021a9f634607339d074327276cd | ["MIT"] | 1 | 2020-01-28T12:48:53.000Z | 2020-01-28T12:48:53.000Z
assignment-01/assignment-01.py | ehumss/itu-blu537e-data-analysis-and-visualisation | a401b0d8580d2021a9f634607339d074327276cd | ["MIT"] | null | null | null
assignment-01/assignment-01.py | ehumss/itu-blu537e-data-analysis-and-visualisation | a401b0d8580d2021a9f634607339d074327276cd | ["MIT"] | null | null | null |
####################################################################################################
#
# ISTANBUL TECHNICAL UNIVERSITY
# BLU 537E - Data Analysis & Visualization
# Assignment 01
#
####################################################################################################
#
# PROBLEM 1
#
# A store charges $12 per item if you buy less than 10 items.
#
# If you buy between 10 and 99 items, the cost is $10 per item.
#
# If you buy 100 or more items, the cost is $7 per item.
#
# Write a program that takes how many items are bought as an input and prints the total cost.
#
####################################################################################################
NUMBER_OF_ITEMS_TYPE_1 = 10;
NUMBER_OF_ITEMS_TYPE_2 = 100;
CHARGE_TYPE_1 = 12;
CHARGE_TYPE_2 = 10;
CHARGE_TYPE_3 = 7;
def problem1(number_of_items):
# Initially set total cost to zero.
cost = 0;
# If you buy less than "NUMBER_OF_ITEMS_TYPE_1", store charges "CHARGE_TYPE_1" per item.
if (number_of_items < NUMBER_OF_ITEMS_TYPE_1):
cost = number_of_items * CHARGE_TYPE_1;
# Store charges "CHARGE_TYPE_2" per item for the given condition.
elif (number_of_items >= NUMBER_OF_ITEMS_TYPE_1 and number_of_items < NUMBER_OF_ITEMS_TYPE_2):
cost = number_of_items * CHARGE_TYPE_2;
# If you buy more than "NUMBER_OF_ITEMS_TYPE_2", store charges "CHARGE_TYPE_3" per item.
elif (number_of_items >= NUMBER_OF_ITEMS_TYPE_2):
cost = number_of_items * CHARGE_TYPE_3;
print("{} items are bought, the total cost is: {}.".format(number_of_items, cost));
return;
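# A quick worked example of the pricing tiers above (illustrative figures, not
# part of the original assignment): 5 items cost 5 * 12 = 60 dollars, 50 items
# cost 50 * 10 = 500 dollars, and 150 items cost 150 * 7 = 1050 dollars, which is
# exactly what problem1(5), problem1(50) and problem1(150) print as the total.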
####################################################################################################
#
# PROBLEM 2
#
# Write a program that generates a list of 20 random numbers between 1 and 100.
#
# (a) Print the list.
# (b) Print the average of the elements in the list.
# (c) Print the largest and smallest values in the list.
# (d) Print the second largest and second smallest entries in the list
# (e) Print how many even numbers are in the list.
#
####################################################################################################
import random;
def problem2():
# Create a list.
list = [];
# Insert 20 random numbers between 1 and 100, to the list.
    for i in range(20):
list.append(random.randint(1, 100));
# PART (a): Print the list.
print("Part (a): A list of 20 random numbers between 1 and 100 is generated.\n");
print(list);
print("\n***********************************************************\n");
# PART (b): Print the average of the elements in the list.
print("Part (b): The average of the elements in the list is evaluated.\n");
sum = 0;
for i in list:
sum += i;
print("The Average: {}".format(sum/20));
print("\n***********************************************************\n");
    # PART (c): Print the largest and the smallest values in the list.
print("Part (c): The largest and the smallest values in the list are found.\n");
max = list[0];
min = list[0];
for i in list:
if max < i:
max = i;
elif min > i:
min = i;
print("The largest value is: {}".format(max));
print("The smallest value is: {}".format(min));
print("\n***********************************************************\n");
# PART (d): Print the second largest and the second smallest entries in the list.
print("Part (d): Second largest and the second smallest entries are found.\n");
    second_max = min;    # start from the smallest value found in part (c), so the
    second_min = max;    # runner-up entries are found even when list[0] is the max/min
    for i in list:
        if second_max < i and i != max:
            second_max = i;
        if second_min > i and i != min:
            second_min = i;
print("The second largest value is: {}".format(second_max));
print("The second smallest value is: {}".format(second_min));
print("\n***********************************************************\n");
# PART (e): Print how many even numbers are in the list.
print("Part (e): Total number of even numbers in the list is evaluated.\n");
count = 0;
for i in list:
if i % 2 == 0:
count += 1;
print("Count of Even Numbers: {}".format(count));
print("\n***********************************************************\n");
return;
####################################################################################################
#
# PROBLEM 3
#
# You are given a file named “blood-pressure.csv” which contains blood pressure measurements for some patients.
#
# The first column is for patient id and the second column is for blood pressure measurement in the format of mean[min-max] values.
#
# Write a function that takes this file as an input and does the following tasks:
#
# (a) Prints the lowest and highest blood pressure measurements among the patients. The output should be 108 and 180.
# (b) Prints the average of the mean values.
#
####################################################################################################
MAX_INTEGER = 65535
MIN_INTEGER = -65535
import csv
import re
def problem3(file):
max = MIN_INTEGER;
min = MAX_INTEGER;
sum = 0;
number_of_rows = 0;
# Open the CSV file using Python's built-in library: csv
with open(file, mode='r') as csv_file:
# Read the file as a dictionary.
csv_reader = csv.DictReader(csv_file)
        # Now, we have a dictionary:
# [('id', '1'), ('Blood pressure systolic (mmHg) mean[min-max]', '135[113-166]')],
# [('id', '2'), ('Blood pressure systolic (mmHg) mean[min-max]', '140 [110-155]')], etc.
# Part (a): Print the lowest and the highest blood pressure measurements among the patients.
# The output should be: 108 and 180.
for row in csv_reader:
# Given the 'Blood ... [min-max]' key, find all the integers from the value string: '135[113-166]'.
# Store the integers (blood pressures) in a list.
            list = [int(x) for x in re.findall(r'\d+', row['Blood pressure systolic (mmHg) mean[min-max]'])]
# PART A: MIN-MAX
# In every row, check the min-max values, update when necessary.
            if (list[2] > max):
                max = list[2];
            if (list[1] < min):
                min = list[1];
# PART B: THE AVERAGE
sum += list[0];
number_of_rows += 1;
print("PART (A): Print the lowest and the highest blood pressure measurements among the patients.\n");
print("The Lowest Blood Pressure is : {}".format(min))
print("The Highest Blood Pressure is: {}".format(max))
print("\n***********************************************************\n");
# Part (b): Print the average of the mean values.
print("PART (B): Print the average of the mean values.\n")
print("The Average is: {}".format(sum/number_of_rows));
print("\n***********************************************************\n");
return;
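# Illustrative check of the parsing step used in problem3 (not part of the
# original assignment): for a cell such as '135[113-166]' the regular expression
# extracts [mean, min, max] as integers.
assert [int(x) for x in re.findall(r'\d+', '135[113-166]')] == [135, 113, 166]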
####################################################################################################
#
# PROBLEM 4
#
# You are given a csv (gdp_per_capita.csv) file for GDP per capita taken from World Bank.
#
# The file holds data from 1960 to 2017. Note that some data for certain years are missing.
#
# Write a function that takes this file as an input and does the following tasks for Turkey:
#
# (a) Calculate the yearly percentage increase compared to the previous year and find the year that has the highest increase in terms of percentage.
# (b) Find the years that GDP per capita decreased compared to the previous year.
#
####################################################################################################
import collections
import csv
import re
def problem4(file):
# Open the CSV file using Python's built-in module: csv
file = open(file, mode = 'r')
# Read the file as a dictionary.
reader = csv.DictReader(file, delimiter=';')
# OrderedDict([('Country Name', 'Aruba'), ('1960', ''), ('1961', ''), ('1962', ''), ('1963', ''), ('1964', ''),
# ('1965', ''), ('1966', ''), ('1967', ''), ('1968', ''), ('1969', ''), ('1970', ''), ('1971', ''), ('1972', ''), ... ]), etc.
# Part (a): Create a dictionary to store yearly percentage increase.
increase_in_terms_of_percentage = {}
for row in reader:
# Part (a): For Turkey, evaluate the increase using formula: (current_gdp - previous_gdp) * 100 / (current_gdp)
if row['Country Name'] == 'Turkey':
# Part (a): Increase percentage is calculated for: [1961 and 2017] time interval.
# Part (a): (There is NO increase percentage for the year 1960.)
for i in range(2, len(reader.fieldnames)):
increase_in_terms_of_percentage[reader.fieldnames[i]] = (float(row[reader.fieldnames[i]]) - float(row[reader.fieldnames[i - 1]])) * 100 / (float(row[reader.fieldnames[i]]))
# Now, we have:
# {'1961': -78.73733197596287, '1962': 7.896086257857306, '1963': 11.753818665509044, '1964': 5.119424708452099,
# '1965': 4.1637664070723455, '1966': 13.387860733884622, '1967': 7.696841934220605, '1968': 8.330375671997261, ..} etc.
    # Part (a): Calculate the yearly percentage increase compared to the previous year and find the year that has the highest increase in terms of percentage.
print("PART (A): Find the year that has highest increase in terms of percentage.\n")
# Part (a): Using collections module, find the max increase and the year that has the max increase.
print("The YEAR with the HIGHEST INCREASE: {}".format(collections.Counter(increase_in_terms_of_percentage).most_common(1)))
print("\n***********************************************************\n");
# Part (b): Find the years that GDP per capita decreased compared to the previous year.
print("PART (B): Find the years that GDP per capita decreased compared to the previous year.\n")
print("GDP percentage decreased in the following YEARS: \n")
for key, value in increase_in_terms_of_percentage.items():
if value < 0:
print(key, end = ' ')
print("\n\n***********************************************************\n");
####################################################################################################
#
# PROBLEM 5
#
# Norway_new_car_sales_by_model.csv contains information on new car sales in Norway between the years 2007-2017.
#
# The dataset was obtained from the www.kaggle.com web site. It comprises monthly car sale quantities for various manufacturers and models.
#
# The Make column shows the manufacturer and the Pct column shows the percent share in monthly total sales.
#
# Using this dataset do the following tasks:
#
# (a) Print the number of unique manufacturers in this dataset.
# (b) Find the manufacturer that has the highest car sales in 2010?
#
####################################################################################################
import collections
import csv
def problem5(file):
# Open the CSV file using Python's built-in module: csv
# To avoid UnicodeDecodeError, errors='ignore' parameter is used.
file = open(file, mode = 'r', encoding="utf8", errors='ignore')
# Read the file as a dictionary.
reader = csv.DictReader(file, delimiter = ',')
# Now, we have a dictionary:
# [('Year', '2007'), ('Month', '1'), ('Make', 'Volkswagen '), ('Model', 'Volkswagen Passat'), ('Quantity', '1267'), ('Pct', '10')]
# [('Year', '2007'), ('Month', '1'), ('Make', 'Toyota '), ('Model', 'Toyota Rav4'), ('Quantity', '819'), ('Pct', '6.5')], etc.
# Part (a): Create a list to store all manufacturers.
manufacturer = []
# Part (b): Using collections module, create a counter to be able to count the car sales of each manufacturer.
quantity_of_car_sales = collections.Counter()
for row in reader:
# Part (a): Add all the manufacturers to the list, without considering if it is already in the list or not.
manufacturer.append(row['Make'])
# Part (a): Now, we have a list as follows:
# Part (a): ['Volkswagen ', 'Toyota ', 'Toyota ', 'Volkswagen ', 'Toyota ', 'Peugeot ', 'Skoda ', 'Toyota ', 'Ford ', 'Volvo ', ...]
        # Part (b): In year 2010, for each manufacturer find the number of car sales and sum them up.
if row['Year'] == '2010':
quantity_of_car_sales[row['Make']] += int(row['Quantity'])
    # Part (a): Using collections module, count the number of occurrences of the manufacturers.
unique_manifacturers = collections.Counter(manufacturer)
# Part (a): Now, the keys are unique in this list:
# Part (a): Counter({'Toyota ': 492, 'Volkswagen ': 440, 'Volvo ': 294, 'Ford ': 246, 'Nissan ': 180, 'Audi ': 146,
# Part (a): 'Skoda ': 142, 'Peugeot ': 132, 'BMW ': 130, 'Mitsubishi ': 105, 'Mazda ': 80, 'Mercedes-Benz ': 63,]) etc.
# PART (a): Print the number of unique manufacturers in this dataset.
print("PART (A): Print the number of unique manufacturers in this dataset.\n")
print("The Number of Unique Manifacturers is: {}".format(len(unique_manifacturers.keys())))
print("\n***********************************************************\n");
# PART (b): Find the manufacturer that has the highest car sales in 2010.
print("PART (B): Find the manufacturer that has the highest car sales in 2010.\n")
print("The MANUFACTURER with HIGHEST CAR SALES in 2010: {}".format(quantity_of_car_sales.most_common(1)))
print("\n***********************************************************\n");
####################################################################################################
# TEST CODE
####################################################################################################
# PROBLEM ONE
problem1(1);
problem1(10);
problem1(100);
# PROBLEM TWO
problem2();
# PROBLEM THREE
problem3("blood_pressure.csv");
# PROBLEM FOUR
problem4("gdp_per_capita.csv");
# PROBLEM FIVE
problem5("norway_new_car_sales_by_model.csv");
####################################################################################################
| 43.83432 | 189 | 0.532667 |
| true | true |
f7157fcc233e7ad5174d2ffad33f0e7b24b80a15 | 1,120 | py | Python
sagemaker_studio/containers/dashboard/src/app.py | NihalHarish/sagemaker-explaining-credit-decisions | e5965902d8901819a60f8c56517a82ddd17c1f95 | ["Apache-2.0"] | 80 | 2020-04-15T09:35:11.000Z | 2022-03-23T01:56:12.000Z
sagemaker_studio/containers/dashboard/src/app.py | IronOnet/sagemaker-explaining-credit-decisions | dbb8ea1a685412033c774c2a79cc1e5794438cf9 | ["Apache-2.0"] | 8 | 2020-04-16T16:53:09.000Z | 2022-02-06T17:07:02.000Z
sagemaker_studio/containers/dashboard/src/app.py | IronOnet/sagemaker-explaining-credit-decisions | dbb8ea1a685412033c774c2a79cc1e5794438cf9 | ["Apache-2.0"] | 28 | 2020-05-25T09:26:41.000Z | 2022-01-25T22:23:54.000Z |
from pathlib import Path
import streamlit as st
from package import utils
from pages import local_page, global_page
from shared import list_explanation_groups
def explanation_group_selectbox():
paths = list_explanation_groups()
path = st.sidebar.selectbox(
label='Select explanation group:',
options=paths,
format_func=lambda e: e.split('/')[-2]
)
return path
def explanation_scope_selectbox():
explanation_scope = st.sidebar.selectbox(
label='Select explanation scope:',
options=["local", "global"],
index=1,
format_func=lambda e: {'local': 'Individual', 'global': 'Group'}[e]
)
return explanation_scope
if __name__ == "__main__":
current_folder = utils.get_current_folder(globals())
st.sidebar.markdown('# Explanations Dashboard')
explanation_group_path = explanation_group_selectbox()
explanation_scope = explanation_scope_selectbox()
if explanation_scope == "local":
local_page.show(explanation_group_path)
elif explanation_scope == "global":
global_page.show(explanation_group_path)
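# Illustrative usage note (an assumption, not stated in this file): a Streamlit
# app like this one is normally launched from the command line, e.g.
#     streamlit run sagemaker_studio/containers/dashboard/src/app.py
# after which the sidebar selectboxes above choose the explanation group and
# whether the local (individual) or global (group) page is rendered.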
| 28.717949 | 75 | 0.707143 |
| true | true |
f715800c50b2c0c85b8363141732d1ea4e6cedf4 | 11,896 | py | Python
opflexagent/rpc.py | shyam81295/python-opflex-agent | 3b564c93d62734354eea059afec7dce713225872 | ["Apache-2.0"] | null | null | null
opflexagent/rpc.py | shyam81295/python-opflex-agent | 3b564c93d62734354eea059afec7dce713225872 | ["Apache-2.0"] | null | null | null
opflexagent/rpc.py | shyam81295/python-opflex-agent | 3b564c93d62734354eea059afec7dce713225872 | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import rpc as n_rpc
from neutron.common import topics
from oslo_log import helpers as log
from oslo_log import log as logging
import oslo_messaging
LOG = logging.getLogger(__name__)
TOPIC_OPFLEX = 'opflex'
ENDPOINT = 'endpoint'
VRF = 'vrf'
NOTIFY_VRF = 'notify-vrf'
class AgentNotifierApi(object):
"""Server side notification API:
- Version 1.3: add notify vrf
"""
BASE_RPC_API_VERSION = '1.3'
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.BASE_RPC_API_VERSION)
self.client = n_rpc.get_client(target)
self.topic_port_update = topics.get_topic_name(topic, topics.PORT,
topics.UPDATE)
self.topic_port_delete = topics.get_topic_name(topic, topics.PORT,
topics.DELETE)
self.topic_subnet_update = topics.get_topic_name(topic, topics.SUBNET,
topics.UPDATE)
self.topic_opflex_notify_vrf = topics.get_topic_name(
topic, TOPIC_OPFLEX, NOTIFY_VRF, topics.UPDATE)
self.topic_opflex_endpoint_update = topics.get_topic_name(
topic, TOPIC_OPFLEX, ENDPOINT, topics.UPDATE)
self.topic_opflex_vrf_update = topics.get_topic_name(
topic, TOPIC_OPFLEX, VRF, topics.UPDATE)
def port_update(self, context, port):
host = port.get('binding:host_id')
if host:
cctxt = self.client.prepare(
server=host, topic=self.topic_port_update, version='1.1')
cctxt.cast(context, 'port_update', port=port)
def port_delete(self, context, port):
cctxt = self.client.prepare(fanout=True, topic=self.topic_port_delete,
version='1.1')
cctxt.cast(context, 'port_delete', port=port)
def subnet_update(self, context, subnet):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_subnet_update,
version='1.1')
cctxt.cast(context, 'subnet_update', subnet=subnet)
def opflex_notify_vrf(self, context, vrf):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_opflex_notify_vrf,
version='1.3')
cctxt.cast(context, 'opflex_notify_vrf', vrf=vrf)
def opflex_endpoint_update(self, context, details, host=None):
cctxt = self.client.prepare(
topic=self.topic_opflex_endpoint_update, server=host,
version='1.2')
cctxt.cast(context, 'opflex_endpoint_update', details=details)
def opflex_vrf_update(self, context, details):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_opflex_vrf_update,
version='1.2')
cctxt.cast(context, 'opflex_vrf_update', details=details)
class GBPServerRpcApi(object):
"""Agent-side RPC (stub) for agent-to-plugin interaction.
Version 1.1: add async request_* APIs
"""
GBP_RPC_VERSION = "1.1"
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.GBP_RPC_VERSION)
self.client = n_rpc.get_client(target)
@log.log_method_call
def get_gbp_details(self, context, agent_id, device=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details', agent_id=agent_id,
device=device, host=host)
@log.log_method_call
def get_gbp_details_list(self, context, agent_id, devices=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details_list', agent_id=agent_id,
devices=devices, host=host)
@log.log_method_call
def get_vrf_details(self, context, agent_id, vrf_id=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details', agent_id=agent_id,
vrf_id=vrf_id, host=host)
@log.log_method_call
def get_vrf_details_list(self, context, agent_id, vrf_ids=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details_list', agent_id=agent_id,
vrf_ids=vrf_ids, host=host)
@log.log_method_call
def request_endpoint_details(self, context, agent_id, request=None,
host=None):
# Request is a tuple with the device_id as first element, and the
# request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_endpoint_details_list(self, context, agent_id, requests=None,
host=None):
# Requests is a list of tuples with the device_id as first element,
# and the request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def request_vrf_details(self, context, agent_id, request=None, host=None):
# Request is a tuple with the vrf_id as first element, and the
# request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_vrf_details_list(self, context, agent_id, requests=None,
host=None):
# Requests is a list of tuples with the vrf_id as first element,
# and the request ID as second element
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def ip_address_owner_update(self, context, agent_id, ip_owner_info,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'ip_address_owner_update', agent_id=agent_id,
ip_owner_info=ip_owner_info, host=host)
class GBPServerRpcCallback(object):
"""Plugin-side RPC (implementation) for agent-to-plugin interaction."""
# History
# 1.0 Initial version
# 1.1 Async request_* APIs
RPC_API_VERSION = "1.1"
target = oslo_messaging.Target(version=RPC_API_VERSION)
def __init__(self, gbp_driver, agent_notifier=None):
self.gbp_driver = gbp_driver
self.agent_notifier = agent_notifier
def get_gbp_details(self, context, **kwargs):
return self.gbp_driver.get_gbp_details(context, **kwargs)
def get_gbp_details_list(self, context, **kwargs):
return [
self.get_gbp_details(
context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def get_vrf_details(self, context, **kwargs):
return self.gbp_driver.get_vrf_details(context, **kwargs)
def get_vrf_details_list(self, context, **kwargs):
return [
self.get_vrf_details(
context,
vrf_id=vrf_id,
**kwargs
)
for vrf_id in kwargs.pop('vrf_ids', [])
]
def request_endpoint_details(self, context, **kwargs):
result = [self.gbp_driver.request_endpoint_details(context, **kwargs)]
# Notify the agent back once the answer is calculated
if result[0]:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_endpoint_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_endpoint_details(
context, request=request, **kwargs)
if details:
result.append(details)
# Notify the agent back once the answer is calculated
        # Exclude empty answers, as an error has occurred and the agent might
# want to retry
if result:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_vrf_details(self, context, **kwargs):
result = [self.gbp_driver.request_vrf_details(context, **kwargs)]
# Notify the agent back once the answer is calculated
if result[0]:
self.agent_notifier.opflex_vrf_update(context, result,
host=kwargs.get('host'))
def request_vrf_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_vrf_details(
context, request=request, **kwargs)
if details:
result.append(details)
# Notify the agent back once the answer is calculated
        # Exclude empty answers, as an error has occurred and the agent might
# want to retry
if result:
self.agent_notifier.opflex_vrf_update(
context, [x for x in result if x], host=kwargs.get('host'))
def ip_address_owner_update(self, context, **kwargs):
self.gbp_driver.ip_address_owner_update(context, **kwargs)
class OpenstackRpcMixin(object):
"""A mix-in that enable Opflex agent
support in agent implementations.
"""
target = oslo_messaging.Target(version='1.3')
def subnet_update(self, context, subnet):
self.updated_vrf.add(subnet['tenant_id'])
LOG.debug("subnet_update message processed for subnet %s",
subnet['id'])
def opflex_notify_vrf(self, context, vrf):
self.updated_vrf.add(vrf)
LOG.debug("opflex_notify_vrf message processed for vrf %s", vrf)
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def opflex_endpoint_update(self, context, details):
self._opflex_endpoint_update(context, details)
def opflex_vrf_update(self, context, details):
        self._opflex_vrf_update(context, details)
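# Illustrative wiring sketch (not part of this module): how a plugin-side driver
# might combine the classes above. The topic constant and the gbp_driver object
# are placeholders/assumptions; a real deployment registers the callback as an
# RPC endpoint through neutron's messaging layer.
#     notifier = AgentNotifierApi(topics.AGENT)
#     callback = GBPServerRpcCallback(gbp_driver, agent_notifier=notifier)
# Agents then call GBPServerRpcApi.request_endpoint_details(); the callback asks
# the driver for the answer and pushes it back to the requesting host via
# notifier.opflex_endpoint_update().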
| 40.879725 | 79 | 0.634247 |
from neutron.common import rpc as n_rpc
from neutron.common import topics
from oslo_log import helpers as log
from oslo_log import log as logging
import oslo_messaging
LOG = logging.getLogger(__name__)
TOPIC_OPFLEX = 'opflex'
ENDPOINT = 'endpoint'
VRF = 'vrf'
NOTIFY_VRF = 'notify-vrf'
class AgentNotifierApi(object):
BASE_RPC_API_VERSION = '1.3'
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.BASE_RPC_API_VERSION)
self.client = n_rpc.get_client(target)
self.topic_port_update = topics.get_topic_name(topic, topics.PORT,
topics.UPDATE)
self.topic_port_delete = topics.get_topic_name(topic, topics.PORT,
topics.DELETE)
self.topic_subnet_update = topics.get_topic_name(topic, topics.SUBNET,
topics.UPDATE)
self.topic_opflex_notify_vrf = topics.get_topic_name(
topic, TOPIC_OPFLEX, NOTIFY_VRF, topics.UPDATE)
self.topic_opflex_endpoint_update = topics.get_topic_name(
topic, TOPIC_OPFLEX, ENDPOINT, topics.UPDATE)
self.topic_opflex_vrf_update = topics.get_topic_name(
topic, TOPIC_OPFLEX, VRF, topics.UPDATE)
def port_update(self, context, port):
host = port.get('binding:host_id')
if host:
cctxt = self.client.prepare(
server=host, topic=self.topic_port_update, version='1.1')
cctxt.cast(context, 'port_update', port=port)
def port_delete(self, context, port):
cctxt = self.client.prepare(fanout=True, topic=self.topic_port_delete,
version='1.1')
cctxt.cast(context, 'port_delete', port=port)
def subnet_update(self, context, subnet):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_subnet_update,
version='1.1')
cctxt.cast(context, 'subnet_update', subnet=subnet)
def opflex_notify_vrf(self, context, vrf):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_opflex_notify_vrf,
version='1.3')
cctxt.cast(context, 'opflex_notify_vrf', vrf=vrf)
def opflex_endpoint_update(self, context, details, host=None):
cctxt = self.client.prepare(
topic=self.topic_opflex_endpoint_update, server=host,
version='1.2')
cctxt.cast(context, 'opflex_endpoint_update', details=details)
def opflex_vrf_update(self, context, details):
cctxt = self.client.prepare(fanout=True,
topic=self.topic_opflex_vrf_update,
version='1.2')
cctxt.cast(context, 'opflex_vrf_update', details=details)
class GBPServerRpcApi(object):
GBP_RPC_VERSION = "1.1"
def __init__(self, topic):
target = oslo_messaging.Target(
topic=topic, version=self.GBP_RPC_VERSION)
self.client = n_rpc.get_client(target)
@log.log_method_call
def get_gbp_details(self, context, agent_id, device=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details', agent_id=agent_id,
device=device, host=host)
@log.log_method_call
def get_gbp_details_list(self, context, agent_id, devices=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_gbp_details_list', agent_id=agent_id,
devices=devices, host=host)
@log.log_method_call
def get_vrf_details(self, context, agent_id, vrf_id=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details', agent_id=agent_id,
vrf_id=vrf_id, host=host)
@log.log_method_call
def get_vrf_details_list(self, context, agent_id, vrf_ids=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
return cctxt.call(context, 'get_vrf_details_list', agent_id=agent_id,
vrf_ids=vrf_ids, host=host)
@log.log_method_call
def request_endpoint_details(self, context, agent_id, request=None,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_endpoint_details_list(self, context, agent_id, requests=None,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_endpoint_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def request_vrf_details(self, context, agent_id, request=None, host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details', agent_id=agent_id,
request=request, host=host)
@log.log_method_call
def request_vrf_details_list(self, context, agent_id, requests=None,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'request_vrf_details_list',
agent_id=agent_id, requests=requests, host=host)
@log.log_method_call
def ip_address_owner_update(self, context, agent_id, ip_owner_info,
host=None):
cctxt = self.client.prepare(version=self.GBP_RPC_VERSION)
cctxt.call(context, 'ip_address_owner_update', agent_id=agent_id,
ip_owner_info=ip_owner_info, host=host)
class GBPServerRpcCallback(object):
RPC_API_VERSION = "1.1"
target = oslo_messaging.Target(version=RPC_API_VERSION)
def __init__(self, gbp_driver, agent_notifier=None):
self.gbp_driver = gbp_driver
self.agent_notifier = agent_notifier
def get_gbp_details(self, context, **kwargs):
return self.gbp_driver.get_gbp_details(context, **kwargs)
def get_gbp_details_list(self, context, **kwargs):
return [
self.get_gbp_details(
context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def get_vrf_details(self, context, **kwargs):
return self.gbp_driver.get_vrf_details(context, **kwargs)
def get_vrf_details_list(self, context, **kwargs):
return [
self.get_vrf_details(
context,
vrf_id=vrf_id,
**kwargs
)
for vrf_id in kwargs.pop('vrf_ids', [])
]
def request_endpoint_details(self, context, **kwargs):
result = [self.gbp_driver.request_endpoint_details(context, **kwargs)]
if result[0]:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_endpoint_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_endpoint_details(
context, request=request, **kwargs)
if details:
result.append(details)
if result:
self.agent_notifier.opflex_endpoint_update(
context, result, host=kwargs.get('host'))
def request_vrf_details(self, context, **kwargs):
result = [self.gbp_driver.request_vrf_details(context, **kwargs)]
if result[0]:
self.agent_notifier.opflex_vrf_update(context, result,
host=kwargs.get('host'))
def request_vrf_details_list(self, context, **kwargs):
result = []
for request in kwargs.pop('requests', []):
details = self.gbp_driver.request_vrf_details(
context, request=request, **kwargs)
if details:
result.append(details)
if result:
self.agent_notifier.opflex_vrf_update(
context, [x for x in result if x], host=kwargs.get('host'))
def ip_address_owner_update(self, context, **kwargs):
self.gbp_driver.ip_address_owner_update(context, **kwargs)
class OpenstackRpcMixin(object):
target = oslo_messaging.Target(version='1.3')
def subnet_update(self, context, subnet):
self.updated_vrf.add(subnet['tenant_id'])
LOG.debug("subnet_update message processed for subnet %s",
subnet['id'])
def opflex_notify_vrf(self, context, vrf):
self.updated_vrf.add(vrf)
LOG.debug("opflex_notify_vrf message processed for vrf %s", vrf)
def port_update(self, context, **kwargs):
port = kwargs.get('port')
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def opflex_endpoint_update(self, context, details):
self._opflex_endpoint_update(context, details)
def opflex_vrf_update(self, context, details):
        self._opflex_vrf_update(context, details)
| true
| true
|
f715802eeda042cbb9bf7a01b8eb94abfede69c2
| 2,706
|
py
|
Python
|
modules/cmderr.py
|
patataofcourse/styleventer-archive
|
dc4b82f2903f91990fa9236cb67a9dd92e3e1a2f
|
[
"MIT"
] | 1
|
2021-01-28T16:22:32.000Z
|
2021-01-28T16:22:32.000Z
|
modules/cmderr.py
|
alexdevteam/styleventer-archive
|
303f280049d480b21c6e804e236c90fe3475a074
|
[
"MIT"
] | 1
|
2021-01-16T22:14:36.000Z
|
2021-01-16T22:14:36.000Z
|
modules/cmderr.py
|
patataofcourse/styleventer-archive
|
dc4b82f2903f91990fa9236cb67a9dd92e3e1a2f
|
[
"MIT"
] | 1
|
2021-01-16T22:01:59.000Z
|
2021-01-16T22:01:59.000Z
|
from discord.ext import commands
import discord, sys, os
import traceback
import datetime
from libs import settings
async def oncmderror(ctx: discord.ext.commands.Context, error):
if type(error) == commands.CommandOnCooldown:
if int(error.retry_after) == 0:
await ctx.send("Wait a few seconds before using this command again!")
else:
await ctx.send("Wait at least {} more seconds to use this command again!".format(int(error.retry_after)))
elif type(error) == commands.CommandNotFound:
setting = settings.get_setting("prefix_response_channels", [ctx.message.guild.id])
if setting is None:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif str(ctx.message.channel.id) in setting:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif type(error) == commands.errors.NotOwner:
await ctx.send("That command is only usable by aleok.")
elif type(error) == commands.errors.MissingRequiredArgument:
cmdname = ctx.message.content.split()[0].lstrip(ctx.bot.command_prefix)
command = next(filter(lambda cmd: cmdname in cmd.aliases or cmdname == cmd.name, ctx.bot.commands))
await ctx.send(f"Syntax: `'{command.name} {command.usage}`")
elif type(error) == commands.errors.BadArgument:
await ctx.send(f"Wrong syntax ({str(error)}). Try using `'help command`")
elif type(error) == commands.errors.ExpectedClosingQuoteError:
await ctx.send("Expected a closing quote (\")")
elif type(error) == commands.errors.UnexpectedQuoteError:
await ctx.send("Unexpected quote mark (\") in non-quoted argument")
else:
error_str = "\n".join(traceback.format_exception(type(error), error, error.__traceback__))
timenow = datetime.datetime.now()
errorcode = f"{timenow.year}{timenow.month:02}{timenow.day:02}{timenow.hour:02}{timenow.minute:02}{timenow.second:02}"
errorcode = format(int(errorcode), "X")
await ctx.send(
"There was an unknown error! Please send the following error code to aleok: `{}`".format(errorcode))
try:
owner = ctx.bot.get_user(ctx.bot.owner_id)
if owner is None:
await ctx.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
else:
await owner.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
except Exception as e:
await ctx.send(f"Error IN sending error, yay! (internal cmderr error: {e})")
print(error_str)
def setup(bot, **kwargs):
bot.on_command_error = oncmderror
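# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The error code above is just the current timestamp packed into a decimal
# string and rendered as hex, so it can be decoded back when a user reports it.
# The timestamp below is hypothetical.
if __name__ == "__main__":
    import datetime
    t = datetime.datetime(2021, 1, 28, 16, 22, 32)
    code = format(int(f"{t.year}{t.month:02}{t.day:02}{t.hour:02}{t.minute:02}{t.second:02}"), "X")
    print(code)                 # hex error code as shown to the user
    print(str(int(code, 16)))   # -> '20210128162232', the original timestamp digits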
| 52.038462
| 126
| 0.65558
|
from discord.ext import commands
import discord, sys, os
import traceback
import datetime
from libs import settings
async def oncmderror(ctx: discord.ext.commands.Context, error):
if type(error) == commands.CommandOnCooldown:
if int(error.retry_after) == 0:
await ctx.send("Wait a few seconds before using this command again!")
else:
await ctx.send("Wait at least {} more seconds to use this command again!".format(int(error.retry_after)))
elif type(error) == commands.CommandNotFound:
setting = settings.get_setting("prefix_response_channels", [ctx.message.guild.id])
if setting is None:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif str(ctx.message.channel.id) in setting:
await ctx.send("Command `{}` doesn't exist!".format(ctx.message.content.split()[0]))
elif type(error) == commands.errors.NotOwner:
await ctx.send("That command is only usable by aleok.")
elif type(error) == commands.errors.MissingRequiredArgument:
cmdname = ctx.message.content.split()[0].lstrip(ctx.bot.command_prefix)
command = next(filter(lambda cmd: cmdname in cmd.aliases or cmdname == cmd.name, ctx.bot.commands))
await ctx.send(f"Syntax: `'{command.name} {command.usage}`")
elif type(error) == commands.errors.BadArgument:
await ctx.send(f"Wrong syntax ({str(error)}). Try using `'help command`")
elif type(error) == commands.errors.ExpectedClosingQuoteError:
await ctx.send("Expected a closing quote (\")")
elif type(error) == commands.errors.UnexpectedQuoteError:
await ctx.send("Unexpected quote mark (\") in non-quoted argument")
else:
error_str = "\n".join(traceback.format_exception(type(error), error, error.__traceback__))
timenow = datetime.datetime.now()
errorcode = f"{timenow.year}{timenow.month:02}{timenow.day:02}{timenow.hour:02}{timenow.minute:02}{timenow.second:02}"
errorcode = format(int(errorcode), "X")
await ctx.send(
"There was an unknown error! Please send the following error code to aleok: `{}`".format(errorcode))
try:
owner = ctx.bot.get_user(ctx.bot.owner_id)
if owner is None:
await ctx.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
else:
await owner.send(f"Error `{errorcode}`:```python\n{error_str[:1700]}```")
except Exception as e:
await ctx.send(f"Error IN sending error, yay! (internal cmderr error: {e})")
print(error_str)
def setup(bot, **kwargs):
bot.on_command_error = oncmderror
| true
| true
|
f71581382f809688e495e7651dfc11918e82e216
| 884
|
py
|
Python
|
awwardsApp/urls.py
|
umunadine/Awwards
|
1a862ef64c195e6ab9b38c8e1faf35f224354dbb
|
[
"MIT"
] | null | null | null |
awwardsApp/urls.py
|
umunadine/Awwards
|
1a862ef64c195e6ab9b38c8e1faf35f224354dbb
|
[
"MIT"
] | null | null | null |
awwardsApp/urls.py
|
umunadine/Awwards
|
1a862ef64c195e6ab9b38c8e1faf35f224354dbb
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url,include
from django.conf import settings
from . import views
from django.conf.urls.static import static
urlpatterns = [
url(r'^$',views.index,name='index'),
url(r'^accounts/profile/', views.my_profile, name='my_profile'),
url(r'register/',views.register, name='register'),
url(r'project/(\d+)',views.rate_project,name='rate-project'),
url(r'profile/(\d+)',views.profile,name='profile'),
url(r'my_profile',views.my_profile,name='my_profile'),
url(r'^new/project$', views.new_project, name='new_project'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^ratings/', include('star_ratings.urls', namespace='ratings')),
url(r'^accounts/', include('registration.backends.simple.urls')),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 38.434783
| 81
| 0.707014
|
from django.conf.urls import url,include
from django.conf import settings
from . import views
from django.conf.urls.static import static
urlpatterns = [
url(r'^$',views.index,name='index'),
url(r'^accounts/profile/', views.my_profile, name='my_profile'),
url(r'register/',views.register, name='register'),
url(r'project/(\d+)',views.rate_project,name='rate-project'),
url(r'profile/(\d+)',views.profile,name='profile'),
url(r'my_profile',views.my_profile,name='my_profile'),
url(r'^new/project$', views.new_project, name='new_project'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^ratings/', include('star_ratings.urls', namespace='ratings')),
url(r'^accounts/', include('registration.backends.simple.urls')),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| true
| true
|
f71581754c1df790c4b96c28981d61f8e5506370
| 89
|
py
|
Python
|
samples/helloworld.py
|
neumond/minpiler
|
2e37a9e0854383d3974af38e1cb2da0ecb8e2108
|
[
"MIT"
] | 23
|
2020-12-20T03:39:30.000Z
|
2022-03-23T15:47:10.000Z
|
samples/helloworld.py
|
neumond/minpiler
|
2e37a9e0854383d3974af38e1cb2da0ecb8e2108
|
[
"MIT"
] | 15
|
2020-12-21T01:12:22.000Z
|
2021-04-19T10:40:11.000Z
|
samples/helloworld.py
|
neumond/minpiler
|
2e37a9e0854383d3974af38e1cb2da0ecb8e2108
|
[
"MIT"
] | 2
|
2022-02-12T19:19:50.000Z
|
2022-02-12T21:33:35.000Z
|
from minpiler.typeshed import M, message1
M.print('Hello world!')
message1.printFlush()
| 17.8
| 41
| 0.775281
|
from minpiler.typeshed import M, message1
M.print('Hello world!')
message1.printFlush()
| true
| true
|
f715818477d40bfbaf00925d174a3b2a99345b43
| 853
|
py
|
Python
|
reviewboard/reviews/evolutions/file_attachments.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 921
|
2015-01-01T15:26:28.000Z
|
2022-03-29T11:30:38.000Z
|
reviewboard/reviews/evolutions/file_attachments.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 5
|
2015-03-17T18:57:47.000Z
|
2020-10-02T13:24:31.000Z
|
reviewboard/reviews/evolutions/file_attachments.py
|
amalik2/reviewboard
|
676aa2dce38ce619a74f2d4cb3cfae9bce21416e
|
[
"MIT"
] | 285
|
2015-01-12T06:24:36.000Z
|
2022-03-29T11:03:50.000Z
|
from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ReviewRequest', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequest', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('Review', 'file_attachment_comments', models.ManyToManyField,
related_model='reviews.FileAttachmentComment'),
AddField('ReviewRequestDraft', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequestDraft', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment')
]
| 40.619048
| 78
| 0.731536
|
from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [
AddField('ReviewRequest', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequest', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('Review', 'file_attachment_comments', models.ManyToManyField,
related_model='reviews.FileAttachmentComment'),
AddField('ReviewRequestDraft', 'file_attachments', models.ManyToManyField,
related_model='attachments.FileAttachment'),
AddField('ReviewRequestDraft', 'inactive_file_attachments',
models.ManyToManyField,
related_model='attachments.FileAttachment')
]
| true
| true
|
f71581f934458fc27232e1abba28dfc2d9fb50c7
| 2,639
|
py
|
Python
|
trustpayments/models/transaction_comment_create.py
|
TrustPayments/python-sdk
|
6fde6eb8cfce270c3612a2903a845c13018c3bb9
|
[
"Apache-2.0"
] | 2
|
2020-01-16T13:24:06.000Z
|
2020-11-21T17:40:17.000Z
|
postfinancecheckout/models/transaction_comment_create.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | 4
|
2019-10-14T17:33:23.000Z
|
2021-10-01T14:49:11.000Z
|
postfinancecheckout/models/transaction_comment_create.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | 2
|
2019-10-15T14:17:10.000Z
|
2021-09-17T13:07:09.000Z
|
# coding: utf-8
import pprint
import six
from enum import Enum
from . import AbstractTransactionCommentActive
class TransactionCommentCreate(AbstractTransactionCommentActive):
swagger_types = {
'transaction': 'int',
}
attribute_map = {
'transaction': 'transaction',
}
_transaction = None
def __init__(self, **kwargs):
self.discriminator = None
self.transaction = kwargs.get('transaction')
super().__init__(**kwargs)
self.swagger_types.update(super().swagger_types)
self.attribute_map.update(super().attribute_map)
@property
def transaction(self):
"""Gets the transaction of this TransactionCommentCreate.
:return: The transaction of this TransactionCommentCreate.
:rtype: int
"""
return self._transaction
@transaction.setter
def transaction(self, transaction):
"""Sets the transaction of this TransactionCommentCreate.
:param transaction: The transaction of this TransactionCommentCreate.
:type: int
"""
if transaction is None:
raise ValueError("Invalid value for `transaction`, must not be `None`")
self._transaction = transaction
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(TransactionCommentCreate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TransactionCommentCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 26.39
| 83
| 0.56726
|
import pprint
import six
from enum import Enum
from . import AbstractTransactionCommentActive
class TransactionCommentCreate(AbstractTransactionCommentActive):
swagger_types = {
'transaction': 'int',
}
attribute_map = {
'transaction': 'transaction',
}
_transaction = None
def __init__(self, **kwargs):
self.discriminator = None
self.transaction = kwargs.get('transaction')
super().__init__(**kwargs)
self.swagger_types.update(super().swagger_types)
self.attribute_map.update(super().attribute_map)
@property
def transaction(self):
return self._transaction
@transaction.setter
def transaction(self, transaction):
if transaction is None:
raise ValueError("Invalid value for `transaction`, must not be `None`")
self._transaction = transaction
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(TransactionCommentCreate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TransactionCommentCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f715842fcaf7266d175e63d68638aed9f2e32e69
| 7,994
|
py
|
Python
|
tagger_ui/ui_model/annotated_images_manager.py
|
RobertMcCarter/animal-finder
|
5ac839a65df62ab312e440ce43416727492e84d8
|
[
"MIT"
] | null | null | null |
tagger_ui/ui_model/annotated_images_manager.py
|
RobertMcCarter/animal-finder
|
5ac839a65df62ab312e440ce43416727492e84d8
|
[
"MIT"
] | null | null | null |
tagger_ui/ui_model/annotated_images_manager.py
|
RobertMcCarter/animal-finder
|
5ac839a65df62ab312e440ce43416727492e84d8
|
[
"MIT"
] | null | null | null |
"""
The business model core of the application.
"""
from typing import List, Union
from PIL import Image
from .annotated_image import AnnotatedImage
from .scaled_region2d import ScaledRegion2d
from .timer import Timer
from src.model import Size2d, Region2d
def clearImagesOutsideRange(
annotatedImages: List[AnnotatedImage],
currentIndex: int,
keepPrevious: int = 10,
keepNext: int = 10,
) -> None:
"""Clear out of memory any loaded images that are outside the given
range (so that we don't continue to collect in-memory images and
    consume the user's entire RAM).
"""
# First, figure out our "keep" images in memory range
startIndex = max(0, currentIndex - keepPrevious)
endIndex = min(currentIndex + keepNext, len(annotatedImages) - 1)
# Clear out images outside our range
for i in range(0, startIndex):
annotatedImages[i].image = None
for i in range(endIndex + 1, len(annotatedImages)):
annotatedImages[i].image = None
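# --- Illustrative sketch (added for clarity; not part of the original module) ---
# clearImagesOutsideRange() only touches the `image` attribute, so a tiny
# hypothetical stand-in object is enough to show the sliding eviction window.
if __name__ == "__main__":
    class _StubImage:  # hypothetical stand-in for AnnotatedImage
        def __init__(self) -> None:
            self.image = object()
    stubs = [_StubImage() for _ in range(30)]
    clearImagesOutsideRange(stubs, currentIndex=15, keepPrevious=2, keepNext=2)
    kept = [i for i, s in enumerate(stubs) if s.image is not None]
    print(kept)  # -> [13, 14, 15, 16, 17]; everything outside the window is cleared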
class AnnotatedImagesManager:
"""Maps the various regions on an annotated image to the screen rectangles
being displayed
"""
def __init__(self, annotatedImages: List[AnnotatedImage]):
assert annotatedImages
self._currentIndex = 0
self.maxViewed = 0
self._annotatedImages = annotatedImages
# ##############################################################################################
# region Properties
# ##############################################################################################
@property
def current(self) -> AnnotatedImage:
"""The currently selected/viewed annotated image"""
return self._annotatedImages[self._currentIndex]
@property
def currentIndex(self) -> int:
"""The current index within the ordered list of images"""
return self._currentIndex
@property
def images(self) -> List[AnnotatedImage]:
"""The ordered list of annotated images"""
return self._annotatedImages
# The current rectangle the user is actively drawing on the screen
# (which could be different from the image coordinates due to a small screen or window size)
activeRegion: Union[ScaledRegion2d, None] = None
def __len__(self):
"""The number of annotated images"""
return len(self._annotatedImages)
@property
def windowSize(self) -> Size2d:
"""The current size of the window where the image is displayed"""
return self._windowSize
@property
def scale(self) -> float:
"""The current scale factor to go from the original image to the scaled (likely down) image"""
return self.current.scale
@property
def regions(self) -> List[ScaledRegion2d]:
"""The ordered collection of region view-models of interest for this image"""
return self.current.regions
# The maximum index within the sorted list of annotated images that the user
# has viewed (and presumably processed)
maxViewed: int
# The directory of images this annotated image manager collection represents
saveFileName: str
# endregion
# ##############################################################################################
# region Methods
# ##############################################################################################
def isValidIndex(self, index: int) -> bool:
"""Test if the given index is valid"""
return 0 <= index < len(self._annotatedImages)
def addActiveRegion(self) -> None:
"""Adds a new region to the current image, and returns the scaled region 2d view model"""
if self.activeRegion is None:
return
self.activeRegion.canvasRectId = (
0 # It no longer belongs to that canvas rectangle
)
self.current.addRegion(self.activeRegion)
# User has "used up" the current active region
self.activeRegion = None
def updateActiveScreenRegion(self, screenRegion: Region2d) -> ScaledRegion2d:
"""The view should call this when the active region is changed
(likely the user dragging the mouse).
Returns the active scaled region.
"""
if self.activeRegion is None:
self.activeRegion = ScaledRegion2d(screenRegion)
else:
self.activeRegion.screenRegion = screenRegion
# Now re-scale the screen region to get the "true" image region
self.activeRegion.updateImageFromScreen(self.scale)
return self.activeRegion
def onWindowResized(self, newWindowSize: Size2d) -> Union[float, None]:
"""Update our current image to have the correct scale for the new canvas size
Scale the image according to our current canvas size
Returns the scale factor used to shrink the image to the size of the window,
or None if the image did not change
"""
# Save the new window size
self._windowSize = newWindowSize
# Scale the current image to this size
scale = self.current.scaleImageForSize(newWindowSize)
if scale:
# We need to resize our Tk wrapper image
self.current.wrapImageForTk()
# We changed the scaling factor, so we need to re-scale the active region too
            if self.activeRegion:
                self.activeRegion.updateScreenFromImage(scale)
        return scale
def scanForTaggedIndex(self, direction: int) -> int | None:
"""Scan through starting at the current image index for the next
image that is tagged.
direction is either +1 or -1 to control direction.
"""
        i = self.currentIndex + direction
        while 0 <= i < len(self._annotatedImages):
            if self._annotatedImages[i].isTagged:
                return i
            i += direction
        return None
def moveToImage(self, index: int):
"""Open the image with the given index
(into our ordered collection of annotated images that we received from the model layer)
"""
assert self.isValidIndex(index)
# Store the index that we're looking at
self._currentIndex = index
# Update our max viewed index
self.maxViewed = max(self.maxViewed, self._currentIndex)
# Ensure the image is loaded
if self.current.image is None:
self.current.image = Image.open(self.current.filePath)
self.current.image.load()
# Scale the image so it fits while retaining the correct aspect ratio
# Only scale if we haven't already previously scaled the image (which is slow)
# Store it back in our domain logic layer for faster access
self.current.scaleImageForSize(self._windowSize)
# Resize the image for the UI layer, and wrap it for Tk
self.current.loadImage()
self.current.scaleImageForSize(self.windowSize)
self.current.wrapImageForTk()
# Clear images outside of our "keep" window so we don't keep growing our memory footprint!
clearImagesOutsideRange(self._annotatedImages, index, 10, 10)
# endregion
# ##############################################################################################
# region Private data members
# ##############################################################################################
# The collection of annotated images we need to process for our test set
_annotatedImages: List[AnnotatedImage]
# The index into the _annotatedImages array,
# So effectively, which annotated image are we currently looking at?
_currentIndex: int = 0
# The size of the window that is displaying our images
_windowSize: Size2d = Size2d(500, 500)
# endregion
| 37.35514
| 102
| 0.621091
|
from typing import List, Union
from PIL import Image
from .annotated_image import AnnotatedImage
from .scaled_region2d import ScaledRegion2d
from .timer import Timer
from src.model import Size2d, Region2d
def clearImagesOutsideRange(
annotatedImages: List[AnnotatedImage],
currentIndex: int,
keepPrevious: int = 10,
keepNext: int = 10,
) -> None:
startIndex = max(0, currentIndex - keepPrevious)
endIndex = min(currentIndex + keepNext, len(annotatedImages) - 1)
for i in range(0, startIndex):
annotatedImages[i].image = None
for i in range(endIndex + 1, len(annotatedImages)):
annotatedImages[i].image = None
class AnnotatedImagesManager:
def __init__(self, annotatedImages: List[AnnotatedImage]):
assert annotatedImages
self._currentIndex = 0
self.maxViewed = 0
self._annotatedImages = annotatedImages
| true
| true
|
f715861adc117fbbd75adf3f5e6a1228542c06dc
| 97,810
|
py
|
Python
|
src/sos/step_executor.py
|
pgcudahy/sos
|
ee902841003c7630db501101038f370650955ef9
|
[
"BSD-3-Clause"
] | null | null | null |
src/sos/step_executor.py
|
pgcudahy/sos
|
ee902841003c7630db501101038f370650955ef9
|
[
"BSD-3-Clause"
] | null | null | null |
src/sos/step_executor.py
|
pgcudahy/sos
|
ee902841003c7630db501101038f370650955ef9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import ast
import copy
import os
import subprocess
import sys
import time
from collections import defaultdict
from collections.abc import Mapping, Sequence
from typing import List
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .messages import encode_msg, decode_msg
from .eval import SoS_eval, SoS_exec, accessed_vars, KeepOnlyImportAndDefine
from .executor_utils import (
__named_output__,
__null_func__,
__output_from__,
__traced__,
clear_output,
create_task,
get_traceback_msg,
reevaluate_output,
statementMD5,
validate_step_sig,
verify_input,
ExecuteError,
)
from .syntax import (
SOS_DEPENDS_OPTIONS,
SOS_INPUT_OPTIONS,
SOS_OUTPUT_OPTIONS,
SOS_TARGETS_OPTIONS,
)
from .targets import (
RemovedTarget,
RuntimeInfo,
UnavailableLock,
sos_variable,
UnknownTarget,
dynamic,
file_target,
sos_step,
sos_targets,
invalid_target
)
from .tasks import MasterTaskParams, TaskFile
from .utils import (
ArgumentError,
StopInputGroup,
TerminateExecution,
env,
get_traceback,
short_repr,
ProcessKilled,
get_localhost_ip,
textMD5,
)
__all__: List = []
class TaskManager:
# manage tasks created by the step
def __init__(self, num_tasks, trunk_size, trunk_workers):
super(TaskManager, self).__init__()
self.num_tasks = num_tasks
import math
self._slots = [[] for x in range(math.ceil(num_tasks / trunk_size))]
self._last_slot_size = (
trunk_size if (num_tasks % trunk_size == 0) else (num_tasks % trunk_size)
)
self.trunk_size = trunk_size
self.trunk_workers = trunk_workers
self._submitted_tasks = []
# entire groups
self._unsubmitted_slots = []
# collection of partial groups if some tasks are completed
self._unsubmitted_tasks = []
# derived from _unsubmitted_slots
self._all_ids = []
self._all_output = []
#
self._terminate = False
#
self._tags = {}
def set(self, idx, task_def):
slot = idx // self.trunk_size
#
# slot [
# [idx, None] <- for empty
# [idx, taskdef] <- for non empty
# ]
self._slots[slot].append([idx, task_def])
# the slot is full
if len(self._slots[slot]) == self.trunk_size or (
slot == len(self._slots) - 1
and len(self._slots[slot]) == self._last_slot_size
):
            # if there are valid tasks
if not all([x[1] is None for x in self._slots[slot]]):
# remove empty tasks and sort by id
if self.trunk_size == 1 or any(x[1] is None for x in self._slots[slot]):
                    # if partial, send to the partial list
self._unsubmitted_tasks.extend(
[x[1] for x in self._slots[slot] if x[1] is not None]
)
else:
self._unsubmitted_slots.append(
sorted(self._slots[slot], key=lambda x: x[0])
)
                # clear slot
self._slots[slot] = []
if not task_def:
return
if isinstance(task_def[2], Sequence):
self._all_output.extend(task_def[2])
self._all_ids.append(task_def[0])
self._tags[task_def[0]] = task_def[1].tags
def tags(self, task_id):
return self._tags.get(task_id, [])
def index_of(self, task_id):
if task_id in self._all_ids:
return self._all_ids.index(task_id)
else:
return -1
def has_output(self, output):
if not isinstance(output, Sequence) or not self._unsubmitted_slots:
return False
return any(x in self._all_output for x in output)
def get_job(self, all_tasks=False):
# single tasks
ids = []
# submit all tasks without trunk, easy
for slot in self._unsubmitted_slots:
# create a master task
master = MasterTaskParams(self.trunk_workers)
for _, (task_id, taskdef, _) in slot:
master.push(task_id, taskdef)
ids.append(master.ID)
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
self._unsubmitted_slots = []
# individual tasks...
if self.trunk_size == 1 or all_tasks:
to_be_submitted = self._unsubmitted_tasks
[
to_be_submitted.extend([x[1] for x in slot if x[1] is not None])
for slot in self._slots
if slot
]
self._unsubmitted_tasks = []
else:
# save complete blocks
num_tasks = (
len(self._unsubmitted_tasks) // self.trunk_size * self.trunk_size
)
to_be_submitted = self._unsubmitted_tasks[:num_tasks]
self._unsubmitted_tasks = self._unsubmitted_tasks[num_tasks:]
if self.trunk_size == 1 or (all_tasks and len(self._unsubmitted_tasks) == 1):
for task_id, taskdef, _ in to_be_submitted:
                # if the task file exists, the task is perhaps already running, so we do not
                # change the task file; otherwise we would be changing the status of the task
TaskFile(task_id).save(taskdef)
send_message_to_controller(
[
"workflow_sig",
"task",
task_id,
f"{{'creation_time': {time.time()}}}",
]
)
ids.append(task_id)
else:
master = None
for task_id, taskdef, _ in to_be_submitted:
if master is not None and master.num_tasks() == self.trunk_size:
ids.append(master.ID)
TaskFile(master.ID).save(master)
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
master = None
if master is None:
master = MasterTaskParams(self.trunk_workers)
master.push(task_id, taskdef)
# the last piece
if master is not None:
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
ids.append(master.ID)
if not ids:
return None
self._submitted_tasks.extend(ids)
return ids
def clear_submitted(self):
self._submitted_tasks = []
def expand_input_files(*args, **kwargs):
# if unspecified, use __step_output__ as input (default)
# resolve dynamic input.
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
# if no input,
if not args and not kwargs:
return env.sos_dict["step_input"]
# if only group_by ...
elif not args and all(x in SOS_TARGETS_OPTIONS for x in kwargs.keys()):
return sos_targets(
env.sos_dict["step_input"],
_verify_existence=env.config["error_mode"] != "ignore",
**kwargs,
)
else:
return sos_targets(
*args,
**kwargs,
_verify_existence=env.config["error_mode"] != "ignore",
_undetermined=False,
_source=env.sos_dict["step_name"],
)
def expand_depends_files(*args, **kwargs):
"""handle directive depends"""
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
return sos_targets(
*args,
**kwargs,
_verify_existence=True,
_undetermined=False,
_source=env.sos_dict["step_name"],
)
def expand_output_files(value, *args, **kwargs):
"""Process output files (perhaps a pattern) to determine input files."""
if any(isinstance(x, dynamic) for x in args) or any(
isinstance(y, dynamic) for y in kwargs.values()
):
return sos_targets(_undetermined=value)
else:
return sos_targets(
*args, **kwargs, _undetermined=False, _source=env.sos_dict["step_name"]
)
def parse_shared_vars(option):
shared_vars = set()
if not option:
return shared_vars
if isinstance(option, str):
shared_vars.add(option)
elif isinstance(option, Mapping):
for val in option.values():
shared_vars |= accessed_vars(val, mode="eval")
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
shared_vars.add(item)
elif isinstance(item, Mapping):
for val in item.values():
shared_vars |= accessed_vars(val, mode="eval")
return shared_vars
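# --- Illustrative sketch (added for clarity; not part of the original module) ---
# parse_shared_vars() collects the variable names referenced by a `shared`
# option: plain strings are taken verbatim, while expression values inside
# mappings are scanned with accessed_vars().
if __name__ == "__main__":
    print(parse_shared_vars("A"))         # -> {'A'}
    print(parse_shared_vars(["A", "B"]))  # -> {'A', 'B'} (a set, so order may vary)
    # a mapping value such as {'summary': 'summarize(counts)'} would also add
    # whatever names accessed_vars() finds in the expression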
def evaluate_shared(vars, option):
# handle option shared and store variables in a "__shared_vars" variable
shared_vars = {}
env.sos_dict.quick_update(vars[-1])
for key in vars[-1].keys():
try:
if key in ("output", "depends", "input"):
env.logger.warning(
f"Cannot overwrite variable step_{key} from substep variable {key}"
)
else:
env.sos_dict.set("step_" + key, [x[key] for x in vars])
except Exception as e:
env.logger.warning(f"Failed to create step level variable step_{key}: {e}")
if isinstance(option, str):
if option in env.sos_dict:
shared_vars[option] = env.sos_dict[option]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(option, Mapping):
for var, val in option.items():
try:
if var == val:
shared_vars[var] = env.sos_dict[var]
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
# if there are dictionaries in the sequence, e.g.
    # shared=['A', 'B', {'C':'D'}]
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
if item in env.sos_dict:
shared_vars[item] = env.sos_dict[item]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(item, Mapping):
for var, val in item.items():
try:
if var == val:
continue
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str or mapping are accepted in sequence: {option}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str, sequence, or mapping are accepted in sequence: {option}"
)
return shared_vars
def get_value_of_param(name, param_list, extra_dict={}):
tree = ast.parse(f"__null_func__({param_list})")
    # x.func can be an attribute (e.g. a.b()) and does not have an id
kwargs = [
x for x in ast.walk(tree) if x.__class__.__name__ == "keyword" and x.arg == name
]
if not kwargs:
return []
try:
return [ast.literal_eval(kwargs[0].value)]
except Exception:
return [
eval(
compile(
ast.Expression(body=kwargs[0].value),
filename="<string>",
mode="eval",
),
extra_dict,
)
]
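# --- Illustrative sketch (added for clarity; not part of the original module) ---
# get_value_of_param() extracts one keyword argument from a task-parameter
# string: literal values go through ast.literal_eval, anything else is
# evaluated against extra_dict.  The parameter strings below are hypothetical.
if __name__ == "__main__":
    print(get_value_of_param("queue", "queue='cluster', trunk_size=2"))    # -> ['cluster']
    print(get_value_of_param("trunk_size", "trunk_size=n * 2", {"n": 4}))  # -> [8]
    print(get_value_of_param("walltime", "queue='cluster'"))               # -> []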
def is_sos_run_the_only_last_stmt(stmt):
tree = ast.parse(stmt)
return (
len(tree.body) >= 1
and isinstance(tree.body[-1], ast.Expr)
and isinstance(tree.body[-1].value, ast.Call)
and hasattr(tree.body[-1].value.func, "id")
and tree.body[-1].value.func.id == "sos_run"
and len(
[
x
for x in ast.walk(tree)
if isinstance(x, ast.Call)
and hasattr(x.func, "id")
and x.func.id == "sos_run"
]
)
== 1
)
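# --- Illustrative sketch (added for clarity; not part of the original module) ---
# is_sos_run_the_only_last_stmt() answers two questions at once: is the final
# statement a bare sos_run(...) call, and is that the only sos_run call in the
# block?  The statements below are hypothetical.
if __name__ == "__main__":
    print(is_sos_run_the_only_last_stmt("a = 1\nsos_run('sub')"))       # -> True
    print(is_sos_run_the_only_last_stmt("x = sos_run('sub')"))          # -> False: assignment, not a bare call
    print(is_sos_run_the_only_last_stmt("sos_run('a')\nsos_run('b')"))  # -> False: more than one sos_run call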
class Base_Step_Executor:
# This base class defines how steps are executed. The derived classes will reimplement
    # some functions to behave differently in different modes.
#
def __init__(self, step):
self.step = step
self.task_manager = None
self.exec_error = ExecuteError(self.step.step_name())
#
# Functions that should be redefined in derived class
#
def submit_tasks(self, tasks):
raise RuntimeError("Undefined base function submit_tasks")
def wait_for_tasks(self, tasks, all_submitted):
# this will be redefined in subclasses
raise RuntimeError("Undefined base function wait_for_tasks")
def wait_for_subworkflows(self, allow_pending=0):
raise RuntimeError("Undefined base function wait_for_subworkflows")
def handle_unknown_target(self, e):
raise RuntimeError("Undefined base function handle_unknown_target")
def init_input_output_vars(self):
# if there is __step_output__ from previous step, use it as default input
# otherwise, reset to empty
if (
"__step_output__" not in env.sos_dict
or env.sos_dict["__step_output__"].unspecified()
):
env.sos_dict.set("step_input", sos_targets([]))
else:
env.sos_dict.set("step_input", env.sos_dict["__step_output__"])
# input can be Undetermined from undetermined output from last step
env.sos_dict.set("_input", copy.deepcopy(env.sos_dict["step_input"]))
# if there is default output for auxiliary steps, use it as step_output and _output
# otherwise reset to unspecified.
if "__default_output__" in env.sos_dict:
# if step is triggered by sos_step, it should not be considered as
# output of the step. #981
env.sos_dict.set(
"__default_output__",
sos_targets(
[
x
for x in env.sos_dict["__default_output__"]._targets
if not isinstance(x, sos_step)
]
),
)
env.sos_dict.set(
"step_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
env.sos_dict.set(
"_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
else:
env.sos_dict.set("step_output", sos_targets([]))
# output is said to be unspecified until output: is used
env.sos_dict.set("_output", sos_targets(_undetermined=True))
env.sos_dict.set("step_depends", sos_targets([]))
env.sos_dict.set("_depends", sos_targets([]))
#
# Common functions
#
def verify_output(self):
missing = sos_targets([])
if env.sos_dict["step_output"] is None:
return
if not env.sos_dict["step_output"].valid():
raise RuntimeError(
"Output of a completed step cannot be undetermined or unspecified."
)
for target in env.sos_dict["step_output"]:
if isinstance(target, (sos_step, invalid_target)):
continue
if isinstance(target, str):
if not file_target(target).target_exists("any"):
if env.config["run_mode"] == "dryrun":
# in dryrun mode, we just create these targets
file_target(target).create_placeholder()
else:
# latency wait for 2 seconds because the file system might be slow
if env.config["run_mode"] == "run":
time.sleep(2)
if not file_target(target).target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]} (curdir={os.getcwd()})'
)
elif not target.target_exists("any"):
if env.config["run_mode"] == "dryrun":
target.create_placeholder()
else:
if env.config["run_mode"] == "run":
time.sleep(2)
if not target.target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]}'
)
return missing
# directive input
def process_input_args(self, ifiles: sos_targets, **kwargs):
"""This function handles directive input and all its parameters.
It
            determines and sets __step_input__
            determines and sets pattern variables if needed
returns
_groups
_vars
which are groups of _input and related _vars
"""
if ifiles.unspecified():
env.sos_dict.set("step_input", sos_targets([]))
env.sos_dict.set("_input", sos_targets([]))
env.sos_dict.set("step_output", sos_targets())
return [sos_targets([])], [{}]
assert isinstance(ifiles, sos_targets)
if env.sos_dict.get("__dynamic_input__", False):
runner = self.verify_dynamic_targets(
[x for x in ifiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# input file is the filtered files
env.sos_dict.set("step_input", ifiles)
env.sos_dict.set("_input", ifiles)
if ifiles._num_groups() == 0:
ifiles._group("all")
#
return ifiles.groups
def verify_dynamic_targets(self, target):
yield None
return True
def process_depends_args(self, dfiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_DEPENDS_OPTIONS:
raise RuntimeError(f"Unrecognized depends option {k}")
if dfiles.undetermined():
raise ValueError(r"Depends needs to handle undetermined")
if env.sos_dict.get("__dynamic_depends__", False):
runner = self.verify_dynamic_targets(
[x for x in dfiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.set("_depends", dfiles)
env.sos_dict.set("step_depends", dfiles)
def process_output_args(self, ofiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_OUTPUT_OPTIONS:
raise RuntimeError(f"Unrecognized output option {k}")
if ofiles._num_groups() > 0:
if ofiles._num_groups() == 1:
ofiles = ofiles._get_group(0)
elif ofiles._num_groups() != len(self._substeps):
raise RuntimeError(
f"Inconsistent number of output ({ofiles._num_groups()}) and input ({len(self._substeps)}) groups."
)
else:
ofiles = ofiles._get_group(env.sos_dict["_index"])
# create directory
if ofiles.valid():
parents = set(
[
os.path.abspath(os.path.join(ofile, os.pardir))
for ofile in ofiles
if isinstance(ofile, file_target)
]
)
for parent_dir in parents:
if parent_dir and not os.path.isdir(parent_dir):
os.makedirs(parent_dir, exist_ok=True)
# set variables
env.sos_dict.set("_output", ofiles)
env.sos_dict.set("step_output", ofiles)
#
for ofile in ofiles:
oname = ofile.target_name()
if oname in self._all_outputs:
raise ValueError(
f'Output {ofile} from substep {env.sos_dict["_index"]} of {env.sos_dict["__num_groups__"]} substeps overlaps with output from a previous substep.'
)
self._all_outputs.add(oname)
def submit_task(self, task_info):
if self.task_manager is None:
if self.step.task_params:
for key in ("trunk_size", "trunk_workers", "queue"):
val = get_value_of_param(
key, self.step.task_params, extra_dict=env.sos_dict.dict()
)
if val:
env.sos_dict["_runtime"][key] = val[0]
if "trunk_size" in env.sos_dict["_runtime"]:
trunk_size = env.sos_dict["_runtime"]["trunk_size"]
if trunk_size is None or trunk_size <= 0:
trunk_size = env.sos_dict["__num_groups__"]
if not isinstance(trunk_size, int):
raise ValueError(
f'An integer value or None is expected for runtime option trunk_size, "{trunk_size}" provided'
)
else:
trunk_size = 1
if "trunk_workers" in env.sos_dict["_runtime"]:
if "nodes" in env.sos_dict["_runtime"]:
raise ValueError(
'Option "trunk_workers" that specifies number of nodes and processes for the execution '
'of single-node jobs and option "nodes" that specifies number of nodes for single multi-node '
"jobs cannot be used at the same time."
)
trunk_workers = env.sos_dict["_runtime"]["trunk_workers"]
else:
trunk_workers = None
# if 'queue' in env.sos_dict['_runtime'] and env.sos_dict['_runtime']['queue']:
# host = env.sos_dict['_runtime']['queue']
# else:
# # otherwise, use workflow default
# host = '__default__'
self.task_manager = TaskManager(
env.sos_dict["__num_groups__"], trunk_size, trunk_workers
)
task_id = task_info["task_id"]
task_index = task_info["index"]
if task_id is None:
self.task_manager.set(task_index, None)
return None
taskdef = task_info["task_def"]
task_vars = task_info["task_vars"]
# 618
# it is possible that identical tasks are executed (with different underlying random numbers)
# we should either give a warning or produce different ids...
if self.task_manager.index_of(task_id) >= 0:
raise RuntimeError(
f'Task {task_id} generated for (_index={env.sos_dict["_index"]}) is identical to a previous one (_index={self.task_manager.index_of(task_id)}).'
)
elif self.task_manager.has_output(task_vars["_output"]):
raise RuntimeError(
f'Task produces output files {", ".join(task_vars["_output"])} that are output of other tasks.'
)
# if no trunk_size, the job will be submitted immediately
# otherwise tasks will be accumulated and submitted in batch
self.task_manager.set(task_index, (task_id, taskdef, task_vars["_output"]))
tasks = self.task_manager.get_job()
if tasks:
self.submit_tasks(tasks)
return task_id
def wait_for_results(self, all_submitted):
# this is a generator function because wait_for_tasks is a generator
# function and needs to yield to the caller
if self.concurrent_substep:
try:
runner = self.wait_for_substep()
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
if self.task_manager is None:
return {}
#
# report task
# what we should do here is to get the alias of the Host
        # because it can be different (e.g. not localhost)
queue = env.sos_dict["_runtime"]["queue"]
# submit the last batch of tasks
tasks = self.task_manager.get_job(all_tasks=True)
if tasks:
self.submit_tasks(tasks)
# waiting for results of specified IDs
try:
# 1218
runner = self.wait_for_tasks(
self.task_manager._submitted_tasks, all_submitted
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
results = e.value
for id, result in results.items():
# turn to string to avoid naming lookup issue
rep_result = {
x: (y if isinstance(y, (int, bool, float, str)) else short_repr(y))
for x, y in result.items()
}
rep_result["tags"] = " ".join(self.task_manager.tags(id))
rep_result["queue"] = queue
send_message_to_controller(["workflow_sig", "task", id, repr(rep_result)])
self.task_manager.clear_submitted()
# if in dryrun mode, we display the output of the dryrun task
if env.config["run_mode"] == "dryrun":
tid = list(results.keys())[0]
tf = TaskFile(tid)
if tf.has_stdout():
print(TaskFile(tid).stdout)
for idx, task in self.proc_results.items():
# if it is done
if isinstance(task, dict):
continue
if task in results:
self.proc_results[idx] = results[task]
else:
# can be a subtask
for _, mres in results.items():
if "subtasks" in mres and task in mres["subtasks"]:
self.proc_results[idx] = mres["subtasks"][task]
# elif 'exception' in mres:
# self.proc_results[idx] = mres
#
# check if all have results?
if any(isinstance(x, str) for x in self.proc_results.values()):
raise RuntimeError(
f'Failed to get results for tasks {", ".join(x for x in self.proc_results.values() if isinstance(x, str))}'
)
#
for idx, res in self.proc_results.items():
if "skipped" in res and res["skipped"]:
self.completed["__task_skipped__"] += 1
# complete case: task skipped
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
# complete case: task completed
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.completed["__task_completed__"] += 1
if "shared" in res:
self.shared_vars[idx].update(res["shared"])
def log(self, stage=None, msg=""):
if stage == "start":
env.logger.info(
f'{"Checking" if env.config["run_mode"] == "dryrun" else "Running"} ``{self.step.step_name(True)}``: {self.step.comment.strip()}'
)
elif stage == "input statement":
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("STEP", f"Handling input statement {msg}")
elif stage == "_input":
if env.sos_dict["_input"] is not None and len(env.sos_dict["_input"]) > 0:
env.logger.debug(
f'_input: ``{short_repr(env.sos_dict["_input"])}``{msg}'
)
elif stage == "_depends":
if env.sos_dict["_depends"] is not None:
env.logger.debug(
f'_depends: ``{short_repr(env.sos_dict["_depends"])}``{msg}'
)
elif stage == "input":
if env.sos_dict["step_input"] is not None:
env.logger.info(
f'input: ``{short_repr(env.sos_dict["step_input"])}``{msg}'
)
elif stage == "output":
if (
env.sos_dict["step_output"] is not None
and len(env.sos_dict["step_output"]) > 0
):
env.logger.info(
f'``{self.step.step_name(True)}`` output: ``{short_repr(env.sos_dict["step_output"])}``{msg}'
)
def execute(self, stmt, return_result=False):
try:
self.last_res = SoS_exec(
stmt,
return_result=return_result or env.config["run_mode"] == "interactive",
)
if return_result:
return self.last_res
except (StopInputGroup, TerminateExecution, UnavailableLock):
raise
except subprocess.CalledProcessError as e:
raise RuntimeError(e.stderr)
except ArgumentError:
raise
except ProcessKilled:
raise
except KeyboardInterrupt as e:
raise RuntimeError(get_traceback_msg(e))
except Exception as e:
raise RuntimeError(get_traceback_msg(e))
def prepare_substep(self):
# socket to collect result
self.result_pull_socket = create_socket(
env.zmq_context, zmq.PULL, "substep result collector"
)
local_ip = get_localhost_ip()
port = self.result_pull_socket.bind_to_random_port(f"tcp://{local_ip}")
env.config["sockets"]["result_push_socket"] = f"tcp://{local_ip}:{port}"
def submit_substep(self, param):
send_message_to_controller(["substep", param])
def process_returned_substep_result(self, till=None, wait=True):
while True:
if not wait:
# 1213
cur_index = env.sos_dict["_index"]
pending_substeps = cur_index - self._completed_concurrent_substeps + 1
if pending_substeps < (
100
if isinstance(self.concurrent_substep, bool)
else self.concurrent_substep
):
if not self.result_pull_socket.poll(0):
return
elif (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
# if there are more than 100 pending substeps
# we wait indefinitely for the results
env.log_to_file(
"STEP",
f"Wait for more substeps to be done before submitting. (index={cur_index}, processed={self._completed_concurrent_substeps})",
)
elif self._completed_concurrent_substeps == till:
return
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
if isinstance(res["exception"], ProcessKilled):
raise res["exception"]
elif isinstance(res["exception"], RemovedTarget):
pass
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"""Ignoring error from ``{self.step.step_name(True)}`` {idx_msg}: {res["exception"]}."""
)
res["output"] = sos_targets(invalid_target())
elif env.config["error_mode"] == "abort":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
# try to stop everything but wait till for submitted tasks to
# complete
waiting = till - 1 - self._completed_concurrent_substeps
env.logger.warning(
f'``{self.step.step_name(True)}`` {idx_msg} returns an error.{f" Terminating step after completing {waiting} submitted substeps." if waiting else " Terminating now."}'
)
for i in range(waiting):
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
self.exec_error.append(
f'index={res["index"]}', res["exception"]
)
raise self.exec_error
else:
# default or unspecified
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
#
if "index" not in res:
raise RuntimeError(
"Result received from substep does not have key index"
)
if "task_id" in res:
task = self.submit_task(res)
# if substep returns tasks, ...
if res["task_id"]:
self.proc_results[res["index"]] = task
else:
# if there is no task_id, the substep must have
# been skipped.
self.proc_results[res["index"]] = res
else:
self.proc_results[res["index"]] = res
self._completed_concurrent_substeps += 1
def wait_for_substep(self):
while self._completed_concurrent_substeps < len(self.proc_results):
try:
runner = self.process_returned_substep_result(
till=len(self.proc_results), wait=True
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
def collect_result(self):
# only results will be sent back to the master process
#
# __step_input__: input of this step
        # __step_output__: output of this step
# __step_depends__: dependent files of this step
result = {
"__step_input__": env.sos_dict["step_input"],
"__step_output__": env.sos_dict["step_output"],
"__step_depends__": env.sos_dict["step_depends"],
"__step_name__": env.sos_dict["step_name"],
"__completed__": self.completed,
}
result["__last_res__"] = self.last_res
result["__shared__"] = {}
if "shared" in self.step.options:
result["__shared__"] = self.shared_vars
for x in result["__step_output__"].targets:
if isinstance(x, sos_variable):
result["__shared__"][x.target_name()] = env.sos_dict[x.target_name()]
send_message_to_controller(
[
"progress",
"step_completed",
-1
if "sos_run" in env.sos_dict["__signature_vars__"]
else self.completed["__step_completed__"],
env.sos_dict["step_name"],
env.sos_dict["step_output"],
]
)
return result
def set_task_queue_from_task_params(self):
if self.step.task_params:
try:
task_queue = get_value_of_param(
"queue", self.step.task_params, extra_dict=env.sos_dict.dict()
)
if task_queue:
env.sos_dict["_runtime"]["queue"] = task_queue[0]
except Exception as e:
raise ValueError(
f"Failed to determine value of parameter queue of {self.step.task_params}: {e}"
)
# # check concurrent #1134
# try:
# task_concurrency = get_value_of_param(
# 'concurrent',
# self.step.task_params,
# extra_dict=env.sos_dict.dict())
# if task_concurrency:
# env.sos_dict['_runtime']['concurrent'] = task_concurrency[0]
# except Exception as e:
# raise ValueError(
# f'Failed to determine value of parameter queue of {self.step.task_params}: {e}'
# )
# if -q is unspecified and option queue is unspecified,
# or queue=None is specified, disregard the task keyword
if (
env.config["default_queue"] is None
and "queue" not in env.sos_dict["_runtime"]
) or (
"queue" in env.sos_dict["_runtime"]
and env.sos_dict["_runtime"]["queue"] is None
):
# remove task statement
if len(self.step.statements) >= 1 and self.step.statements[-1][0] == "!":
self.step.statements[-1][1] += "\n" + self.step.task
else:
self.step.statements.append(["!", self.step.task])
self.step.task = None
        # if queue is unspecified, it takes its value from the command line
# in this case -q should have been specified
elif "queue" not in env.sos_dict["_runtime"]:
env.sos_dict["_runtime"]["queue"] = env.config["default_queue"]
def local_exec_without_signature(self, statement):
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP", f'Execute substep {env.sos_dict["step_name"]} without signature'
)
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
finally:
if not self.step.task:
                # if there is no task, this step is completed
# complete case: local skip without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
def local_exec_with_signature(self, statement, sig):
idx = env.sos_dict["_index"]
        # the signature might have been built outside of this function;
        # if not (e.g. in debug mode), building it is delayed to now
if sig is None:
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
        # if the signature matches, we skip the substep even if
# there are tasks.
matched = validate_step_sig(sig)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
return True
env.log_to_file(
"STEP",
f'Execute substep {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
sig.lock()
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
finally:
# if this is the end of substep, save the signature
# otherwise we need to wait for the completion
# of the task.
if not self.step.task:
if env.sos_dict["step_output"].undetermined():
output = reevaluate_output()
self.output_groups[env.sos_dict["_index"]] = output
sig.set_output(output)
sig.write()
# complete case : local execution without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
self.pending_signatures[idx] = sig
sig.release()
return False
def skip_substep(self):
idx = env.sos_dict["_index"]
        # if this is a concurrent substep, there might be later steps that need to be rerun
        # and we need to mark some substeps as completed.
if self.concurrent_substep:
self._completed_concurrent_substeps += 1
self.proc_results[idx] = {
"index": idx,
"ret_code": 0,
"output": copy.deepcopy(env.sos_dict["_output"]),
}
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
def concurrent_exec(self, statement, sig=None):
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP",
f'Execute substep {env.sos_dict["step_name"]} {idx} concurrently with {self._completed_concurrent_substeps} completed',
)
        # the signatures are supposed to be written by the substep worker, however
# the substep worker might send tasks back to the step worker and
# we should write the signatures after the tasks are completed
if (
env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
and self.step.task
):
self.pending_signatures[idx] = (
sig
if sig
else RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
)
#
# step_output: needed only when it is undetermined
# step_input: not needed
# _input, _output, _depends, _index: needed
# step_name: for debug scripts
# step_id, workflow_id: for reporting to controller
# '__signature_vars__' to be used for signature creation
#
# __step_context__ is not needed because substep
# executor does not support nested workflow
proc_vars = (
env.sos_dict["__signature_vars__"]
| env.sos_dict["__environ_vars__"]
| {
"_input",
"_output",
"_depends",
"_index",
"step_output",
"step_name",
"_runtime",
"step_id",
"workflow_id",
"__num_groups__",
"__signature_vars__",
}
)
self.proc_results[env.sos_dict["_index"]] = {}
self.submit_substep(
dict(
stmt=statement[1],
global_def=self.step.global_def,
# 1225: the step might contain large variables from global section, but
                # we do not have to send them if they are not used in substeps.
cwd=os.getcwd(),
global_vars={
x: y
for x, y in self.step.global_vars.items()
if x in env.sos_dict["__signature_vars__"]
},
task=self.step.task,
task_params=self.step.task_params,
proc_vars=env.sos_dict.clone_selected_vars(proc_vars),
shared_vars=self.vars_to_be_shared,
config=env.config,
)
)
def check_task_sig(self):
idx = env.sos_dict["_index"]
sig = RuntimeInfo(
statementMD5([self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
env.log_to_file(
"STEP",
f'Check task-only step {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[env.sos_dict["_index"]] = matched["output"]
self.shared_vars[env.sos_dict["_index"]].update(matched["vars"])
# complete case: step with task ignored
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.pending_signatures[idx] = sig
return skip_index
# def is_task_active(self):
# active = env.sos_dict['_runtime']['active']
# env.logger.error(active)
# if active is True:
# return True
# elif active is False:
# return False
# elif isinstance(active, int):
# if active >= 0 and env.sos_dict['_index'] != active:
# return False
# if active < 0 and env.sos_dict[
# '_index'] != active + env.sos_dict['__num_groups__']:
# return False
# return True
# elif isinstance(active, Sequence):
# allowed_index = list([
# x if x >= 0 else env.sos_dict['__num_groups__'] + x
# for x in active
# ])
# return env.sos_dict['_index'] in allowed_index
# elif isinstance(active, slice):
# allowed_index = list(range(env.sos_dict['__num_groups__']))[active]
# return env.sos_dict['_index'] in allowed_index
# else:
# raise RuntimeError(
# f'Unacceptable value for option active: {active}')
def check_results(self):
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] == 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
# now that output is settled, we can write remaining signatures
for idx, res in self.proc_results.items():
if (
self.pending_signatures[idx] is not None
and res["ret_code"] == 0
and "sig_skipped" not in res
):
# task might return output with vars #1355
self.pending_signatures[idx].set_output(self.output_groups[idx])
self.pending_signatures[idx].write()
if res["ret_code"] != 0 and "output" in res:
clear_output(output=res["output"])
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] != 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
if "exception" in proc_result:
excp = proc_result["exception"]
if isinstance(excp, StopInputGroup):
if excp.message:
env.logger.info(excp.message)
self.output_groups[proc_result["index"]] = sos_targets([])
elif isinstance(excp, RemovedTarget):
raise excp
elif "task" in proc_result:
if env.config["error_mode"] == "ignore":
env.logger.warning(f"Ignore failed task {proc_result['task']}.")
# if the exception is from a task...
self.exec_error.append(proc_result["task"], excp)
else:
self.exec_error.append(
RuntimeError(
f"Substep failed with return code {proc_result['ret_code']}"
)
)
# this is after all substeps have been completed
if self.exec_error.errors:
raise self.exec_error
def calculate_completed(self):
substeps = (
self.completed["__substep_completed__"]
+ self.completed["__substep_skipped__"]
)
self.completed["__step_completed__"] = (
self.completed["__substep_completed__"] / substeps
)
self.completed["__step_skipped__"] = (
self.completed["__substep_skipped__"] / substeps
)
if self.completed["__step_completed__"].is_integer():
self.completed["__step_completed__"] = int(
self.completed["__step_completed__"]
)
if self.completed["__step_skipped__"].is_integer():
self.completed["__step_skipped__"] = int(self.completed["__step_skipped__"])
def run(self):
"""Execute a single step and return results. The result for batch mode is the
input, output etc returned as alias, and for interactive mode is the return value
of the last expression."""
# return value of the last executed statement
self.last_res = None
self.start_time = time.time()
self.completed = defaultdict(int)
#
# prepare environments, namely variables that can be used by the step
#
# * step_name: name of the step, can be used by step process to determine
# actions dynamically.
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set("__last_step__", self.step.last_step)
self.log("start")
env.sos_dict.set(
"step_id",
textMD5(
f'{env.sos_dict["workflow_id"]} {env.sos_dict["step_name"]} {self.step.md5}'
),
)
env.sos_dict.set("master_id", env.config["master_id"])
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
env.sos_dict.set("_runtime", {})
# * input: input files, which should be __step_output__ if it is defined, or
# None otherwise.
# * _input: first batch of input, which should be input if no input statement is used
# * output: None at first, can be redefined by output statement
# * _output: None at first, can be redefined by output statement
# * depends: None at first, can be redefined by depends statement
# * _depends: None at first, can be redefined by depends statement
#
self.init_input_output_vars()
# _index is needed for pre-input action's active option and for debug output of scripts
env.sos_dict.set("_index", 0)
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file(
"STEP",
f'Executing step {env.sos_dict["step_name"]} with step_input {env.sos_dict["step_input"]} and step_output {env.sos_dict["step_output"]}',
)
self.set_task_queue_from_task_params()
# look for input statement.
input_statement_idx = [
idx
for idx, x in enumerate(self.step.statements)
if x[0] == ":" and x[1] == "input"
]
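        # For reference, step statements are stored as small lists: directives
        # are kept as [':', key, value], e.g. [':', 'input', '"a.txt", "b.txt"'],
        # and script/code blocks are kept as ['!', code].  The scan above (and
        # similar scans below) therefore only needs to look at the first one or
        # two elements of each statement.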
if not input_statement_idx:
input_statement_idx = None
elif len(input_statement_idx) == 1:
input_statement_idx = input_statement_idx[0]
else:
raise ValueError(
f"More than one step input are specified in step {self.step.step_name(True)}"
)
# if shared is true, we have to disable concurrent because we
# do not yet return anything from shared.
self.concurrent_substep = "shared" not in self.step.options
# and \
# ('concurrent' not in env.sos_dict['_runtime'] or env.sos_dict['_runtime']['concurrent'] is True)
if input_statement_idx is not None:
# execute before input stuff
for statement in self.step.statements[:input_statement_idx]:
if statement[0] == ":":
                    # wait for all dependent targets to be resolved
key, value = statement[1:3]
if key != "depends":
raise ValueError(f"Step input should be specified before {key}")
while True:
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(dfiles, **kwargs)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
break
else:
try:
# 1354
                        # if there are definitions before input, the definitions and imports
# must be added to global_def in order to be executed by substeps
if any(x in statement[1] for x in ("class", "def", "import")):
step_def = KeepOnlyImportAndDefine().visit(
ast.parse(statement[1])
)
if step_def.body:
if isinstance(self.step.global_def, ast.Module):
self.step.global_def.body.extend(step_def.body)
else:
self.step.global_def = step_def
self.execute(statement[1])
except StopInputGroup as e:
# stop before substeps, because there is no output statement before it
# we do not have to worry about keep_output
if e.message:
env.logger.info(e.message)
return self.collect_result()
# input statement
stmt = self.step.statements[input_statement_idx][2]
self.log("input statement", stmt)
while True:
            # wait for all targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({stmt})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# Files will be expanded differently with different running modes
input_files: sos_targets = expand_input_files(
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_INPUT_OPTIONS
},
)
runner = self.process_input_args(
input_files,
**{k: v for k, v in kwargs.items() if k in SOS_INPUT_OPTIONS},
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
self._substeps = e.value
#
if "concurrent" in kwargs and self.concurrent_substep:
# concurrent can be True/False or an integer
self.concurrent_substep = kwargs["concurrent"]
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise ValueError(f"Failed to process input statement {stmt}: {e}")
break
input_statement_idx += 1
elif env.sos_dict["step_input"].groups:
# if default has groups...
# default case
self._substeps = env.sos_dict["step_input"].groups
            # assume all statements (starting from index 0) come after the input statement
input_statement_idx = 0
else:
# default case
self._substeps = [env.sos_dict["step_input"]]
            # assume all statements (starting from index 0) come after the input statement
input_statement_idx = 0
self.proc_results = {}
self.vars_to_be_shared = set()
if "shared" in self.step.options:
self.vars_to_be_shared = parse_shared_vars(self.step.options["shared"])
self.vars_to_be_shared = sorted(
[
x[5:] if x.startswith("step_") else x
for x in self.vars_to_be_shared
if x not in ("step_", "step_input", "step_output", "step_depends")
]
)
self.shared_vars = [{} for x in self._substeps]
# run steps after input statement, which will be run multiple times for each input
# group.
env.sos_dict.set("__num_groups__", len(self._substeps))
# determine if a single index or the whole step should be skipped
skip_index = False
# signatures of each index, which can remain to be None if no output
# is defined.
self.output_groups = [sos_targets([]) for x in self._substeps]
self.depends_groups = [sos_targets([]) for x in self._substeps]
# used to prevent overlapping output from substeps
self._all_outputs = set()
self._subworkflow_results = []
if (
any("sos_run" in x[1] for x in self.step.statements[input_statement_idx:])
and "shared" not in self.step.options
and not self.step.task
and self.step.statements[-1][0] == "!"
and (len(self.step.statements) == 1 or self.step.statements[-2][0] == ":")
and is_sos_run_the_only_last_stmt(self.step.statements[-1][1])
):
env.sos_dict.set("__concurrent_subworkflow__", True)
if self.concurrent_substep:
if len(self._substeps) <= 1 or env.config["run_mode"] == "dryrun":
self.concurrent_substep = False
elif any(
"sos_run" in x[1] for x in self.step.statements[input_statement_idx:]
):
self.concurrent_substep = False
env.logger.debug(
"Substeps are executed sequentially because of existence of multiple nested workflow."
)
else:
self.prepare_substep()
try:
self.completed["__substep_skipped__"] = 0
self.completed["__substep_completed__"] = len(self._substeps)
self._completed_concurrent_substeps = 0
# pending signatures are signatures for steps with external tasks
self.pending_signatures = [None for x in self._substeps]
for idx, g in enumerate(self._substeps):
#
# https://github.com/vatlab/sos/issues/1376
#
# [default]
# input: for_each=dict(i=range(1000))
# sos_run('a', t=i)
#
# when we have workflow like the following when steps
# are executed quickly with subworkflows submitted to the master
# the master process could be swamped with subworkflows, causing
# "too many open files".
#
# the following code will stop the step from continued
# execution and wait for the subworkflows to complete.
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(
allow_pending=env.config["worker_procs"]
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# other variables
#
_vars = {}
# now, let us expose target level variables as lists
if len(g) > 1:
names = set.union(*[set(x._dict.keys()) for x in g._targets])
elif len(g) == 1:
names = set(g._targets[0]._dict.keys())
else:
names = set()
for name in names:
_vars[name] = [x.get(name) for x in g._targets]
# then we expose all group level variables
_vars.update(g._dict)
_vars.update(env.sos_dict["step_input"]._dict)
env.sos_dict.update(_vars)
env.sos_dict.set("_input", copy.deepcopy(g))
# set vars to _input
# env.sos_dict['_input'].set(**v)
self.log("_input")
env.sos_dict.set("_index", idx)
if env.config["error_mode"] == "ignore":
missed = [x for x in g.targets if not x.target_exists()]
if missed:
if any(isinstance(x, invalid_target) for x in missed):
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to invalid input caused by previous failed substep.'
)
else:
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to missing input {sos_targets(missed)}'
)
self.output_groups[idx] = sos_targets(invalid_target())
env.sos_dict.set("_output", sos_targets(invalid_target()))
self.skip_substep()
continue
# in interactive mode, because sos_dict are always shared
# execution of a substep, especially when it calls a nested
# workflow, would change step_name, __step_context__ etc, and
# we will have to reset these variables to make sure the next
# substep would execute normally. Batch mode is immune to this
# problem because nested workflows are executed in their own
# process/context etc
if env.config["run_mode"] == "interactive":
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set(
"step_id",
hash(
(
env.sos_dict["workflow_id"],
env.sos_dict["step_name"],
self.step.md5,
)
),
)
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
#
pre_statement = []
if (
not any(
st[0] == ":" and st[1] == "output"
for st in self.step.statements[input_statement_idx:]
)
and "__default_output__" in env.sos_dict
):
pre_statement = [[":", "output", "_output"]]
# if there is no statement, no task, claim success
post_statement = []
if not self.step.statements or self.step.statements[-1][0] != "!":
if self.step.task:
# if there is only task, we insert a fake statement so that it can be executed by the executor
post_statement = [["!", ""]]
else:
# complete case: no step, no statement
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
all_statements = (
pre_statement
+ self.step.statements[input_statement_idx:]
+ post_statement
)
self.is_input_verified = True
for statement_idx, statement in enumerate(all_statements):
is_last_runblock = statement_idx == len(all_statements) - 1
                # if input is undetermined, we can only process output:
if not g.valid() and statement[0] != ":":
raise RuntimeError("Undetermined input encountered")
if statement[0] == ":":
key, value = statement[1:3]
# output, depends, and process can be processed multiple times
while True:
# loop for all unresolved targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# dynamic output or dependent files
if key == "output":
# if output is defined, its default value needs to be cleared
if idx == 0:
env.sos_dict.set("step_output", sos_targets())
ofiles: sos_targets = expand_output_files(
value,
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_OUTPUT_OPTIONS
},
)
if g.valid() and ofiles.valid():
if any(
x in g._targets
for x in ofiles
if not isinstance(x, sos_step)
):
raise RuntimeError(
f'Overlapping input and output files: {", ".join(repr(x) for x in ofiles if x in g)}'
)
# set variable _output and output
self.process_output_args(
ofiles,
**{
k: v
for k, v in kwargs.items()
if k in SOS_OUTPUT_OPTIONS
},
)
self.output_groups[idx] = env.sos_dict["_output"]
elif key == "depends":
try:
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(
dfiles, **kwargs
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
self.depends_groups[idx] = env.sos_dict[
"_depends"
]
self.log("_depends")
except Exception:
# env.logger.info(e)
raise
else:
raise RuntimeError(f"Unrecognized directive {key}")
# everything is ok, break
break
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
                                # if input is Undetermined, it is possible that output cannot be processed
# due to that, and we just return
if not g.valid():
env.logger.debug(e)
return self.collect_result()
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
elif is_last_runblock:
if (
env.config["sig_mode"] == "skip"
and not self.vars_to_be_shared
and "sos_run" not in statement[1]
and not env.sos_dict["_output"].unspecified()
and len(env.sos_dict["_output"]) > 0
and all(
x.target_exists()
for x in env.sos_dict["_output"].targets
)
and env.sos_dict["_output"].later_than(
env.sos_dict["_input"]
)
):
self.skip_substep()
env.logger.info(
f'``{env.sos_dict["step_name"]}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``skipped`` with existing output.'
)
skip_index = True
# do not execute the rest of the statement
break
#
# default mode, check if skipping substep
sig = None
if (
env.config["sig_mode"]
not in ("ignore", "distributed", "build")
and not env.sos_dict["_output"].unspecified()
):
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if skip_index:
# matched["output"] might hav vars not defined in "output" #1355
env.sos_dict.set("_output", matched["output"])
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
self.skip_substep()
break
try:
if self.concurrent_substep:
self.concurrent_exec(statement, sig)
                                # we check whether previously submitted substeps have completed and process them
                                # because further steps might need to be done
try:
runner = self.process_returned_substep_result(
till=idx + 1, wait=False
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
elif (
env.config["sig_mode"] == "ignore"
or env.sos_dict["_output"].unspecified()
):
self.local_exec_without_signature(statement)
else:
skip_index = self.local_exec_with_signature(
statement, sig
)
if skip_index:
self.skip_substep()
break
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception as e:
clear_output()
if env.config["error_mode"] == "abort":
raise
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"{self.step.step_name(True)} {idx_msg} returns no output due to error: {e}"
)
self.output_groups[idx] = sos_targets(invalid_target())
skip_index = True
else:
if env.config["run_mode"] != "interactive":
# default mode
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.error(
f"{self.step.step_name(True)} {idx_msg} returns an error."
)
self.exec_error.append(str(idx), e)
else:
# if it is not the last statement group (e.g. statements before :output)
# we execute locally without anything like signature
if self.is_input_verified:
verify_input()
self.is_input_verified = False
try:
self.execute(statement[1])
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception:
clear_output()
raise
                # if there is no statement, but there are tasks, we should
# check signature here.
if (
(not self.step.statements or self.step.statements[-1][0] != "!")
and self.step.task
and not self.concurrent_substep
and env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
):
skip_index = self.check_task_sig()
# if this index is skipped, go directly to the next one
if skip_index:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
skip_index = False
continue
# if concurrent input group, tasks are handled in substep
if self.concurrent_substep or not self.step.task:
continue
if env.config["run_mode"] == "dryrun" and env.sos_dict["_index"] != 0:
continue
# # check if the task is active
# if 'active' in env.sos_dict['_runtime']:
# if not self.is_task_active():
# continue
#
self.log("task")
try:
task_id, taskdef, task_vars = create_task(
self.step.global_def,
self.step.global_vars,
self.step.task,
self.step.task_params,
)
task = self.submit_task(
{
"index": env.sos_dict["_index"],
"task_id": task_id,
"task_def": taskdef,
"task_vars": task_vars,
}
)
self.proc_results[env.sos_dict["_index"]] = task
except Exception as e:
# FIXME: cannot catch exception from subprocesses
if env.verbosity > 2:
sys.stderr.write(get_traceback())
raise RuntimeError(
f'Failed to execute process\n"{short_repr(self.step.task)}"\n{e}'
)
#
# # if not concurrent, we have to wait for the completion of the task
# if 'concurrent' in env.sos_dict['_runtime'] and env.sos_dict[
# '_runtime']['concurrent'] is False:
# # in this case the steps must be executed not concurrently
# runner = self.wait_for_results(all_submitted=False)
# try:
# yreq = next(runner)
# while True:
# yres = yield yreq
# yreq = runner.send(yres)
# except StopIteration:
# pass
#
# endfor loop for each input group
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(allow_pending=0)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.pop("__concurrent_subworkflow__")
runner = self.wait_for_results(all_submitted=True)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
for idx, res in self.proc_results.items():
if "sig_skipped" in res:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
if "output" in res:
self.output_groups[idx] = res["output"]
# check results
self.check_results()
            # if an error happened but we allowed all substeps to complete, we now
            # raise the exception
if self.exec_error.errors:
raise self.exec_error
            # if output is Undetermined, re-evaluate it
# finalize output from output_groups because some output might be skipped
# this is the final version of the output but we do maintain output
# during the execution of step, for compatibility.
env.sos_dict.set(
"step_output", sos_targets([])._add_groups(self.output_groups)
)
env.sos_dict.set(
"step_depends", sos_targets([])._add_groups(self.depends_groups)
)
# if there exists an option shared, the variable would be treated as
# provides=sos_variable(), and then as step_output
if "shared" in self.step.options:
self.shared_vars = evaluate_shared(
self.shared_vars, self.step.options["shared"]
)
env.sos_dict.quick_update(self.shared_vars)
missing = self.verify_output()
self.log(
"output",
msg=f'\033[95m missing: {short_repr(missing)} ({len(missing)} item{"s" if len(missing)>1 else ""})\033[0m'
if len(missing) > 0
else "",
)
self.calculate_completed()
def file_only(targets):
if not isinstance(targets, sos_targets):
env.logger.warning(
f"Unexpected input or output target for reporting. Empty list returned: {targets}"
)
return []
return [
(str(x), x.size())
for x in targets._targets
if isinstance(x, file_target)
]
step_info = {
"step_id": self.step.md5,
"start_time": self.start_time,
"stepname": self.step.step_name(True),
"substeps": len(self._substeps),
"input": file_only(env.sos_dict["step_input"]),
"output": file_only(env.sos_dict["step_output"]),
"completed": dict(self.completed),
"end_time": time.time(),
}
send_message_to_controller(
["workflow_sig", "step", env.sos_dict["workflow_id"], repr(step_info)]
)
return self.collect_result()
finally:
if self.concurrent_substep:
close_socket(self.result_pull_socket, "substep collector")
class Step_Executor(Base_Step_Executor):
"""Single process step executor"""
def __init__(self, step, socket, mode="run"):
self.run_mode = mode
env.config["run_mode"] = mode
super(Step_Executor, self).__init__(step)
self.socket = socket
# because step is executed in a separate SoS_Worker process, this
# __socket__ is available to all the actions that will be executed
# in the step
env.__socket__ = socket
def submit_tasks(self, tasks):
if "TASK" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("TASK", f"Send {tasks}")
self.socket.send(
encode_msg(["tasks", env.sos_dict["_runtime"]["queue"]] + tasks)
)
def wait_for_tasks(self, tasks, all_submitted):
        # wait_for_tasks is a generator function that yields the request
        # to the runner
if not tasks:
return {}
# when we wait, the "outsiders" also need to see the tags etc
# of the tasks so we have to write to the database. #156
send_message_to_controller(["commit_sig"])
        # wait till the executor responds
results = {}
while True:
# yield an indicator of what is requested, for debugging purpose
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
results.update(res)
# all results have been obtained.
if len(results) == len(tasks):
break
return results
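    # How the yield protocol above works, in rough outline: wait_for_tasks is a
    # generator, and every time it needs data it yields the socket it wants to
    # read from; the outer runner polls that socket and feeds the decoded
    # result back in via .send().  A minimal sketch of such a driver, where
    # poll_and_read is a hypothetical helper standing in for the outer loop:
    #
    #   req = next(runner)            # generator asks for a socket
    #   while True:
    #       res = poll_and_read(req)  # read one message from the socket
    #       req = runner.send(res)    # feed the result back, get next request
    #
    # StopIteration carries the final return value, as used throughout run().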
def wait_for_subworkflows(self, allow_pending):
"""Wait for results from subworkflows"""
try:
allow_pending = int(allow_pending)
        except Exception:
allow_pending = min(max(os.cpu_count() // 2, 2), 8)
while self._subworkflow_results:
if allow_pending > 0:
n_pending = sum(
len(x["pending_workflows"]) for x in self._subworkflow_results
)
if n_pending <= allow_pending:
break
# here we did not check if workflow ids match
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
elif isinstance(res, Exception):
raise res
if not "__workflow_id__" in res:
raise ValueError(f"Unrecognized result from subworkflows: {res}")
            # remove from self._subworkflow_results
result_with_id = [
idx
for idx, x in enumerate(self._subworkflow_results)
if res["__workflow_id__"] in x["pending_workflows"]
]
if not result_with_id:
raise RuntimeError(
f"Failed to identify ID of returned subworkflow: {res}"
)
if len(result_with_id) > 1:
raise RuntimeError(
"Multiple matches of subworkflow ID. This should not happen."
)
self._subworkflow_results[result_with_id[0]]["pending_workflows"].remove(
res["__workflow_id__"]
)
if not self._subworkflow_results[result_with_id[0]]["pending_workflows"]:
self._subworkflow_results.pop(result_with_id[0])
def handle_unknown_target(self, e):
self.socket.send(encode_msg(["missing_target", e.target]))
yield self.socket
res = decode_msg(self.socket.recv())
if not res:
raise e
def verify_dynamic_targets(self, targets):
if not targets:
return
if env.config["trace_existing"]:
traced = targets
else:
traced = [x for x in targets if x.traced]
if not traced:
return
self.socket.send(encode_msg(["dependent_target"] + traced))
yield self.socket
res = decode_msg(self.socket.recv())
if res != "target_resolved":
raise RuntimeError(f"Failed to veryify dependent target {traced}")
def run(self):
try:
try:
# 1218
runner = Base_Step_Executor.run(self)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
res = e.value
if self.socket is not None:
if (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends result {short_repr(res)}",
)
self.socket.send(encode_msg(res))
else:
return res
except RemovedTarget as e:
# removed target needs to be handled differently since the workflow manager
# use type information to get removed targets
if self.socket is not None and not self.socket.closed:
self.socket.send(encode_msg(e))
else:
raise e
except Exception as e:
if env.verbosity > 2:
sys.stderr.write(get_traceback())
if isinstance(e, ProcessKilled):
raise
# if not self.exec_error
if e != self.exec_error:
self.exec_error.append(self.step.step_name(), e)
#
if self.exec_error.errors:
if self.socket is not None and not self.socket.closed:
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends exception {self.exec_error}",
)
self.socket.send(encode_msg(self.exec_error))
else:
raise self.exec_error
import ast
import copy
import os
import subprocess
import sys
import time
from collections import defaultdict
from collections.abc import Mapping, Sequence
from typing import List
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .messages import encode_msg, decode_msg
from .eval import SoS_eval, SoS_exec, accessed_vars, KeepOnlyImportAndDefine
from .executor_utils import (
__named_output__,
__null_func__,
__output_from__,
__traced__,
clear_output,
create_task,
get_traceback_msg,
reevaluate_output,
statementMD5,
validate_step_sig,
verify_input,
ExecuteError,
)
from .syntax import (
SOS_DEPENDS_OPTIONS,
SOS_INPUT_OPTIONS,
SOS_OUTPUT_OPTIONS,
SOS_TARGETS_OPTIONS,
)
from .targets import (
RemovedTarget,
RuntimeInfo,
UnavailableLock,
sos_variable,
UnknownTarget,
dynamic,
file_target,
sos_step,
sos_targets,
invalid_target
)
from .tasks import MasterTaskParams, TaskFile
from .utils import (
ArgumentError,
StopInputGroup,
TerminateExecution,
env,
get_traceback,
short_repr,
ProcessKilled,
get_localhost_ip,
textMD5,
)
__all__: List = []
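# A brief orientation for the TaskManager below (a descriptive note, not part
# of the public API): tasks are grouped into "trunks" of trunk_size tasks so
# that many small tasks can be submitted to a queue as one master task run by
# trunk_workers processes.  For example, with num_tasks=10 and trunk_size=4
# there are ceil(10 / 4) = 3 slots, and the last slot holds 10 % 4 = 2 tasks,
# which is exactly what _slots and _last_slot_size compute in __init__ below.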
class TaskManager:
def __init__(self, num_tasks, trunk_size, trunk_workers):
super(TaskManager, self).__init__()
self.num_tasks = num_tasks
import math
self._slots = [[] for x in range(math.ceil(num_tasks / trunk_size))]
self._last_slot_size = (
trunk_size if (num_tasks % trunk_size == 0) else (num_tasks % trunk_size)
)
self.trunk_size = trunk_size
self.trunk_workers = trunk_workers
self._submitted_tasks = []
self._unsubmitted_slots = []
self._unsubmitted_tasks = []
self._all_ids = []
self._all_output = []
self._terminate = False
self._tags = {}
def set(self, idx, task_def):
slot = idx // self.trunk_size
self._slots[slot].append([idx, task_def])
if len(self._slots[slot]) == self.trunk_size or (
slot == len(self._slots) - 1
and len(self._slots[slot]) == self._last_slot_size
):
if not all([x[1] is None for x in self._slots[slot]]):
if self.trunk_size == 1 or any(x[1] is None for x in self._slots[slot]):
self._unsubmitted_tasks.extend(
[x[1] for x in self._slots[slot] if x[1] is not None]
)
else:
self._unsubmitted_slots.append(
sorted(self._slots[slot], key=lambda x: x[0])
)
self._slots[slot] = []
if not task_def:
return
if isinstance(task_def[2], Sequence):
self._all_output.extend(task_def[2])
self._all_ids.append(task_def[0])
self._tags[task_def[0]] = task_def[1].tags
def tags(self, task_id):
return self._tags.get(task_id, [])
def index_of(self, task_id):
if task_id in self._all_ids:
return self._all_ids.index(task_id)
else:
return -1
def has_output(self, output):
if not isinstance(output, Sequence) or not self._unsubmitted_slots:
return False
return any(x in self._all_output for x in output)
def get_job(self, all_tasks=False):
ids = []
for slot in self._unsubmitted_slots:
master = MasterTaskParams(self.trunk_workers)
for _, (task_id, taskdef, _) in slot:
master.push(task_id, taskdef)
ids.append(master.ID)
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
self._unsubmitted_slots = []
if self.trunk_size == 1 or all_tasks:
to_be_submitted = self._unsubmitted_tasks
            for slot in self._slots:
                if slot:
                    to_be_submitted.extend([x[1] for x in slot if x[1] is not None])
self._unsubmitted_tasks = []
else:
num_tasks = (
len(self._unsubmitted_tasks) // self.trunk_size * self.trunk_size
)
to_be_submitted = self._unsubmitted_tasks[:num_tasks]
self._unsubmitted_tasks = self._unsubmitted_tasks[num_tasks:]
if self.trunk_size == 1 or (all_tasks and len(self._unsubmitted_tasks) == 1):
for task_id, taskdef, _ in to_be_submitted:
TaskFile(task_id).save(taskdef)
send_message_to_controller(
[
"workflow_sig",
"task",
task_id,
f"{{'creation_time': {time.time()}}}",
]
)
ids.append(task_id)
else:
master = None
for task_id, taskdef, _ in to_be_submitted:
if master is not None and master.num_tasks() == self.trunk_size:
ids.append(master.ID)
TaskFile(master.ID).save(master)
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
master = None
if master is None:
master = MasterTaskParams(self.trunk_workers)
master.push(task_id, taskdef)
if master is not None:
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
ids.append(master.ID)
if not ids:
return None
self._submitted_tasks.extend(ids)
return ids
def clear_submitted(self):
self._submitted_tasks = []
def expand_input_files(*args, **kwargs):
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
if not args and not kwargs:
return env.sos_dict["step_input"]
elif not args and all(x in SOS_TARGETS_OPTIONS for x in kwargs.keys()):
return sos_targets(
env.sos_dict["step_input"],
_verify_existence=env.config["error_mode"] != "ignore",
**kwargs,
)
else:
return sos_targets(
*args,
**kwargs,
_verify_existence=env.config["error_mode"] != "ignore",
_undetermined=False,
_source=env.sos_dict["step_name"],
)
def expand_depends_files(*args, **kwargs):
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
return sos_targets(
*args,
**kwargs,
_verify_existence=True,
_undetermined=False,
_source=env.sos_dict["step_name"],
)
def expand_output_files(value, *args, **kwargs):
if any(isinstance(x, dynamic) for x in args) or any(
isinstance(y, dynamic) for y in kwargs.values()
):
return sos_targets(_undetermined=value)
else:
return sos_targets(
*args, **kwargs, _undetermined=False, _source=env.sos_dict["step_name"]
)
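# A small illustration of the distinction made above, assuming the usual SoS
# semantics of `dynamic`: a dynamic output such as
#
#   output: dynamic("*.out")
#
# cannot be known before the substep runs, so expand_output_files returns a
# sos_targets marked _undetermined with the original expression, and the real
# output is re-evaluated after execution (see reevaluate_output elsewhere in
# this module).  Plain outputs are expanded immediately into concrete targets.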
def parse_shared_vars(option):
shared_vars = set()
if not option:
return shared_vars
if isinstance(option, str):
shared_vars.add(option)
elif isinstance(option, Mapping):
for val in option.values():
shared_vars |= accessed_vars(val, mode="eval")
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
shared_vars.add(item)
elif isinstance(item, Mapping):
for val in item.values():
shared_vars |= accessed_vars(val, mode="eval")
return shared_vars
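# The `shared` step option accepted by parse_shared_vars above can take a few
# shapes; the following sketch lists the forms handled by the branches above
# (the values shown are illustrative only):
#
#   shared = "result"                                  # a single variable name
#   shared = {"summary": "max(result)"}                # name -> expression
#   shared = ["result", {"summary": "max(result)"}]    # a mixed sequence
#
# In the mapping forms, the right-hand expressions are scanned with
# accessed_vars, so the variables they read are also treated as shared.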
def evaluate_shared(vars, option):
shared_vars = {}
env.sos_dict.quick_update(vars[-1])
for key in vars[-1].keys():
try:
if key in ("output", "depends", "input"):
env.logger.warning(
f"Cannot overwrite variable step_{key} from substep variable {key}"
)
else:
env.sos_dict.set("step_" + key, [x[key] for x in vars])
except Exception as e:
env.logger.warning(f"Failed to create step level variable step_{key}: {e}")
if isinstance(option, str):
if option in env.sos_dict:
shared_vars[option] = env.sos_dict[option]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(option, Mapping):
for var, val in option.items():
try:
if var == val:
shared_vars[var] = env.sos_dict[var]
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
if item in env.sos_dict:
shared_vars[item] = env.sos_dict[item]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(item, Mapping):
for var, val in item.items():
try:
if var == val:
continue
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str or mapping are accepted in sequence: {option}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str, sequence, or mapping are accepted in sequence: {option}"
)
return shared_vars
def get_value_of_param(name, param_list, extra_dict={}):
tree = ast.parse(f"__null_func__({param_list})")
    # x.func can be an attribute (e.g. a.b()) and does not have an id
kwargs = [
x for x in ast.walk(tree) if x.__class__.__name__ == "keyword" and x.arg == name
]
if not kwargs:
return []
try:
return [ast.literal_eval(kwargs[0].value)]
except Exception:
return [
eval(
compile(
ast.Expression(body=kwargs[0].value),
filename="<string>",
mode="eval",
),
extra_dict,
)
]
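# get_value_of_param is a small AST helper: given the textual parameter list of
# a task statement, it pulls out the value of one keyword argument.  A sketch
# of its behaviour, with illustrative arguments:
#
#   get_value_of_param("queue", "queue='cluster', trunk_size=5")
#       -> ["cluster"]       # literal values go through ast.literal_eval
#   get_value_of_param("queue", "walltime='1h'")
#       -> []                # keyword not present
#
# Non-literal values fall back to eval() against extra_dict, which is why
# callers pass env.sos_dict.dict() as the evaluation environment.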
def is_sos_run_the_only_last_stmt(stmt):
tree = ast.parse(stmt)
return (
len(tree.body) >= 1
and isinstance(tree.body[-1], ast.Expr)
and isinstance(tree.body[-1].value, ast.Call)
and hasattr(tree.body[-1].value.func, "id")
and tree.body[-1].value.func.id == "sos_run"
and len(
[
x
for x in ast.walk(tree)
if isinstance(x, ast.Call)
and hasattr(x.func, "id")
and x.func.id == "sos_run"
]
)
== 1
)
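# is_sos_run_the_only_last_stmt answers one narrow question: is the last
# statement of a code block a bare sos_run(...) call, and is that the only
# sos_run call in the block?  This is what allows run() below to treat a step
# as a candidate for concurrent subworkflow execution.  Illustrative cases:
#
#   "sos_run('align', t=i)"           -> True
#   "prepare(); sos_run('align')"     -> True  (still the last statement)
#   "res = sos_run('align')"          -> False (assignment, not a bare call)
#   "sos_run('a'); sos_run('b')"      -> False (more than one sos_run)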
class Base_Step_Executor:
# This base class defines how steps are executed. The derived classes will reimplement
    # some functions to behave differently in different modes.
#
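    # Execution of a step is written in a cooperative style: run() and several
    # helpers are generators that yield the object they are waiting on (a zmq
    # socket, or None) and receive the corresponding result back through
    # .send().  Derived classes only redefine the points where a real message
    # exchange happens (submit_tasks, wait_for_tasks, handle_unknown_target,
    # wait_for_subworkflows); everything else, including the nested forwarding
    # loops of the form
    #
    #   yreq = next(runner)
    #   while True:
    #       yres = yield yreq
    #       yreq = runner.send(yres)
    #
    # seen throughout this class, stays in the base class.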
def __init__(self, step):
self.step = step
self.task_manager = None
self.exec_error = ExecuteError(self.step.step_name())
#
# Functions that should be redefined in derived class
#
def submit_tasks(self, tasks):
raise RuntimeError("Undefined base function submit_tasks")
def wait_for_tasks(self, tasks, all_submitted):
# this will be redefined in subclasses
raise RuntimeError("Undefined base function wait_for_tasks")
def wait_for_subworkflows(self, allow_pending=0):
raise RuntimeError("Undefined base function wait_for_subworkflows")
def handle_unknown_target(self, e):
raise RuntimeError("Undefined base function handle_unknown_target")
def init_input_output_vars(self):
# if there is __step_output__ from previous step, use it as default input
# otherwise, reset to empty
if (
"__step_output__" not in env.sos_dict
or env.sos_dict["__step_output__"].unspecified()
):
env.sos_dict.set("step_input", sos_targets([]))
else:
env.sos_dict.set("step_input", env.sos_dict["__step_output__"])
# input can be Undetermined from undetermined output from last step
env.sos_dict.set("_input", copy.deepcopy(env.sos_dict["step_input"]))
# if there is default output for auxiliary steps, use it as step_output and _output
# otherwise reset to unspecified.
if "__default_output__" in env.sos_dict:
# if step is triggered by sos_step, it should not be considered as
# output of the step. #981
env.sos_dict.set(
"__default_output__",
sos_targets(
[
x
for x in env.sos_dict["__default_output__"]._targets
if not isinstance(x, sos_step)
]
),
)
env.sos_dict.set(
"step_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
env.sos_dict.set(
"_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
else:
env.sos_dict.set("step_output", sos_targets([]))
# output is said to be unspecified until output: is used
env.sos_dict.set("_output", sos_targets(_undetermined=True))
env.sos_dict.set("step_depends", sos_targets([]))
env.sos_dict.set("_depends", sos_targets([]))
#
# Common functions
#
def verify_output(self):
missing = sos_targets([])
if env.sos_dict["step_output"] is None:
return
if not env.sos_dict["step_output"].valid():
raise RuntimeError(
"Output of a completed step cannot be undetermined or unspecified."
)
for target in env.sos_dict["step_output"]:
if isinstance(target, (sos_step, invalid_target)):
continue
if isinstance(target, str):
if not file_target(target).target_exists("any"):
if env.config["run_mode"] == "dryrun":
# in dryrun mode, we just create these targets
file_target(target).create_placeholder()
else:
# latency wait for 2 seconds because the file system might be slow
if env.config["run_mode"] == "run":
time.sleep(2)
if not file_target(target).target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]} (curdir={os.getcwd()})'
)
elif not target.target_exists("any"):
if env.config["run_mode"] == "dryrun":
target.create_placeholder()
else:
if env.config["run_mode"] == "run":
time.sleep(2)
if not target.target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]}'
)
return missing
# directive input
def process_input_args(self, ifiles: sos_targets, **kwargs):
if ifiles.unspecified():
env.sos_dict.set("step_input", sos_targets([]))
env.sos_dict.set("_input", sos_targets([]))
env.sos_dict.set("step_output", sos_targets())
return [sos_targets([])], [{}]
assert isinstance(ifiles, sos_targets)
if env.sos_dict.get("__dynamic_input__", False):
runner = self.verify_dynamic_targets(
[x for x in ifiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# input file is the filtered files
env.sos_dict.set("step_input", ifiles)
env.sos_dict.set("_input", ifiles)
if ifiles._num_groups() == 0:
ifiles._group("all")
#
return ifiles.groups
def verify_dynamic_targets(self, target):
yield None
return True
def process_depends_args(self, dfiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_DEPENDS_OPTIONS:
raise RuntimeError(f"Unrecognized depends option {k}")
if dfiles.undetermined():
raise ValueError(r"Depends needs to handle undetermined")
if env.sos_dict.get("__dynamic_depends__", False):
runner = self.verify_dynamic_targets(
[x for x in dfiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.set("_depends", dfiles)
env.sos_dict.set("step_depends", dfiles)
def process_output_args(self, ofiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_OUTPUT_OPTIONS:
raise RuntimeError(f"Unrecognized output option {k}")
if ofiles._num_groups() > 0:
if ofiles._num_groups() == 1:
ofiles = ofiles._get_group(0)
elif ofiles._num_groups() != len(self._substeps):
raise RuntimeError(
f"Inconsistent number of output ({ofiles._num_groups()}) and input ({len(self._substeps)}) groups."
)
else:
ofiles = ofiles._get_group(env.sos_dict["_index"])
# create directory
if ofiles.valid():
parents = set(
[
os.path.abspath(os.path.join(ofile, os.pardir))
for ofile in ofiles
if isinstance(ofile, file_target)
]
)
for parent_dir in parents:
if parent_dir and not os.path.isdir(parent_dir):
os.makedirs(parent_dir, exist_ok=True)
# set variables
env.sos_dict.set("_output", ofiles)
env.sos_dict.set("step_output", ofiles)
#
for ofile in ofiles:
oname = ofile.target_name()
if oname in self._all_outputs:
raise ValueError(
f'Output {ofile} from substep {env.sos_dict["_index"]} of {env.sos_dict["__num_groups__"]} substeps overlaps with output from a previous substep.'
)
self._all_outputs.add(oname)
def submit_task(self, task_info):
if self.task_manager is None:
if self.step.task_params:
for key in ("trunk_size", "trunk_workers", "queue"):
val = get_value_of_param(
key, self.step.task_params, extra_dict=env.sos_dict.dict()
)
if val:
env.sos_dict["_runtime"][key] = val[0]
if "trunk_size" in env.sos_dict["_runtime"]:
trunk_size = env.sos_dict["_runtime"]["trunk_size"]
if trunk_size is None or trunk_size <= 0:
trunk_size = env.sos_dict["__num_groups__"]
if not isinstance(trunk_size, int):
raise ValueError(
f'An integer value or None is expected for runtime option trunk_size, "{trunk_size}" provided'
)
else:
trunk_size = 1
if "trunk_workers" in env.sos_dict["_runtime"]:
if "nodes" in env.sos_dict["_runtime"]:
raise ValueError(
'Option "trunk_workers" that specifies number of nodes and processes for the execution '
'of single-node jobs and option "nodes" that specifies number of nodes for single multi-node '
"jobs cannot be used at the same time."
)
trunk_workers = env.sos_dict["_runtime"]["trunk_workers"]
else:
trunk_workers = None
# if 'queue' in env.sos_dict['_runtime'] and env.sos_dict['_runtime']['queue']:
# host = env.sos_dict['_runtime']['queue']
# else:
# # otherwise, use workflow default
# host = '__default__'
self.task_manager = TaskManager(
env.sos_dict["__num_groups__"], trunk_size, trunk_workers
)
task_id = task_info["task_id"]
task_index = task_info["index"]
if task_id is None:
self.task_manager.set(task_index, None)
return None
taskdef = task_info["task_def"]
task_vars = task_info["task_vars"]
# 618
# it is possible that identical tasks are executed (with different underlying random numbers)
# we should either give a warning or produce different ids...
if self.task_manager.index_of(task_id) >= 0:
raise RuntimeError(
f'Task {task_id} generated for (_index={env.sos_dict["_index"]}) is identical to a previous one (_index={self.task_manager.index_of(task_id)}).'
)
elif self.task_manager.has_output(task_vars["_output"]):
raise RuntimeError(
f'Task produces output files {", ".join(task_vars["_output"])} that are output of other tasks.'
)
# if no trunk_size, the job will be submitted immediately
# otherwise tasks will be accumulated and submitted in batch
self.task_manager.set(task_index, (task_id, taskdef, task_vars["_output"]))
tasks = self.task_manager.get_job()
if tasks:
self.submit_tasks(tasks)
return task_id
def wait_for_results(self, all_submitted):
# this is a generator function because wait_for_tasks is a generator
# function and needs to yield to the caller
if self.concurrent_substep:
try:
runner = self.wait_for_substep()
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
if self.task_manager is None:
return {}
#
# report task
# what we should do here is to get the alias of the Host
        # because it can be different (e.g. not localhost)
queue = env.sos_dict["_runtime"]["queue"]
# submit the last batch of tasks
tasks = self.task_manager.get_job(all_tasks=True)
if tasks:
self.submit_tasks(tasks)
# waiting for results of specified IDs
try:
# 1218
runner = self.wait_for_tasks(
self.task_manager._submitted_tasks, all_submitted
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
results = e.value
for id, result in results.items():
# turn to string to avoid naming lookup issue
rep_result = {
x: (y if isinstance(y, (int, bool, float, str)) else short_repr(y))
for x, y in result.items()
}
rep_result["tags"] = " ".join(self.task_manager.tags(id))
rep_result["queue"] = queue
send_message_to_controller(["workflow_sig", "task", id, repr(rep_result)])
self.task_manager.clear_submitted()
# if in dryrun mode, we display the output of the dryrun task
if env.config["run_mode"] == "dryrun":
tid = list(results.keys())[0]
tf = TaskFile(tid)
if tf.has_stdout():
print(TaskFile(tid).stdout)
for idx, task in self.proc_results.items():
# if it is done
if isinstance(task, dict):
continue
if task in results:
self.proc_results[idx] = results[task]
else:
# can be a subtask
for _, mres in results.items():
if "subtasks" in mres and task in mres["subtasks"]:
self.proc_results[idx] = mres["subtasks"][task]
# elif 'exception' in mres:
# self.proc_results[idx] = mres
#
# check if all have results?
if any(isinstance(x, str) for x in self.proc_results.values()):
raise RuntimeError(
f'Failed to get results for tasks {", ".join(x for x in self.proc_results.values() if isinstance(x, str))}'
)
#
for idx, res in self.proc_results.items():
if "skipped" in res and res["skipped"]:
self.completed["__task_skipped__"] += 1
# complete case: task skipped
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
# complete case: task completed
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.completed["__task_completed__"] += 1
if "shared" in res:
self.shared_vars[idx].update(res["shared"])
def log(self, stage=None, msg=""):
if stage == "start":
env.logger.info(
f'{"Checking" if env.config["run_mode"] == "dryrun" else "Running"} ``{self.step.step_name(True)}``: {self.step.comment.strip()}'
)
elif stage == "input statement":
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("STEP", f"Handling input statement {msg}")
elif stage == "_input":
if env.sos_dict["_input"] is not None and len(env.sos_dict["_input"]) > 0:
env.logger.debug(
f'_input: ``{short_repr(env.sos_dict["_input"])}``{msg}'
)
elif stage == "_depends":
if env.sos_dict["_depends"] is not None:
env.logger.debug(
f'_depends: ``{short_repr(env.sos_dict["_depends"])}``{msg}'
)
elif stage == "input":
if env.sos_dict["step_input"] is not None:
env.logger.info(
f'input: ``{short_repr(env.sos_dict["step_input"])}``{msg}'
)
elif stage == "output":
if (
env.sos_dict["step_output"] is not None
and len(env.sos_dict["step_output"]) > 0
):
env.logger.info(
f'``{self.step.step_name(True)}`` output: ``{short_repr(env.sos_dict["step_output"])}``{msg}'
)
def execute(self, stmt, return_result=False):
try:
self.last_res = SoS_exec(
stmt,
return_result=return_result or env.config["run_mode"] == "interactive",
)
if return_result:
return self.last_res
except (StopInputGroup, TerminateExecution, UnavailableLock):
raise
except subprocess.CalledProcessError as e:
raise RuntimeError(e.stderr)
except ArgumentError:
raise
except ProcessKilled:
raise
except KeyboardInterrupt as e:
raise RuntimeError(get_traceback_msg(e))
except Exception as e:
raise RuntimeError(get_traceback_msg(e))
def prepare_substep(self):
# socket to collect result
self.result_pull_socket = create_socket(
env.zmq_context, zmq.PULL, "substep result collector"
)
local_ip = get_localhost_ip()
port = self.result_pull_socket.bind_to_random_port(f"tcp://{local_ip}")
env.config["sockets"]["result_push_socket"] = f"tcp://{local_ip}:{port}"
def submit_substep(self, param):
send_message_to_controller(["substep", param])
def process_returned_substep_result(self, till=None, wait=True):
while True:
if not wait:
# 1213
cur_index = env.sos_dict["_index"]
pending_substeps = cur_index - self._completed_concurrent_substeps + 1
if pending_substeps < (
100
if isinstance(self.concurrent_substep, bool)
else self.concurrent_substep
):
if not self.result_pull_socket.poll(0):
return
elif (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
# if there are more than 100 pending substeps
# we wait indefinitely for the results
env.log_to_file(
"STEP",
f"Wait for more substeps to be done before submitting. (index={cur_index}, processed={self._completed_concurrent_substeps})",
)
elif self._completed_concurrent_substeps == till:
return
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
if isinstance(res["exception"], ProcessKilled):
raise res["exception"]
elif isinstance(res["exception"], RemovedTarget):
pass
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"""Ignoring error from ``{self.step.step_name(True)}`` {idx_msg}: {res["exception"]}."""
)
res["output"] = sos_targets(invalid_target())
elif env.config["error_mode"] == "abort":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
                    # try to stop everything, but wait for the already
                    # submitted substeps to complete
waiting = till - 1 - self._completed_concurrent_substeps
env.logger.warning(
f'``{self.step.step_name(True)}`` {idx_msg} returns an error.{f" Terminating step after completing {waiting} submitted substeps." if waiting else " Terminating now."}'
)
for i in range(waiting):
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
self.exec_error.append(
f'index={res["index"]}', res["exception"]
)
raise self.exec_error
else:
# default or unspecified
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
#
if "index" not in res:
raise RuntimeError(
"Result received from substep does not have key index"
)
if "task_id" in res:
task = self.submit_task(res)
# if substep returns tasks, ...
if res["task_id"]:
self.proc_results[res["index"]] = task
else:
# if there is no task_id, the substep must have
# been skipped.
self.proc_results[res["index"]] = res
else:
self.proc_results[res["index"]] = res
self._completed_concurrent_substeps += 1
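    # A note on back-pressure in process_returned_substep_result above: when
    # called with wait=False it returns immediately if nothing has arrived and
    # fewer than 100 substeps (or the integer value of the `concurrent` input
    # option, when one is given) are in flight between the current _index and
    # _completed_concurrent_substeps.  Once that limit is reached it blocks on
    # the result socket instead, which in effect throttles how fast new
    # substeps are submitted for very large input groups.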
def wait_for_substep(self):
while self._completed_concurrent_substeps < len(self.proc_results):
try:
runner = self.process_returned_substep_result(
till=len(self.proc_results), wait=True
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
def collect_result(self):
# only results will be sent back to the master process
#
# __step_input__: input of this step
        # __step_output__: output of this step
# __step_depends__: dependent files of this step
result = {
"__step_input__": env.sos_dict["step_input"],
"__step_output__": env.sos_dict["step_output"],
"__step_depends__": env.sos_dict["step_depends"],
"__step_name__": env.sos_dict["step_name"],
"__completed__": self.completed,
}
result["__last_res__"] = self.last_res
result["__shared__"] = {}
if "shared" in self.step.options:
result["__shared__"] = self.shared_vars
for x in result["__step_output__"].targets:
if isinstance(x, sos_variable):
result["__shared__"][x.target_name()] = env.sos_dict[x.target_name()]
send_message_to_controller(
[
"progress",
"step_completed",
-1
if "sos_run" in env.sos_dict["__signature_vars__"]
else self.completed["__step_completed__"],
env.sos_dict["step_name"],
env.sos_dict["step_output"],
]
)
return result
def set_task_queue_from_task_params(self):
if self.step.task_params:
try:
task_queue = get_value_of_param(
"queue", self.step.task_params, extra_dict=env.sos_dict.dict()
)
if task_queue:
env.sos_dict["_runtime"]["queue"] = task_queue[0]
except Exception as e:
raise ValueError(
f"Failed to determine value of parameter queue of {self.step.task_params}: {e}"
)
# # check concurrent #1134
# try:
# task_concurrency = get_value_of_param(
# 'concurrent',
# self.step.task_params,
# extra_dict=env.sos_dict.dict())
# if task_concurrency:
# env.sos_dict['_runtime']['concurrent'] = task_concurrency[0]
# except Exception as e:
# raise ValueError(
# f'Failed to determine value of parameter queue of {self.step.task_params}: {e}'
# )
# if -q is unspecified and option queue is unspecified,
# or queue=None is specified, disregard the task keyword
if (
env.config["default_queue"] is None
and "queue" not in env.sos_dict["_runtime"]
) or (
"queue" in env.sos_dict["_runtime"]
and env.sos_dict["_runtime"]["queue"] is None
):
# remove task statement
if len(self.step.statements) >= 1 and self.step.statements[-1][0] == "!":
self.step.statements[-1][1] += "\n" + self.step.task
else:
self.step.statements.append(["!", self.step.task])
self.step.task = None
        # if queue is unspecified, it takes its value from the command line
# in this case -q should have been specified
elif "queue" not in env.sos_dict["_runtime"]:
env.sos_dict["_runtime"]["queue"] = env.config["default_queue"]
def local_exec_without_signature(self, statement):
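        """Execute a substep statement in the local process without checking
        or saving a runtime signature."""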
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP", f'Execute substep {env.sos_dict["step_name"]} without signature'
)
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
finally:
if not self.step.task:
                # if there is no task, this step is completed
# complete case: local skip without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
def local_exec_with_signature(self, statement, sig):
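        """Execute a substep statement locally under a runtime signature.

        Return True if the signature matched and the substep was skipped,
        False otherwise.
        """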
idx = env.sos_dict["_index"]
        # the signature might have been built outside of the function;
        # if not (e.g. not in a debug mode), it is built now
if sig is None:
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
        # if the signature matches, we skip the substep even if
# there are tasks.
matched = validate_step_sig(sig)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
return True
env.log_to_file(
"STEP",
f'Execute substep {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
sig.lock()
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
finally:
# if this is the end of substep, save the signature
# otherwise we need to wait for the completion
# of the task.
if not self.step.task:
if env.sos_dict["step_output"].undetermined():
output = reevaluate_output()
self.output_groups[env.sos_dict["_index"]] = output
sig.set_output(output)
sig.write()
# complete case : local execution without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
self.pending_signatures[idx] = sig
sig.release()
return False
def skip_substep(self):
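        """Mark the current substep as ignored and notify the controller."""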
idx = env.sos_dict["_index"]
        # if this is a concurrent substep, there might be later substeps that need to be rerun
        # and we need to mark some substeps as having been completed.
if self.concurrent_substep:
self._completed_concurrent_substeps += 1
self.proc_results[idx] = {
"index": idx,
"ret_code": 0,
"output": copy.deepcopy(env.sos_dict["_output"]),
}
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
def concurrent_exec(self, statement, sig=None):
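        """Package the current substep and submit it to a substep worker for
        concurrent execution."""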
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP",
f'Execute substep {env.sos_dict["step_name"]} {idx} concurrently with {self._completed_concurrent_substeps} completed',
)
        # the signatures are supposed to be written by the substep worker; however,
# the substep worker might send tasks back to the step worker and
# we should write the signatures after the tasks are completed
if (
env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
and self.step.task
):
self.pending_signatures[idx] = (
sig
if sig
else RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
)
#
# step_output: needed only when it is undetermined
# step_input: not needed
# _input, _output, _depends, _index: needed
# step_name: for debug scripts
# step_id, workflow_id: for reporting to controller
# '__signature_vars__' to be used for signature creation
#
# __step_context__ is not needed because substep
# executor does not support nested workflow
proc_vars = (
env.sos_dict["__signature_vars__"]
| env.sos_dict["__environ_vars__"]
| {
"_input",
"_output",
"_depends",
"_index",
"step_output",
"step_name",
"_runtime",
"step_id",
"workflow_id",
"__num_groups__",
"__signature_vars__",
}
)
self.proc_results[env.sos_dict["_index"]] = {}
self.submit_substep(
dict(
stmt=statement[1],
global_def=self.step.global_def,
# 1225: the step might contain large variables from global section, but
                # we do not have to send them if they are not used in substeps.
cwd=os.getcwd(),
global_vars={
x: y
for x, y in self.step.global_vars.items()
if x in env.sos_dict["__signature_vars__"]
},
task=self.step.task,
task_params=self.step.task_params,
proc_vars=env.sos_dict.clone_selected_vars(proc_vars),
shared_vars=self.vars_to_be_shared,
config=env.config,
)
)
def check_task_sig(self):
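        """Validate the signature of a task-only substep and return True if
        the substep can be skipped."""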
idx = env.sos_dict["_index"]
sig = RuntimeInfo(
statementMD5([self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
env.log_to_file(
"STEP",
f'Check task-only step {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[env.sos_dict["_index"]] = matched["output"]
self.shared_vars[env.sos_dict["_index"]].update(matched["vars"])
# complete case: step with task ignored
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.pending_signatures[idx] = sig
return skip_index
# def is_task_active(self):
# active = env.sos_dict['_runtime']['active']
# env.logger.error(active)
# if active is True:
# return True
# elif active is False:
# return False
# elif isinstance(active, int):
# if active >= 0 and env.sos_dict['_index'] != active:
# return False
# if active < 0 and env.sos_dict[
# '_index'] != active + env.sos_dict['__num_groups__']:
# return False
# return True
# elif isinstance(active, Sequence):
# allowed_index = list([
# x if x >= 0 else env.sos_dict['__num_groups__'] + x
# for x in active
# ])
# return env.sos_dict['_index'] in allowed_index
# elif isinstance(active, slice):
# allowed_index = list(range(env.sos_dict['__num_groups__']))[active]
# return env.sos_dict['_index'] in allowed_index
# else:
# raise RuntimeError(
# f'Unacceptable value for option active: {active}')
def check_results(self):
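        """Relay stdout/stderr from substep results, write pending signatures,
        and collect (and possibly raise) substep errors."""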
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] == 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
# now that output is settled, we can write remaining signatures
for idx, res in self.proc_results.items():
if (
self.pending_signatures[idx] is not None
and res["ret_code"] == 0
and "sig_skipped" not in res
):
# task might return output with vars #1355
self.pending_signatures[idx].set_output(self.output_groups[idx])
self.pending_signatures[idx].write()
if res["ret_code"] != 0 and "output" in res:
clear_output(output=res["output"])
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] != 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
if "exception" in proc_result:
excp = proc_result["exception"]
if isinstance(excp, StopInputGroup):
if excp.message:
env.logger.info(excp.message)
self.output_groups[proc_result["index"]] = sos_targets([])
elif isinstance(excp, RemovedTarget):
raise excp
elif "task" in proc_result:
if env.config["error_mode"] == "ignore":
env.logger.warning(f"Ignore failed task {proc_result['task']}.")
# if the exception is from a task...
self.exec_error.append(proc_result["task"], excp)
else:
self.exec_error.append(
RuntimeError(
f"Substep failed with return code {proc_result['ret_code']}"
)
)
# this is after all substeps have been completed
if self.exec_error.errors:
raise self.exec_error
def calculate_completed(self):
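        """Convert substep completion counters into step-level completion ratios."""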
substeps = (
self.completed["__substep_completed__"]
+ self.completed["__substep_skipped__"]
)
self.completed["__step_completed__"] = (
self.completed["__substep_completed__"] / substeps
)
self.completed["__step_skipped__"] = (
self.completed["__substep_skipped__"] / substeps
)
if self.completed["__step_completed__"].is_integer():
self.completed["__step_completed__"] = int(
self.completed["__step_completed__"]
)
if self.completed["__step_skipped__"].is_integer():
self.completed["__step_skipped__"] = int(self.completed["__step_skipped__"])
def run(self):
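        """Generator that executes the step: prepare the step dictionary,
        process input/output/depends statements, run substeps locally or
        concurrently, wait for tasks and nested workflows, and return the
        collected result."""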
# return value of the last executed statement
self.last_res = None
self.start_time = time.time()
self.completed = defaultdict(int)
#
# prepare environments, namely variables that can be used by the step
#
# * step_name: name of the step, can be used by step process to determine
# actions dynamically.
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set("__last_step__", self.step.last_step)
self.log("start")
env.sos_dict.set(
"step_id",
textMD5(
f'{env.sos_dict["workflow_id"]} {env.sos_dict["step_name"]} {self.step.md5}'
),
)
env.sos_dict.set("master_id", env.config["master_id"])
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
env.sos_dict.set("_runtime", {})
# * input: input files, which should be __step_output__ if it is defined, or
# None otherwise.
# * _input: first batch of input, which should be input if no input statement is used
# * output: None at first, can be redefined by output statement
# * _output: None at first, can be redefined by output statement
# * depends: None at first, can be redefined by depends statement
# * _depends: None at first, can be redefined by depends statement
#
self.init_input_output_vars()
# _index is needed for pre-input action's active option and for debug output of scripts
env.sos_dict.set("_index", 0)
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file(
"STEP",
f'Executing step {env.sos_dict["step_name"]} with step_input {env.sos_dict["step_input"]} and step_output {env.sos_dict["step_output"]}',
)
self.set_task_queue_from_task_params()
# look for input statement.
input_statement_idx = [
idx
for idx, x in enumerate(self.step.statements)
if x[0] == ":" and x[1] == "input"
]
if not input_statement_idx:
input_statement_idx = None
elif len(input_statement_idx) == 1:
input_statement_idx = input_statement_idx[0]
else:
raise ValueError(
f"More than one step input are specified in step {self.step.step_name(True)}"
)
# if shared is true, we have to disable concurrent because we
# do not yet return anything from shared.
self.concurrent_substep = "shared" not in self.step.options
# and \
# ('concurrent' not in env.sos_dict['_runtime'] or env.sos_dict['_runtime']['concurrent'] is True)
if input_statement_idx is not None:
# execute before input stuff
for statement in self.step.statements[:input_statement_idx]:
if statement[0] == ":":
                    # wait for all dependent targets to be resolved
key, value = statement[1:3]
if key != "depends":
raise ValueError(f"Step input should be specified before {key}")
while True:
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(dfiles, **kwargs)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
break
else:
try:
# 1354
                        # if there are definitions before input, the definitions and imports
# must be added to global_def in order to be executed by substeps
if any(x in statement[1] for x in ("class", "def", "import")):
step_def = KeepOnlyImportAndDefine().visit(
ast.parse(statement[1])
)
if step_def.body:
if isinstance(self.step.global_def, ast.Module):
self.step.global_def.body.extend(step_def.body)
else:
self.step.global_def = step_def
self.execute(statement[1])
except StopInputGroup as e:
# stop before substeps, because there is no output statement before it
# we do not have to worry about keep_output
if e.message:
env.logger.info(e.message)
return self.collect_result()
# input statement
stmt = self.step.statements[input_statement_idx][2]
self.log("input statement", stmt)
while True:
            # wait for all targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({stmt})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# Files will be expanded differently with different running modes
input_files: sos_targets = expand_input_files(
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_INPUT_OPTIONS
},
)
runner = self.process_input_args(
input_files,
**{k: v for k, v in kwargs.items() if k in SOS_INPUT_OPTIONS},
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
self._substeps = e.value
#
if "concurrent" in kwargs and self.concurrent_substep:
# concurrent can be True/False or an integer
self.concurrent_substep = kwargs["concurrent"]
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise ValueError(f"Failed to process input statement {stmt}: {e}")
break
input_statement_idx += 1
elif env.sos_dict["step_input"].groups:
# if default has groups...
# default case
self._substeps = env.sos_dict["step_input"].groups
            # assume all statements (starting from index 0) come after the input statement
input_statement_idx = 0
else:
# default case
self._substeps = [env.sos_dict["step_input"]]
            # assume all statements (starting from index 0) come after the input statement
input_statement_idx = 0
self.proc_results = {}
self.vars_to_be_shared = set()
if "shared" in self.step.options:
self.vars_to_be_shared = parse_shared_vars(self.step.options["shared"])
self.vars_to_be_shared = sorted(
[
x[5:] if x.startswith("step_") else x
for x in self.vars_to_be_shared
if x not in ("step_", "step_input", "step_output", "step_depends")
]
)
self.shared_vars = [{} for x in self._substeps]
# run steps after input statement, which will be run multiple times for each input
# group.
env.sos_dict.set("__num_groups__", len(self._substeps))
# determine if a single index or the whole step should be skipped
skip_index = False
        # signatures of each index, which can remain None if no output
# is defined.
self.output_groups = [sos_targets([]) for x in self._substeps]
self.depends_groups = [sos_targets([]) for x in self._substeps]
# used to prevent overlapping output from substeps
self._all_outputs = set()
self._subworkflow_results = []
if (
any("sos_run" in x[1] for x in self.step.statements[input_statement_idx:])
and "shared" not in self.step.options
and not self.step.task
and self.step.statements[-1][0] == "!"
and (len(self.step.statements) == 1 or self.step.statements[-2][0] == ":")
and is_sos_run_the_only_last_stmt(self.step.statements[-1][1])
):
env.sos_dict.set("__concurrent_subworkflow__", True)
if self.concurrent_substep:
if len(self._substeps) <= 1 or env.config["run_mode"] == "dryrun":
self.concurrent_substep = False
elif any(
"sos_run" in x[1] for x in self.step.statements[input_statement_idx:]
):
self.concurrent_substep = False
env.logger.debug(
"Substeps are executed sequentially because of existence of multiple nested workflow."
)
else:
self.prepare_substep()
try:
self.completed["__substep_skipped__"] = 0
self.completed["__substep_completed__"] = len(self._substeps)
self._completed_concurrent_substeps = 0
# pending signatures are signatures for steps with external tasks
self.pending_signatures = [None for x in self._substeps]
for idx, g in enumerate(self._substeps):
#
# https://github.com/vatlab/sos/issues/1376
#
# [default]
# input: for_each=dict(i=range(1000))
# sos_run('a', t=i)
#
                # with a workflow like the example above, where steps execute
                # quickly and submit subworkflows to the master, the master
                # process could be swamped with subworkflows, causing
                # "too many open files".
                #
                # the following code stops the step from continuing execution
                # and waits for the subworkflows to complete.
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(
allow_pending=env.config["worker_procs"]
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# other variables
#
_vars = {}
# now, let us expose target level variables as lists
if len(g) > 1:
names = set.union(*[set(x._dict.keys()) for x in g._targets])
elif len(g) == 1:
names = set(g._targets[0]._dict.keys())
else:
names = set()
for name in names:
_vars[name] = [x.get(name) for x in g._targets]
# then we expose all group level variables
_vars.update(g._dict)
_vars.update(env.sos_dict["step_input"]._dict)
env.sos_dict.update(_vars)
env.sos_dict.set("_input", copy.deepcopy(g))
# set vars to _input
# env.sos_dict['_input'].set(**v)
self.log("_input")
env.sos_dict.set("_index", idx)
if env.config["error_mode"] == "ignore":
missed = [x for x in g.targets if not x.target_exists()]
if missed:
if any(isinstance(x, invalid_target) for x in missed):
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to invalid input caused by previous failed substep.'
)
else:
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to missing input {sos_targets(missed)}'
)
self.output_groups[idx] = sos_targets(invalid_target())
env.sos_dict.set("_output", sos_targets(invalid_target()))
self.skip_substep()
continue
# in interactive mode, because sos_dict are always shared
# execution of a substep, especially when it calls a nested
# workflow, would change step_name, __step_context__ etc, and
# we will have to reset these variables to make sure the next
# substep would execute normally. Batch mode is immune to this
# problem because nested workflows are executed in their own
# process/context etc
if env.config["run_mode"] == "interactive":
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set(
"step_id",
hash(
(
env.sos_dict["workflow_id"],
env.sos_dict["step_name"],
self.step.md5,
)
),
)
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
#
pre_statement = []
if (
not any(
st[0] == ":" and st[1] == "output"
for st in self.step.statements[input_statement_idx:]
)
and "__default_output__" in env.sos_dict
):
pre_statement = [[":", "output", "_output"]]
# if there is no statement, no task, claim success
post_statement = []
if not self.step.statements or self.step.statements[-1][0] != "!":
if self.step.task:
# if there is only task, we insert a fake statement so that it can be executed by the executor
post_statement = [["!", ""]]
else:
# complete case: no step, no statement
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
all_statements = (
pre_statement
+ self.step.statements[input_statement_idx:]
+ post_statement
)
self.is_input_verified = True
for statement_idx, statement in enumerate(all_statements):
is_last_runblock = statement_idx == len(all_statements) - 1
                    # if input is undetermined, we can only process output:
if not g.valid() and statement[0] != ":":
raise RuntimeError("Undetermined input encountered")
if statement[0] == ":":
key, value = statement[1:3]
# output, depends, and process can be processed multiple times
while True:
# loop for all unresolved targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# dynamic output or dependent files
if key == "output":
# if output is defined, its default value needs to be cleared
if idx == 0:
env.sos_dict.set("step_output", sos_targets())
ofiles: sos_targets = expand_output_files(
value,
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_OUTPUT_OPTIONS
},
)
if g.valid() and ofiles.valid():
if any(
x in g._targets
for x in ofiles
if not isinstance(x, sos_step)
):
raise RuntimeError(
f'Overlapping input and output files: {", ".join(repr(x) for x in ofiles if x in g)}'
)
# set variable _output and output
self.process_output_args(
ofiles,
**{
k: v
for k, v in kwargs.items()
if k in SOS_OUTPUT_OPTIONS
},
)
self.output_groups[idx] = env.sos_dict["_output"]
elif key == "depends":
try:
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(
dfiles, **kwargs
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
self.depends_groups[idx] = env.sos_dict[
"_depends"
]
self.log("_depends")
except Exception:
# env.logger.info(e)
raise
else:
raise RuntimeError(f"Unrecognized directive {key}")
# everything is ok, break
break
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
                                # if input is Undetermined, it is possible that output cannot be processed
# due to that, and we just return
if not g.valid():
env.logger.debug(e)
return self.collect_result()
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
elif is_last_runblock:
if (
env.config["sig_mode"] == "skip"
and not self.vars_to_be_shared
and "sos_run" not in statement[1]
and not env.sos_dict["_output"].unspecified()
and len(env.sos_dict["_output"]) > 0
and all(
x.target_exists()
for x in env.sos_dict["_output"].targets
)
and env.sos_dict["_output"].later_than(
env.sos_dict["_input"]
)
):
self.skip_substep()
env.logger.info(
f'``{env.sos_dict["step_name"]}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``skipped`` with existing output.'
)
skip_index = True
# do not execute the rest of the statement
break
#
# default mode, check if skipping substep
sig = None
if (
env.config["sig_mode"]
not in ("ignore", "distributed", "build")
and not env.sos_dict["_output"].unspecified()
):
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if skip_index:
                                # matched["output"] might have vars not defined in "output" #1355
env.sos_dict.set("_output", matched["output"])
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
self.skip_substep()
break
try:
if self.concurrent_substep:
self.concurrent_exec(statement, sig)
                                # we check whether previously submitted substeps have completed and process them
# because further steps might need to be done
try:
runner = self.process_returned_substep_result(
till=idx + 1, wait=False
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
elif (
env.config["sig_mode"] == "ignore"
or env.sos_dict["_output"].unspecified()
):
self.local_exec_without_signature(statement)
else:
skip_index = self.local_exec_with_signature(
statement, sig
)
if skip_index:
self.skip_substep()
break
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception as e:
clear_output()
if env.config["error_mode"] == "abort":
raise
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"{self.step.step_name(True)} {idx_msg} returns no output due to error: {e}"
)
self.output_groups[idx] = sos_targets(invalid_target())
skip_index = True
else:
if env.config["run_mode"] != "interactive":
# default mode
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.error(
f"{self.step.step_name(True)} {idx_msg} returns an error."
)
self.exec_error.append(str(idx), e)
else:
# if it is not the last statement group (e.g. statements before :output)
                        # we execute locally without any signature handling
if self.is_input_verified:
verify_input()
self.is_input_verified = False
try:
self.execute(statement[1])
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception:
clear_output()
raise
                # if there is no statement, but there are tasks, we should
# check signature here.
if (
(not self.step.statements or self.step.statements[-1][0] != "!")
and self.step.task
and not self.concurrent_substep
and env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
):
skip_index = self.check_task_sig()
# if this index is skipped, go directly to the next one
if skip_index:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
skip_index = False
continue
# if concurrent input group, tasks are handled in substep
if self.concurrent_substep or not self.step.task:
continue
if env.config["run_mode"] == "dryrun" and env.sos_dict["_index"] != 0:
continue
# # check if the task is active
# if 'active' in env.sos_dict['_runtime']:
# if not self.is_task_active():
# continue
#
self.log("task")
try:
task_id, taskdef, task_vars = create_task(
self.step.global_def,
self.step.global_vars,
self.step.task,
self.step.task_params,
)
task = self.submit_task(
{
"index": env.sos_dict["_index"],
"task_id": task_id,
"task_def": taskdef,
"task_vars": task_vars,
}
)
self.proc_results[env.sos_dict["_index"]] = task
except Exception as e:
# FIXME: cannot catch exception from subprocesses
if env.verbosity > 2:
sys.stderr.write(get_traceback())
raise RuntimeError(
f'Failed to execute process\n"{short_repr(self.step.task)}"\n{e}'
)
#
# # if not concurrent, we have to wait for the completion of the task
# if 'concurrent' in env.sos_dict['_runtime'] and env.sos_dict[
# '_runtime']['concurrent'] is False:
# # in this case the steps must be executed not concurrently
# runner = self.wait_for_results(all_submitted=False)
# try:
# yreq = next(runner)
# while True:
# yres = yield yreq
# yreq = runner.send(yres)
# except StopIteration:
# pass
#
# endfor loop for each input group
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(allow_pending=0)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.pop("__concurrent_subworkflow__")
runner = self.wait_for_results(all_submitted=True)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
for idx, res in self.proc_results.items():
if "sig_skipped" in res:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
if "output" in res:
self.output_groups[idx] = res["output"]
# check results
self.check_results()
# if error happened but we allow all substeps to be completed, we now
# raise exception
if self.exec_error.errors:
raise self.exec_error
            # if output is Undetermined, re-evaluate it
# finalize output from output_groups because some output might be skipped
# this is the final version of the output but we do maintain output
# during the execution of step, for compatibility.
env.sos_dict.set(
"step_output", sos_targets([])._add_groups(self.output_groups)
)
env.sos_dict.set(
"step_depends", sos_targets([])._add_groups(self.depends_groups)
)
# if there exists an option shared, the variable would be treated as
# provides=sos_variable(), and then as step_output
if "shared" in self.step.options:
self.shared_vars = evaluate_shared(
self.shared_vars, self.step.options["shared"]
)
env.sos_dict.quick_update(self.shared_vars)
missing = self.verify_output()
self.log(
"output",
msg=f'\033[95m missing: {short_repr(missing)} ({len(missing)} item{"s" if len(missing)>1 else ""})\033[0m'
if len(missing) > 0
else "",
)
self.calculate_completed()
def file_only(targets):
if not isinstance(targets, sos_targets):
env.logger.warning(
f"Unexpected input or output target for reporting. Empty list returned: {targets}"
)
return []
return [
(str(x), x.size())
for x in targets._targets
if isinstance(x, file_target)
]
step_info = {
"step_id": self.step.md5,
"start_time": self.start_time,
"stepname": self.step.step_name(True),
"substeps": len(self._substeps),
"input": file_only(env.sos_dict["step_input"]),
"output": file_only(env.sos_dict["step_output"]),
"completed": dict(self.completed),
"end_time": time.time(),
}
send_message_to_controller(
["workflow_sig", "step", env.sos_dict["workflow_id"], repr(step_info)]
)
return self.collect_result()
finally:
if self.concurrent_substep:
close_socket(self.result_pull_socket, "substep collector")
class Step_Executor(Base_Step_Executor):
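    """Step executor that runs in a separate SoS worker process and exchanges
    results with the controller through the provided socket."""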
def __init__(self, step, socket, mode="run"):
self.run_mode = mode
env.config["run_mode"] = mode
super(Step_Executor, self).__init__(step)
self.socket = socket
# because step is executed in a separate SoS_Worker process, this
# __socket__ is available to all the actions that will be executed
# in the step
env.__socket__ = socket
def submit_tasks(self, tasks):
if "TASK" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("TASK", f"Send {tasks}")
self.socket.send(
encode_msg(["tasks", env.sos_dict["_runtime"]["queue"]] + tasks)
)
def wait_for_tasks(self, tasks, all_submitted):
        # wait_for_tasks is a generator function that yields the request
# to the runner
if not tasks:
return {}
# when we wait, the "outsiders" also need to see the tags etc
# of the tasks so we have to write to the database. #156
send_message_to_controller(["commit_sig"])
        # wait until the executor responds
results = {}
while True:
# yield an indicator of what is requested, for debugging purpose
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
results.update(res)
# all results have been obtained.
if len(results) == len(tasks):
break
return results
def wait_for_subworkflows(self, allow_pending):
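        """Generator that waits until the number of pending nested workflows
        drops to ``allow_pending`` (all of them when ``allow_pending`` is 0)."""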
try:
allow_pending = int(allow_pending)
except:
allow_pending = min(max(os.cpu_count() // 2, 2), 8)
while self._subworkflow_results:
if allow_pending > 0:
n_pending = sum(
len(x["pending_workflows"]) for x in self._subworkflow_results
)
if n_pending <= allow_pending:
break
# here we did not check if workflow ids match
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
elif isinstance(res, Exception):
raise res
            if "__workflow_id__" not in res:
raise ValueError(f"Unrecognized result from subworkflows: {res}")
            # remove from self._subworkflow_results
result_with_id = [
idx
for idx, x in enumerate(self._subworkflow_results)
if res["__workflow_id__"] in x["pending_workflows"]
]
if not result_with_id:
raise RuntimeError(
f"Failed to identify ID of returned subworkflow: {res}"
)
if len(result_with_id) > 1:
raise RuntimeError(
"Multiple matches of subworkflow ID. This should not happen."
)
self._subworkflow_results[result_with_id[0]]["pending_workflows"].remove(
res["__workflow_id__"]
)
if not self._subworkflow_results[result_with_id[0]]["pending_workflows"]:
self._subworkflow_results.pop(result_with_id[0])
def handle_unknown_target(self, e):
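        """Ask the controller to resolve a missing target and re-raise the
        original exception if it cannot be resolved."""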
self.socket.send(encode_msg(["missing_target", e.target]))
yield self.socket
res = decode_msg(self.socket.recv())
if not res:
raise e
def verify_dynamic_targets(self, targets):
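        """Report traced dynamic targets to the controller and confirm that
        they have been resolved."""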
if not targets:
return
if env.config["trace_existing"]:
traced = targets
else:
traced = [x for x in targets if x.traced]
if not traced:
return
self.socket.send(encode_msg(["dependent_target"] + traced))
yield self.socket
res = decode_msg(self.socket.recv())
if res != "target_resolved":
            raise RuntimeError(f"Failed to verify dependent target {traced}")
def run(self):
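        """Run the step and send the result (or exception) back through the
        socket; raise directly when no socket is available."""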
try:
try:
# 1218
runner = Base_Step_Executor.run(self)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
res = e.value
if self.socket is not None:
if (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends result {short_repr(res)}",
)
self.socket.send(encode_msg(res))
else:
return res
except RemovedTarget as e:
# removed target needs to be handled differently since the workflow manager
            # uses type information to get removed targets
if self.socket is not None and not self.socket.closed:
self.socket.send(encode_msg(e))
else:
raise e
except Exception as e:
if env.verbosity > 2:
sys.stderr.write(get_traceback())
if isinstance(e, ProcessKilled):
raise
# if not self.exec_error
if e != self.exec_error:
self.exec_error.append(self.step.step_name(), e)
#
if self.exec_error.errors:
if self.socket is not None and not self.socket.closed:
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends exception {self.exec_error}",
)
self.socket.send(encode_msg(self.exec_error))
else:
raise self.exec_error
| true
| true
|
f715866e13eb002c14554c06f9cadbe3ff57a70a
| 322
|
py
|
Python
|
generator/framework/util/fs.py
|
sinsay/ds_generator
|
9365e22e8730418caf29b8ed6ada1f30f936a297
|
[
"Apache-2.0"
] | null | null | null |
generator/framework/util/fs.py
|
sinsay/ds_generator
|
9365e22e8730418caf29b8ed6ada1f30f936a297
|
[
"Apache-2.0"
] | null | null | null |
generator/framework/util/fs.py
|
sinsay/ds_generator
|
9365e22e8730418caf29b8ed6ada1f30f936a297
|
[
"Apache-2.0"
] | null | null | null |
import os
def mkdir_without_exception(target):
try:
# subprocess.call([
# "mkdir",
# "-p",
# target
# ])
os.makedirs(target, exist_ok=True)
except FileExistsError:
print("the directory %s already exists. continue the next gen phase." % target)
| 23
| 87
| 0.552795
|
import os
def mkdir_without_exception(target):
try:
os.makedirs(target, exist_ok=True)
except FileExistsError:
print("the directory %s already exists. continue the next gen phase." % target)
| true
| true
|
f71586bd484ad9828fd3d9ba20d058a77b29f8ff
| 91
|
py
|
Python
|
app/handlers/__init__.py
|
Katel212/MyPersonalKitchenBot
|
03de0beeaf2665e8b3ddd1709da3d4edcd422b80
|
[
"MIT"
] | null | null | null |
app/handlers/__init__.py
|
Katel212/MyPersonalKitchenBot
|
03de0beeaf2665e8b3ddd1709da3d4edcd422b80
|
[
"MIT"
] | 5
|
2020-12-22T17:53:05.000Z
|
2021-04-07T20:00:47.000Z
|
app/handlers/__init__.py
|
Katel212/MyPersonalKitchenBot
|
03de0beeaf2665e8b3ddd1709da3d4edcd422b80
|
[
"MIT"
] | null | null | null |
from .errors import *
from .private import *
from .callback import *
from .states import *
| 18.2
| 23
| 0.736264
|
from .errors import *
from .private import *
from .callback import *
from .states import *
| true
| true
|
f71586c2e3611f2c07d319406a22e6a386a06e89
| 695
|
py
|
Python
|
app/core/management/commands/wait_for_db.py
|
martinramirezboggio/recipe-app-api
|
8f576ae036ba9a55e75a76465e97e0340378572e
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
martinramirezboggio/recipe-app-api
|
8f576ae036ba9a55e75a76465e97e0340378572e
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
martinramirezboggio/recipe-app-api
|
8f576ae036ba9a55e75a76465e97e0340378572e
|
[
"MIT"
] | null | null | null |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""django command to pause execution until db is ready"""
def handle(self, *args, **options):
"""handle the command"""
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
| 30.217391
| 78
| 0.638849
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
| true
| true
|
f71587d10ef1d1aed4af3fd809bfa4096755e581
| 7,494
|
py
|
Python
|
e2e/tests/selenium/page_objects.py
|
p2pu/learning-circles
|
ccd94208ec18082f8fda6d7f21eacdd71bad6023
|
[
"MIT"
] | 10
|
2016-05-03T20:41:25.000Z
|
2021-09-17T18:42:01.000Z
|
e2e/tests/selenium/page_objects.py
|
p2pu/learning-circles
|
ccd94208ec18082f8fda6d7f21eacdd71bad6023
|
[
"MIT"
] | 655
|
2016-05-04T19:00:35.000Z
|
2022-03-28T13:09:20.000Z
|
e2e/tests/selenium/page_objects.py
|
p2pu/learning-circles
|
ccd94208ec18082f8fda6d7f21eacdd71bad6023
|
[
"MIT"
] | 8
|
2016-05-06T10:24:27.000Z
|
2020-10-21T00:56:59.000Z
|
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from e2e.tests.selenium.locators import LearningCircleCreationPageLocators
from e2e.tests.selenium.locators import RegistrationModalLocators
import datetime
import time
class BasePage(object):
def __init__(self, driver, wait):
self.driver = driver
self.wait = wait
def fill_text_field(self, locator, *text):
input_field = self.driver.find_element(*locator)
try:
input_field.clear()
except:
pass
finally:
input_field.send_keys(*text)
def fill_rich_text_field(self, locator, *text):
tinymce_iframe = self.wait.until(expected_conditions.presence_of_element_located(locator))
self.driver.switch_to_frame(tinymce_iframe)
rich_text_field = self.wait.until(expected_conditions.presence_of_element_located(LearningCircleCreationPageLocators.TINYMCE_FIELD))
rich_text_field.send_keys(*text)
self.driver.switch_to_default_content()
class LearningCircleCreationPage(BasePage):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def fill_out_form_correctly(self):
self.select_first_course()
self.click_next_button()
self.fill_city_select_field("Kitchener")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_NAME_FIELD, "KPL")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_DETAILS_FIELD, "Hacienda Cafe")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_ADDRESS_FIELD, "85 Queen St N, Kitchener")
self.click_next_button()
self.select_start_date()
self.select_suggested_dates()
self.wait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '#selected-dates li')))
self.fill_text_field(LearningCircleCreationPageLocators.MEETING_TIME_FIELD, "7:00 PM", Keys.ENTER)
self.fill_text_field(LearningCircleCreationPageLocators.MEETING_END_TIME_FIELD, "8:00 PM", Keys.ENTER)
self.click_next_button()
self.fill_text_field(LearningCircleCreationPageLocators.TITLE_FIELD, "Sharon's Learning Circle")
self.fill_rich_text_field(LearningCircleCreationPageLocators.DESCRIPTION_FIELD, "Welcome to my learning circle!")
self.fill_rich_text_field(LearningCircleCreationPageLocators.COURSE_DESCRIPTION_FIELD, "This is the course description")
self.fill_text_field(LearningCircleCreationPageLocators.SIGNUP_QUESTION_FIELD, "What do you want to learn?")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_WEBSITE_FIELD, "https://www.kpl.org")
self.click_next_button()
self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_GOAL_FIELD, "Have a great learning circle")
self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_CONCERNS_FIELD, "Nothing really")
def select_start_date(self):
calendar_date = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CALENDAR_TODAY))
calendar_date.click()
def select_suggested_dates(self):
btn = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ACCEPT_SUGGESTED_DATES_BUTTON))
# use this instead of btn.click() since the button is out of view
self.driver.execute_script("return arguments[0].click();", btn)
def select_first_course(self):
course_cards = self.wait.until(expected_conditions.visibility_of_all_elements_located(LearningCircleCreationPageLocators.COURSE_CARDS))
self.wait.until(expected_conditions.text_to_be_present_in_element(LearningCircleCreationPageLocators.FIRST_COURSE_TITLE, "Academic Writing"))
course_select_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.FIRST_COURSE_BUTTON))
# button is out of view
self.driver.execute_script("return arguments[0].click();", course_select_button)
# wait until search container is gone
self.wait.until_not(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '.search-container')))
        remove_link = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.REMOVE_COURSE_SELECTION_LINK))
assert 'Remove selection' in remove_link.text
def fill_city_select_field(self, location):
city_select = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.CITY_SELECT_INPUT))
city_select.send_keys(location)
self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CITY_SELECT_OPTION))
city_select.send_keys(Keys.ENTER)
def click_next_button(self):
next_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.NEXT_TAB_BUTTON))
next_button.click()
def click_publish_button(self):
publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.PUBLISH_BUTTON))
publish_button.click()
def click_save_button(self):
publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SAVE_BUTTON))
publish_button.click()
def click_modal_button(self):
modal_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.MODAL_BUTTON))
modal_button.click()
def click_schedule_meetings_button(self):
meetings_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SCHEDULE_MEETINGS_BUTTON))
meetings_button.click()
def click_login_link(self):
self.driver.find_element_by_css_selector('.registration-modal-content button:first-child').click()
def fill_out_login_modal(self, user_data):
self.fill_text_field(RegistrationModalLocators.EMAIL_FIELD, user_data["email"])
self.fill_text_field(RegistrationModalLocators.PASSWORD_FIELD, user_data["password"])
self.driver.find_element(*RegistrationModalLocators.SUBMIT_BUTTON).click()
def go_to_tab_1(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_1))
tab_button.click()
def go_to_tab_2(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_2))
tab_button.click()
def go_to_tab_3(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_3))
tab_button.click()
def go_to_tab_4(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_4))
tab_button.click()
def go_to_tab_5(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_5))
tab_button.click()
def close_alert(self):
close_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ALERT_CLOSE_BUTTON))
close_button.click()
| 49.96
| 154
| 0.777022
|
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from e2e.tests.selenium.locators import LearningCircleCreationPageLocators
from e2e.tests.selenium.locators import RegistrationModalLocators
import datetime
import time
class BasePage(object):
def __init__(self, driver, wait):
self.driver = driver
self.wait = wait
def fill_text_field(self, locator, *text):
input_field = self.driver.find_element(*locator)
try:
input_field.clear()
except:
pass
finally:
input_field.send_keys(*text)
def fill_rich_text_field(self, locator, *text):
tinymce_iframe = self.wait.until(expected_conditions.presence_of_element_located(locator))
self.driver.switch_to_frame(tinymce_iframe)
rich_text_field = self.wait.until(expected_conditions.presence_of_element_located(LearningCircleCreationPageLocators.TINYMCE_FIELD))
rich_text_field.send_keys(*text)
self.driver.switch_to_default_content()
class LearningCircleCreationPage(BasePage):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def fill_out_form_correctly(self):
self.select_first_course()
self.click_next_button()
self.fill_city_select_field("Kitchener")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_NAME_FIELD, "KPL")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_DETAILS_FIELD, "Hacienda Cafe")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_ADDRESS_FIELD, "85 Queen St N, Kitchener")
self.click_next_button()
self.select_start_date()
self.select_suggested_dates()
self.wait.until(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '#selected-dates li')))
self.fill_text_field(LearningCircleCreationPageLocators.MEETING_TIME_FIELD, "7:00 PM", Keys.ENTER)
self.fill_text_field(LearningCircleCreationPageLocators.MEETING_END_TIME_FIELD, "8:00 PM", Keys.ENTER)
self.click_next_button()
self.fill_text_field(LearningCircleCreationPageLocators.TITLE_FIELD, "Sharon's Learning Circle")
self.fill_rich_text_field(LearningCircleCreationPageLocators.DESCRIPTION_FIELD, "Welcome to my learning circle!")
self.fill_rich_text_field(LearningCircleCreationPageLocators.COURSE_DESCRIPTION_FIELD, "This is the course description")
self.fill_text_field(LearningCircleCreationPageLocators.SIGNUP_QUESTION_FIELD, "What do you want to learn?")
self.fill_text_field(LearningCircleCreationPageLocators.VENUE_WEBSITE_FIELD, "https://www.kpl.org")
self.click_next_button()
self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_GOAL_FIELD, "Have a great learning circle")
self.fill_text_field(LearningCircleCreationPageLocators.FACILITATOR_CONCERNS_FIELD, "Nothing really")
def select_start_date(self):
calendar_date = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CALENDAR_TODAY))
calendar_date.click()
def select_suggested_dates(self):
btn = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ACCEPT_SUGGESTED_DATES_BUTTON))
# use this instead of btn.click() since the button is out of view
self.driver.execute_script("return arguments[0].click();", btn)
def select_first_course(self):
course_cards = self.wait.until(expected_conditions.visibility_of_all_elements_located(LearningCircleCreationPageLocators.COURSE_CARDS))
self.wait.until(expected_conditions.text_to_be_present_in_element(LearningCircleCreationPageLocators.FIRST_COURSE_TITLE, "Academic Writing"))
course_select_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.FIRST_COURSE_BUTTON))
# button is out of view
self.driver.execute_script("return arguments[0].click();", course_select_button)
# wait until search container is gone
self.wait.until_not(expected_conditions.presence_of_element_located((By.CSS_SELECTOR, '.search-container')))
        remove_link = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.REMOVE_COURSE_SELECTION_LINK))
assert 'Remove selection' in remove_link.text
def fill_city_select_field(self, location):
city_select = self.wait.until(expected_conditions.visibility_of_element_located(LearningCircleCreationPageLocators.CITY_SELECT_INPUT))
city_select.send_keys(location)
self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.CITY_SELECT_OPTION))
city_select.send_keys(Keys.ENTER)
def click_next_button(self):
next_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.NEXT_TAB_BUTTON))
next_button.click()
def click_publish_button(self):
publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.PUBLISH_BUTTON))
publish_button.click()
def click_save_button(self):
publish_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SAVE_BUTTON))
publish_button.click()
def click_modal_button(self):
modal_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.MODAL_BUTTON))
modal_button.click()
def click_schedule_meetings_button(self):
meetings_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.SCHEDULE_MEETINGS_BUTTON))
meetings_button.click()
def click_login_link(self):
self.driver.find_element_by_css_selector('.registration-modal-content button:first-child').click()
def fill_out_login_modal(self, user_data):
self.fill_text_field(RegistrationModalLocators.EMAIL_FIELD, user_data["email"])
self.fill_text_field(RegistrationModalLocators.PASSWORD_FIELD, user_data["password"])
self.driver.find_element(*RegistrationModalLocators.SUBMIT_BUTTON).click()
def go_to_tab_1(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_1))
tab_button.click()
def go_to_tab_2(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_2))
tab_button.click()
def go_to_tab_3(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_3))
tab_button.click()
def go_to_tab_4(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_4))
tab_button.click()
def go_to_tab_5(self):
tab_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.TAB_5))
tab_button.click()
def close_alert(self):
close_button = self.wait.until(expected_conditions.element_to_be_clickable(LearningCircleCreationPageLocators.ALERT_CLOSE_BUTTON))
close_button.click()
| true
| true
|
f71589db678f6272d81bf39a0e17b2bd21472491
| 8,808
|
py
|
Python
|
postman/forms.py
|
StriveForBest/django-postman
|
25f5fcf5a6d54dbb22b393432701652c21e49552
|
[
"BSD-3-Clause"
] | null | null | null |
postman/forms.py
|
StriveForBest/django-postman
|
25f5fcf5a6d54dbb22b393432701652c21e49552
|
[
"BSD-3-Clause"
] | null | null | null |
postman/forms.py
|
StriveForBest/django-postman
|
25f5fcf5a6d54dbb22b393432701652c21e49552
|
[
"BSD-3-Clause"
] | 2
|
2015-04-30T13:46:16.000Z
|
2019-09-16T06:55:14.000Z
|
"""
You may define your own custom forms, based or inspired by the following ones.
Examples of customization:
recipients = CommaSeparatedUserField(label=("Recipients", "Recipient"),
min=2,
max=5,
user_filter=my_user_filter,
channel='my_channel',
)
can_overwrite_limits = False
exchange_filter = staticmethod(my_exchange_filter)
"""
from __future__ import unicode_literals
from django import forms
from django.conf import settings
try:
from django.contrib.auth import get_user_model # Django 1.5
except ImportError:
from postman.future_1_5 import get_user_model
from django.db import transaction
from django.utils.translation import ugettext, ugettext_lazy as _
from postman.fields import CommaSeparatedUserField
from postman.models import Message
from postman.utils import WRAP_WIDTH
class BaseWriteForm(forms.ModelForm):
"""The base class for other forms."""
class Meta:
model = Message
fields = ('body',)
widgets = {
# for better confort, ensure a 'cols' of at least
# the 'width' of the body quote formatter.
'body': forms.Textarea(attrs={'cols': WRAP_WIDTH, 'rows': 12}),
}
error_css_class = 'error'
required_css_class = 'required'
def __init__(self, *args, **kwargs):
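        """Pop the postman-specific keyword arguments (sender, exchange_filter,
        user_filter, max, channel, site) before initializing the ModelForm."""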
sender = kwargs.pop('sender', None)
exchange_filter = kwargs.pop('exchange_filter', None)
user_filter = kwargs.pop('user_filter', None)
max = kwargs.pop('max', None)
channel = kwargs.pop('channel', None)
self.site = kwargs.pop('site', None)
super(BaseWriteForm, self).__init__(*args, **kwargs)
self.fields['body'].widget.attrs['placeholder'] = 'Write a message'
if 'subject' in self.fields:
self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
self.instance.sender = sender if (sender and sender.is_authenticated()) else None
if exchange_filter:
self.exchange_filter = exchange_filter
if 'recipients' in self.fields:
self.fields['recipients'].widget.attrs['placeholder'] = 'Recipients'
if user_filter and hasattr(self.fields['recipients'], 'user_filter'):
self.fields['recipients'].user_filter = user_filter
if getattr(settings, 'POSTMAN_DISALLOW_MULTIRECIPIENTS', False):
max = 1
if max is not None and hasattr(self.fields['recipients'], 'set_max') \
and getattr(self, 'can_overwrite_limits', True):
self.fields['recipients'].set_max(max)
if channel and hasattr(self.fields['recipients'], 'set_arg'):
self.fields['recipients'].set_arg(channel)
error_messages = {
'filtered': _("Writing to some users is not possible: {users}."),
'filtered_user': _("{username}"),
'filtered_user_with_reason': _("{username} ({reason})"),
}
def clean_recipients(self):
"""Check no filter prohibit the exchange."""
recipients = self.cleaned_data['recipients']
exchange_filter = getattr(self, 'exchange_filter', None)
if exchange_filter:
errors = []
filtered_names = []
recipients_list = recipients[:]
for u in recipients_list:
try:
reason = exchange_filter(self.instance.sender, u, recipients_list)
if reason is not None:
recipients.remove(u)
filtered_names.append(
self.error_messages[
'filtered_user_with_reason' if reason else 'filtered_user'
].format(username=u.get_username(), reason=reason)
)
except forms.ValidationError as e:
recipients.remove(u)
errors.extend(e.messages)
if filtered_names:
errors.append(self.error_messages['filtered'].format(users=', '.join(filtered_names)))
if errors:
raise forms.ValidationError(errors)
return recipients
def save(self, recipient=None, parent=None, auto_moderators=[]):
"""
Save as many messages as there are recipients.
Additional actions:
- If it's a reply, build a conversation
- Call auto-moderators
- Notify parties if needed
Return False if one of the messages is rejected.
"""
recipients = self.cleaned_data.get('recipients', [])
if parent and not parent.thread_id: # at the very first reply, make it a conversation
parent.thread = parent
parent.save()
# but delay the setting of parent.replied_at to the moderation step
if parent:
self.instance.parent = parent
self.instance.thread_id = parent.thread_id
initial_moderation = self.instance.get_moderation()
initial_dates = self.instance.get_dates()
initial_status = self.instance.moderation_status
if recipient:
if isinstance(recipient, get_user_model()) and recipient in recipients:
recipients.remove(recipient)
recipients.insert(0, recipient)
is_successful = True
for r in recipients:
if isinstance(r, get_user_model()):
self.instance.recipient = r
else:
self.instance.recipient = None
self.instance.email = r
self.instance.pk = None # force_insert=True is not accessible from here
self.instance.auto_moderate(auto_moderators)
self.instance.clean_moderation(initial_status)
self.instance.clean_for_visitor()
super(BaseWriteForm, self).save()
if self.instance.is_rejected():
is_successful = False
self.instance.update_parent(initial_status)
self.instance.notify_users(initial_status, self.site)
# some resets for next reuse
if not isinstance(r, get_user_model()):
self.instance.email = ''
self.instance.set_moderation(*initial_moderation)
self.instance.set_dates(*initial_dates)
return is_successful
# commit_on_success() is deprecated in Django 1.6 and will be removed in Django 1.8
save = transaction.atomic(save) if hasattr(transaction, 'atomic') else transaction.commit_on_success(save)
class WriteForm(BaseWriteForm):
"""The form for an authenticated user, to compose a message."""
# specify help_text only to avoid the possible default 'Enter text to search.' of ajax_select v1.2.5
recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='')
class Meta(BaseWriteForm.Meta):
fields = ('recipients', 'subject', 'body')
class AnonymousWriteForm(BaseWriteForm):
"""The form for an anonymous user, to compose a message."""
# The 'max' customization should not be permitted here.
# The features available to anonymous users should be kept to the strict minimum.
can_overwrite_limits = False
email = forms.EmailField(label=_("Email"))
recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='', max=1) # one recipient is enough
class Meta(BaseWriteForm.Meta):
fields = ('email', 'recipients', 'subject', 'body')
class BaseReplyForm(BaseWriteForm):
"""The base class for a reply to a message."""
def __init__(self, *args, **kwargs):
recipient = kwargs.pop('recipient', None)
super(BaseReplyForm, self).__init__(*args, **kwargs)
self.recipient = recipient
def clean(self):
"""Check that the recipient is correctly initialized."""
if not self.recipient:
raise forms.ValidationError(ugettext("Undefined recipient."))
return super(BaseReplyForm, self).clean()
def save(self, *args, **kwargs):
return super(BaseReplyForm, self).save(self.recipient, *args, **kwargs)
class QuickReplyForm(BaseReplyForm):
"""
The form to use in the view of a message or a conversation, for a quick reply.
The recipient is imposed and a default value for the subject will be provided.
"""
pass
allow_copies = not getattr(settings, 'POSTMAN_DISALLOW_COPIES_ON_REPLY', False)
class FullReplyForm(BaseReplyForm):
"""The complete reply form."""
if allow_copies:
recipients = CommaSeparatedUserField(
label=(_("Additional recipients"), _("Additional recipient")), help_text='', required=False)
class Meta(BaseReplyForm.Meta):
fields = (['recipients'] if allow_copies else []) + ['subject', 'body']
| 40.036364
| 129
| 0.63431
|
from __future__ import unicode_literals
from django import forms
from django.conf import settings
try:
from django.contrib.auth import get_user_model
except ImportError:
from postman.future_1_5 import get_user_model
from django.db import transaction
from django.utils.translation import ugettext, ugettext_lazy as _
from postman.fields import CommaSeparatedUserField
from postman.models import Message
from postman.utils import WRAP_WIDTH
class BaseWriteForm(forms.ModelForm):
class Meta:
model = Message
fields = ('body',)
widgets = {
'body': forms.Textarea(attrs={'cols': WRAP_WIDTH, 'rows': 12}),
}
error_css_class = 'error'
required_css_class = 'required'
def __init__(self, *args, **kwargs):
sender = kwargs.pop('sender', None)
exchange_filter = kwargs.pop('exchange_filter', None)
user_filter = kwargs.pop('user_filter', None)
max = kwargs.pop('max', None)
channel = kwargs.pop('channel', None)
self.site = kwargs.pop('site', None)
super(BaseWriteForm, self).__init__(*args, **kwargs)
self.fields['body'].widget.attrs['placeholder'] = 'Write a message'
if 'subject' in self.fields:
self.fields['subject'].widget.attrs['placeholder'] = 'Subject'
self.instance.sender = sender if (sender and sender.is_authenticated()) else None
if exchange_filter:
self.exchange_filter = exchange_filter
if 'recipients' in self.fields:
self.fields['recipients'].widget.attrs['placeholder'] = 'Recipients'
if user_filter and hasattr(self.fields['recipients'], 'user_filter'):
self.fields['recipients'].user_filter = user_filter
if getattr(settings, 'POSTMAN_DISALLOW_MULTIRECIPIENTS', False):
max = 1
if max is not None and hasattr(self.fields['recipients'], 'set_max') \
and getattr(self, 'can_overwrite_limits', True):
self.fields['recipients'].set_max(max)
if channel and hasattr(self.fields['recipients'], 'set_arg'):
self.fields['recipients'].set_arg(channel)
error_messages = {
'filtered': _("Writing to some users is not possible: {users}."),
'filtered_user': _("{username}"),
'filtered_user_with_reason': _("{username} ({reason})"),
}
def clean_recipients(self):
recipients = self.cleaned_data['recipients']
exchange_filter = getattr(self, 'exchange_filter', None)
if exchange_filter:
errors = []
filtered_names = []
recipients_list = recipients[:]
for u in recipients_list:
try:
reason = exchange_filter(self.instance.sender, u, recipients_list)
if reason is not None:
recipients.remove(u)
filtered_names.append(
self.error_messages[
'filtered_user_with_reason' if reason else 'filtered_user'
].format(username=u.get_username(), reason=reason)
)
except forms.ValidationError as e:
recipients.remove(u)
errors.extend(e.messages)
if filtered_names:
errors.append(self.error_messages['filtered'].format(users=', '.join(filtered_names)))
if errors:
raise forms.ValidationError(errors)
return recipients
def save(self, recipient=None, parent=None, auto_moderators=[]):
recipients = self.cleaned_data.get('recipients', [])
if parent and not parent.thread_id:
parent.thread = parent
parent.save()
if parent:
self.instance.parent = parent
self.instance.thread_id = parent.thread_id
initial_moderation = self.instance.get_moderation()
initial_dates = self.instance.get_dates()
initial_status = self.instance.moderation_status
if recipient:
if isinstance(recipient, get_user_model()) and recipient in recipients:
recipients.remove(recipient)
recipients.insert(0, recipient)
is_successful = True
for r in recipients:
if isinstance(r, get_user_model()):
self.instance.recipient = r
else:
self.instance.recipient = None
self.instance.email = r
self.instance.pk = None
self.instance.auto_moderate(auto_moderators)
self.instance.clean_moderation(initial_status)
self.instance.clean_for_visitor()
super(BaseWriteForm, self).save()
if self.instance.is_rejected():
is_successful = False
self.instance.update_parent(initial_status)
self.instance.notify_users(initial_status, self.site)
if not isinstance(r, get_user_model()):
self.instance.email = ''
self.instance.set_moderation(*initial_moderation)
self.instance.set_dates(*initial_dates)
return is_successful
save = transaction.atomic(save) if hasattr(transaction, 'atomic') else transaction.commit_on_success(save)
class WriteForm(BaseWriteForm):
recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='')
class Meta(BaseWriteForm.Meta):
fields = ('recipients', 'subject', 'body')
class AnonymousWriteForm(BaseWriteForm):
can_overwrite_limits = False
email = forms.EmailField(label=_("Email"))
recipients = CommaSeparatedUserField(label=(_("Recipients"), _("Recipient")), help_text='', max=1)
class Meta(BaseWriteForm.Meta):
fields = ('email', 'recipients', 'subject', 'body')
class BaseReplyForm(BaseWriteForm):
def __init__(self, *args, **kwargs):
recipient = kwargs.pop('recipient', None)
super(BaseReplyForm, self).__init__(*args, **kwargs)
self.recipient = recipient
def clean(self):
if not self.recipient:
raise forms.ValidationError(ugettext("Undefined recipient."))
return super(BaseReplyForm, self).clean()
def save(self, *args, **kwargs):
return super(BaseReplyForm, self).save(self.recipient, *args, **kwargs)
class QuickReplyForm(BaseReplyForm):
pass
allow_copies = not getattr(settings, 'POSTMAN_DISALLOW_COPIES_ON_REPLY', False)
class FullReplyForm(BaseReplyForm):
if allow_copies:
recipients = CommaSeparatedUserField(
label=(_("Additional recipients"), _("Additional recipient")), help_text='', required=False)
class Meta(BaseReplyForm.Meta):
fields = (['recipients'] if allow_copies else []) + ['subject', 'body']
| true
| true
|
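The forms above are designed to be driven from a Django view: instantiate the form with the request data plus the extra kwargs consumed by BaseWriteForm.__init__ (sender, site, filters, ...), then call save(), which returns False if any generated message was auto-rejected. A minimal sketch of that flow, assuming a project with django-postman installed; the view name, template path and URL name are illustrative, not part of the app:

# Hedged sketch of wiring WriteForm into a view (names other than WriteForm
# itself are assumptions for the example).
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render

from postman.forms import WriteForm


@login_required
def compose(request):
    # 'sender' is popped by BaseWriteForm.__init__ and becomes instance.sender.
    form = WriteForm(request.POST or None, sender=request.user)
    if request.method == 'POST' and form.is_valid():
        if form.save():  # False means at least one message was auto-rejected
            return redirect('postman_sent')
    return render(request, 'postman/write.html', {'form': form})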
f7158ab6ed278e6df18c8b2e6bfd09087bd18ae7
| 426
|
py
|
Python
|
Util/EnvUtil.py
|
xrkk/proxy_pool
|
7e4f732041f51fa6aa9a2e906ad9e7cab880f2b6
|
[
"MIT"
] | null | null | null |
Util/EnvUtil.py
|
xrkk/proxy_pool
|
7e4f732041f51fa6aa9a2e906ad9e7cab880f2b6
|
[
"MIT"
] | null | null | null |
Util/EnvUtil.py
|
xrkk/proxy_pool
|
7e4f732041f51fa6aa9a2e906ad9e7cab880f2b6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: EnvUtil
Description : 环境相关
Author : J_hao
date: 2017/9/18
-------------------------------------------------
Change Activity:
2017/9/18: 区分Python版本
-------------------------------------------------
"""
__author__ = 'J_hao'
import sys
PY3 = sys.version_info >= (3,)
| 25.058824
| 50
| 0.319249
|
__author__ = 'J_hao'
import sys
PY3 = sys.version_info >= (3,)
| true
| true
|
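PY3 is the module's single export and is meant to gate version-specific branches elsewhere in the project. A minimal sketch of the usual pattern, with illustrative names that are not taken from proxy_pool:

# Hedged example of branching on a PY3 flag for 2/3-compatible imports.
import sys

PY3 = sys.version_info >= (3,)

if PY3:
    from urllib.request import urlopen  # Python 3 location
else:
    from urllib2 import urlopen  # Python 2 location

def fetch(url):
    # Both branches expose the same urlopen() call for this simple case.
    return urlopen(url).read()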
f7158afa9cbb6416fad2e41340029a8fbbd333f2
| 12,471
|
py
|
Python
|
tfx/orchestration/kubeflow/kubeflow_dag_runner_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/kubeflow/kubeflow_dag_runner_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/kubeflow/kubeflow_dag_runner_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.kubeflow.kubeflow_dag_runner."""
import json
import os
import tarfile
from typing import List
from kfp import onprem
import tensorflow as tf
from tfx.components.statistics_gen import component as statistics_gen_component
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.decorators import FinalStatusStr
from tfx.proto import example_gen_pb2
from tfx.types import component_spec
from tfx.utils import telemetry_utils
from tfx.utils import test_case_utils
import yaml
from ml_metadata.proto import metadata_store_pb2
@component
def _say_hi(status: Parameter[str]):
print(status)
# 2-step pipeline under test.
def _two_step_pipeline() -> tfx_pipeline.Pipeline:
default_input_config = json.dumps({
'splits': [{
'name': 'single_split',
'pattern': 'SELECT * FROM default-table'
}]
})
input_config = data_types.RuntimeParameter(
name='input_config', ptype=str, default=default_input_config)
example_gen = big_query_example_gen_component.BigQueryExampleGen(
input_config=input_config, output_config=example_gen_pb2.Output())
statistics_gen = statistics_gen_component.StatisticsGen(
examples=example_gen.outputs['examples'])
return tfx_pipeline.Pipeline(
pipeline_name='two_step_pipeline',
pipeline_root='pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[example_gen, statistics_gen],
)
class _DummySpec(component_spec.ComponentSpec):
INPUTS = {}
OUTPUTS = {}
PARAMETERS = {}
class _DummyComponent(base_component.BaseComponent):
SPEC_CLASS = _DummySpec
EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
image='dummy:latest', command=['ls'])
def __init__(self):
super().__init__(_DummySpec())
def _container_component_pipeline() -> tfx_pipeline.Pipeline:
return tfx_pipeline.Pipeline(
pipeline_name='container_component_pipeline',
pipeline_root='pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[_DummyComponent()],
)
class KubeflowDagRunnerTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
self._source_data_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata')
self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))
def _compare_tfx_ir_against_testdata(self, args: List[str], golden_file: str):
index_of_tfx_ir_flag = args.index('--tfx_ir')
self.assertAllGreater(len(args), index_of_tfx_ir_flag)
real_tfx_ir = json.loads(args[index_of_tfx_ir_flag + 1])
real_tfx_ir_str = json.dumps(real_tfx_ir, sort_keys=True)
with open(os.path.join(self._source_data_dir,
golden_file)) as tfx_ir_json_file:
formatted_tfx_ir = json.dumps(json.load(tfx_ir_json_file), sort_keys=True)
self.assertEqual(real_tfx_ir_str, formatted_tfx_ir)
def testTwoStepPipeline(self):
"""Sanity-checks the construction and dependencies for a 2-step pipeline."""
kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
file_path = os.path.join(self.tmp_dir, 'two_step_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
big_query_container = [
c for c in containers if c['name'] == 'bigqueryexamplegen'
]
self.assertEqual(1, len(big_query_container))
self.assertEqual([
'python',
'-m',
'tfx.orchestration.kubeflow.container_entrypoint',
], big_query_container[0]['container']['command'])
self.assertIn('--tfx_ir', big_query_container[0]['container']['args'])
self.assertIn('--node_id', big_query_container[0]['container']['args'])
self._compare_tfx_ir_against_testdata(
big_query_container[0]['container']['args'],
'two_step_pipeline_post_dehydrate_ir.json')
statistics_gen_container = [
c for c in containers if c['name'] == 'statisticsgen'
]
self.assertEqual(1, len(statistics_gen_container))
# Ensure the pod labels are correctly appended.
metadata = [
c['metadata'] for c in pipeline['spec']['templates'] if 'dag' not in c
]
for m in metadata:
self.assertEqual('tfx', m['labels'][telemetry_utils.LABEL_KFP_SDK_ENV])
# Ensure dependencies between components are captured.
dag = [c for c in pipeline['spec']['templates'] if 'dag' in c]
self.assertEqual(1, len(dag))
self.assertEqual(
{
'tasks': [{
'name': 'bigqueryexamplegen',
'template': 'bigqueryexamplegen',
'arguments': {
'parameters': [{
'name': 'input_config',
'value': '{{inputs.parameters.input_config}}'
}, {
'name': 'pipeline-root',
'value': '{{inputs.parameters.pipeline-root}}'
}]
}
}, {
'name': 'statisticsgen',
'template': 'statisticsgen',
'arguments': {
'parameters': [{
'name': 'pipeline-root',
'value': '{{inputs.parameters.pipeline-root}}'
}]
},
'dependencies': ['bigqueryexamplegen'],
}]
}, dag[0]['dag'])
def testDefaultPipelineOperatorFuncs(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
def testMountGcpServiceAccount(self):
kubeflow_dag_runner.KubeflowDagRunner(
config=kubeflow_dag_runner.KubeflowDagRunnerConfig(
pipeline_operator_funcs=kubeflow_dag_runner
.get_default_pipeline_operator_funcs(use_gcp_sa=True))).run(
_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
# Check that each container has default GCP credentials.
container_0 = containers[0]
env = [
env for env in container_0['container']['env']
if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
]
self.assertEqual(1, len(env))
self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
env[0]['value'])
    container_1 = containers[1]
env = [
env for env in container_1['container']['env']
if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
]
self.assertEqual(1, len(env))
self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
env[0]['value'])
def testVolumeMountingPipelineOperatorFuncs(self):
mount_volume_op = onprem.mount_pvc('my-persistent-volume-claim',
'my-volume-name',
'/mnt/volume-mount-path')
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
pipeline_operator_funcs=[mount_volume_op])
kubeflow_dag_runner.KubeflowDagRunner(config=config).run(
_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
container_templates = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(container_templates))
volumes = [{
'name': 'my-volume-name',
'persistentVolumeClaim': {
'claimName': 'my-persistent-volume-claim'
}
}]
# Check that the PVC is specified for kfp<=0.1.31.1.
if 'volumes' in pipeline['spec']:
self.assertEqual(volumes, pipeline['spec']['volumes'])
for template in container_templates:
# Check that each container has the volume mounted.
self.assertEqual([{
'name': 'my-volume-name',
'mountPath': '/mnt/volume-mount-path'
}], template['container']['volumeMounts'])
# Check that each template has the PVC specified for kfp>=0.1.31.2.
if 'volumes' in template:
self.assertEqual(volumes, template['volumes'])
def testContainerComponent(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_container_component_pipeline())
file_path = os.path.join(self.tmp_dir,
'container_component_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertLen(containers, 1)
component_args = containers[0]['container']['args']
self.assertIn('--node_id', component_args)
def testExitHandler(self):
dag_runner = kubeflow_dag_runner.KubeflowDagRunner()
dag_runner.set_exit_handler(_say_hi(status=FinalStatusStr()))
pipeline = _container_component_pipeline()
pipeline.enable_cache = True
dag_runner.run(pipeline)
file_path = os.path.join(self.tmp_dir,
'container_component_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
self.assertIn('onExit', pipeline['spec'])
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertLen(containers, 2)
exit_component_args = ' '.join(containers[1]['container']['args'])
self.assertIn('{{workflow.status}}', exit_component_args)
self.assertNotIn('enableCache', exit_component_args)
first_component_args = ' '.join(containers[0]['container']['args'])
self.assertNotIn('{{workflow.status}}', first_component_args)
self.assertIn('enableCache', first_component_args)
if __name__ == '__main__':
tf.test.main()
| 37.790909
| 106
| 0.667228
|
import json
import os
import tarfile
from typing import List
from kfp import onprem
import tensorflow as tf
from tfx.components.statistics_gen import component as statistics_gen_component
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.components.base import base_component
from tfx.dsl.io import fileio
from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.decorators import FinalStatusStr
from tfx.proto import example_gen_pb2
from tfx.types import component_spec
from tfx.utils import telemetry_utils
from tfx.utils import test_case_utils
import yaml
from ml_metadata.proto import metadata_store_pb2
@component
def _say_hi(status: Parameter[str]):
print(status)
def _two_step_pipeline() -> tfx_pipeline.Pipeline:
default_input_config = json.dumps({
'splits': [{
'name': 'single_split',
'pattern': 'SELECT * FROM default-table'
}]
})
input_config = data_types.RuntimeParameter(
name='input_config', ptype=str, default=default_input_config)
example_gen = big_query_example_gen_component.BigQueryExampleGen(
input_config=input_config, output_config=example_gen_pb2.Output())
statistics_gen = statistics_gen_component.StatisticsGen(
examples=example_gen.outputs['examples'])
return tfx_pipeline.Pipeline(
pipeline_name='two_step_pipeline',
pipeline_root='pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[example_gen, statistics_gen],
)
class _DummySpec(component_spec.ComponentSpec):
INPUTS = {}
OUTPUTS = {}
PARAMETERS = {}
class _DummyComponent(base_component.BaseComponent):
SPEC_CLASS = _DummySpec
EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
image='dummy:latest', command=['ls'])
def __init__(self):
super().__init__(_DummySpec())
def _container_component_pipeline() -> tfx_pipeline.Pipeline:
return tfx_pipeline.Pipeline(
pipeline_name='container_component_pipeline',
pipeline_root='pipeline_root',
metadata_connection_config=metadata_store_pb2.ConnectionConfig(),
components=[_DummyComponent()],
)
class KubeflowDagRunnerTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
self._source_data_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata')
self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))
def _compare_tfx_ir_against_testdata(self, args: List[str], golden_file: str):
index_of_tfx_ir_flag = args.index('--tfx_ir')
self.assertAllGreater(len(args), index_of_tfx_ir_flag)
real_tfx_ir = json.loads(args[index_of_tfx_ir_flag + 1])
real_tfx_ir_str = json.dumps(real_tfx_ir, sort_keys=True)
with open(os.path.join(self._source_data_dir,
golden_file)) as tfx_ir_json_file:
formatted_tfx_ir = json.dumps(json.load(tfx_ir_json_file), sort_keys=True)
self.assertEqual(real_tfx_ir_str, formatted_tfx_ir)
def testTwoStepPipeline(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
file_path = os.path.join(self.tmp_dir, 'two_step_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
big_query_container = [
c for c in containers if c['name'] == 'bigqueryexamplegen'
]
self.assertEqual(1, len(big_query_container))
self.assertEqual([
'python',
'-m',
'tfx.orchestration.kubeflow.container_entrypoint',
], big_query_container[0]['container']['command'])
self.assertIn('--tfx_ir', big_query_container[0]['container']['args'])
self.assertIn('--node_id', big_query_container[0]['container']['args'])
self._compare_tfx_ir_against_testdata(
big_query_container[0]['container']['args'],
'two_step_pipeline_post_dehydrate_ir.json')
statistics_gen_container = [
c for c in containers if c['name'] == 'statisticsgen'
]
self.assertEqual(1, len(statistics_gen_container))
metadata = [
c['metadata'] for c in pipeline['spec']['templates'] if 'dag' not in c
]
for m in metadata:
self.assertEqual('tfx', m['labels'][telemetry_utils.LABEL_KFP_SDK_ENV])
dag = [c for c in pipeline['spec']['templates'] if 'dag' in c]
self.assertEqual(1, len(dag))
self.assertEqual(
{
'tasks': [{
'name': 'bigqueryexamplegen',
'template': 'bigqueryexamplegen',
'arguments': {
'parameters': [{
'name': 'input_config',
'value': '{{inputs.parameters.input_config}}'
}, {
'name': 'pipeline-root',
'value': '{{inputs.parameters.pipeline-root}}'
}]
}
}, {
'name': 'statisticsgen',
'template': 'statisticsgen',
'arguments': {
'parameters': [{
'name': 'pipeline-root',
'value': '{{inputs.parameters.pipeline-root}}'
}]
},
'dependencies': ['bigqueryexamplegen'],
}]
}, dag[0]['dag'])
def testDefaultPipelineOperatorFuncs(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
def testMountGcpServiceAccount(self):
kubeflow_dag_runner.KubeflowDagRunner(
config=kubeflow_dag_runner.KubeflowDagRunnerConfig(
pipeline_operator_funcs=kubeflow_dag_runner
.get_default_pipeline_operator_funcs(use_gcp_sa=True))).run(
_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(containers))
container_0 = containers[0]
env = [
env for env in container_0['container']['env']
if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
]
self.assertEqual(1, len(env))
self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
env[0]['value'])
    container_1 = containers[1]
env = [
env for env in container_1['container']['env']
if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'
]
self.assertEqual(1, len(env))
self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',
env[0]['value'])
def testVolumeMountingPipelineOperatorFuncs(self):
mount_volume_op = onprem.mount_pvc('my-persistent-volume-claim',
'my-volume-name',
'/mnt/volume-mount-path')
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
pipeline_operator_funcs=[mount_volume_op])
kubeflow_dag_runner.KubeflowDagRunner(config=config).run(
_two_step_pipeline())
file_path = 'two_step_pipeline.tar.gz'
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
container_templates = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertEqual(2, len(container_templates))
volumes = [{
'name': 'my-volume-name',
'persistentVolumeClaim': {
'claimName': 'my-persistent-volume-claim'
}
}]
if 'volumes' in pipeline['spec']:
self.assertEqual(volumes, pipeline['spec']['volumes'])
for template in container_templates:
self.assertEqual([{
'name': 'my-volume-name',
'mountPath': '/mnt/volume-mount-path'
}], template['container']['volumeMounts'])
if 'volumes' in template:
self.assertEqual(volumes, template['volumes'])
def testContainerComponent(self):
kubeflow_dag_runner.KubeflowDagRunner().run(_container_component_pipeline())
file_path = os.path.join(self.tmp_dir,
'container_component_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertLen(containers, 1)
component_args = containers[0]['container']['args']
self.assertIn('--node_id', component_args)
def testExitHandler(self):
dag_runner = kubeflow_dag_runner.KubeflowDagRunner()
dag_runner.set_exit_handler(_say_hi(status=FinalStatusStr()))
pipeline = _container_component_pipeline()
pipeline.enable_cache = True
dag_runner.run(pipeline)
file_path = os.path.join(self.tmp_dir,
'container_component_pipeline.tar.gz')
self.assertTrue(fileio.exists(file_path))
with tarfile.TarFile.open(file_path).extractfile(
'pipeline.yaml') as pipeline_file:
self.assertIsNotNone(pipeline_file)
pipeline = yaml.safe_load(pipeline_file)
self.assertIn('onExit', pipeline['spec'])
containers = [
c for c in pipeline['spec']['templates'] if 'container' in c
]
self.assertLen(containers, 2)
exit_component_args = ' '.join(containers[1]['container']['args'])
self.assertIn('{{workflow.status}}', exit_component_args)
self.assertNotIn('enableCache', exit_component_args)
first_component_args = ' '.join(containers[0]['container']['args'])
self.assertNotIn('{{workflow.status}}', first_component_args)
self.assertIn('enableCache', first_component_args)
if __name__ == '__main__':
tf.test.main()
| true
| true
|
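Every test above follows the same compile-then-inspect pattern: run a pipeline through KubeflowDagRunner, then open the emitted .tar.gz and assert on the pipeline.yaml workflow spec. Stripped of the assertions, the core usage looks roughly like the sketch below; the default KubeflowDagRunnerConfig() and the output filename simply mirror what the tests rely on, and _two_step_pipeline stands in for any TFX pipeline-producing function:

# Rough sketch of the compile-and-inspect flow exercised by the tests.
import tarfile
import yaml

from tfx.orchestration.kubeflow import kubeflow_dag_runner

runner = kubeflow_dag_runner.KubeflowDagRunner(
    config=kubeflow_dag_runner.KubeflowDagRunnerConfig())
runner.run(_two_step_pipeline())  # writes two_step_pipeline.tar.gz to the cwd

with tarfile.TarFile.open('two_step_pipeline.tar.gz').extractfile(
    'pipeline.yaml') as pipeline_file:
  workflow = yaml.safe_load(pipeline_file)  # Argo workflow dict, as asserted on above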
f7158c0d4644817021a89da48a6f1e663928ae91
| 2,766
|
py
|
Python
|
catalyst/dl/utils/trace.py
|
162/catalyst
|
b4ba36be52c51160e0fabecdcb084a8d5cd96cb7
|
[
"MIT"
] | null | null | null |
catalyst/dl/utils/trace.py
|
162/catalyst
|
b4ba36be52c51160e0fabecdcb084a8d5cd96cb7
|
[
"MIT"
] | null | null | null |
catalyst/dl/utils/trace.py
|
162/catalyst
|
b4ba36be52c51160e0fabecdcb084a8d5cd96cb7
|
[
"MIT"
] | null | null | null |
from typing import Type
import torch
from torch import nn
from torch.jit import ScriptModule
from catalyst.dl.core import Experiment, Runner
class _ForwardOverrideModel(nn.Module):
"""
Model that calls specified method instead of forward
(Workaround, single method tracing is not supported)
"""
def __init__(self, model, method_name):
super().__init__()
self.model = model
self.method = method_name
def forward(self, *args, **kwargs):
return getattr(self.model, self.method)(*args, **kwargs)
class _TracingModelWrapper(nn.Module):
"""
Wrapper that traces model with batch instead of calling it
(Workaround, to use native model batch handler)
"""
def __init__(self, model, method_name):
super().__init__()
self.method_name = method_name
self.model = model
self.tracing_result: ScriptModule
def __call__(self, *args, **kwargs):
method_model = _ForwardOverrideModel(
self.model, self.method_name
)
self.tracing_result = \
torch.jit.trace(
method_model,
*args, **kwargs
)
def _get_native_batch(
experiment: Experiment, stage: str
):
"""Returns dataset from first loader provided by experiment"""
loaders = experiment.get_loaders(stage)
assert loaders, \
"Experiment must have at least one loader to support tracing"
# Take first loader
loader = next(iter(loaders.values()))
dataset = loader.dataset
collate_fn = loader.collate_fn
sample = collate_fn([dataset[0]])
return sample
def trace_model(
model: nn.Module,
experiment: Experiment,
runner_type: Type[Runner],
method_name: str = "forward"
) -> ScriptModule:
"""
    Traces a model using its native experiment and runner.
Args:
model: Model to trace
NOTICE: will be switched to eval and
requires_grad=False will be set on all params
experiment: Native experiment that was used to train model
runner_type: Model's native runner that was used to train model
method_name: Model's method name that will be
used as entrypoint during tracing
Returns:
Traced model ScriptModule
"""
stage = list(experiment.stages)[0]
model.eval()
for p in model.parameters():
p.requires_grad_(False)
tracer = _TracingModelWrapper(model, method_name)
runner: Runner = runner_type(tracer.cpu(), torch.device("cpu"))
batch = _get_native_batch(experiment, stage)
batch = runner._batch2device(batch, device=runner.device)
runner.predict_batch(batch)
return tracer.tracing_result
__all__ = ["trace_model"]
| 25.850467
| 73
| 0.656905
|
from typing import Type
import torch
from torch import nn
from torch.jit import ScriptModule
from catalyst.dl.core import Experiment, Runner
class _ForwardOverrideModel(nn.Module):
def __init__(self, model, method_name):
super().__init__()
self.model = model
self.method = method_name
def forward(self, *args, **kwargs):
return getattr(self.model, self.method)(*args, **kwargs)
class _TracingModelWrapper(nn.Module):
def __init__(self, model, method_name):
super().__init__()
self.method_name = method_name
self.model = model
self.tracing_result: ScriptModule
def __call__(self, *args, **kwargs):
method_model = _ForwardOverrideModel(
self.model, self.method_name
)
self.tracing_result = \
torch.jit.trace(
method_model,
*args, **kwargs
)
def _get_native_batch(
experiment: Experiment, stage: str
):
loaders = experiment.get_loaders(stage)
assert loaders, \
"Experiment must have at least one loader to support tracing"
loader = next(iter(loaders.values()))
dataset = loader.dataset
collate_fn = loader.collate_fn
sample = collate_fn([dataset[0]])
return sample
def trace_model(
model: nn.Module,
experiment: Experiment,
runner_type: Type[Runner],
method_name: str = "forward"
) -> ScriptModule:
stage = list(experiment.stages)[0]
model.eval()
for p in model.parameters():
p.requires_grad_(False)
tracer = _TracingModelWrapper(model, method_name)
runner: Runner = runner_type(tracer.cpu(), torch.device("cpu"))
batch = _get_native_batch(experiment, stage)
batch = runner._batch2device(batch, device=runner.device)
runner.predict_batch(batch)
return tracer.tracing_result
__all__ = ["trace_model"]
| true
| true
|
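A call site therefore needs the trained model together with the experiment and runner class that produced it. A hedged sketch, assuming a catalyst project where `model` and `experiment` already exist and SupervisedRunner was the training runner:

# Sketch only: `model` and `experiment` are assumed to exist in the caller's scope.
import torch
from catalyst.dl import SupervisedRunner
from catalyst.dl.utils.trace import trace_model

traced = trace_model(
    model=model,                  # switched to eval, grads disabled inside trace_model
    experiment=experiment,        # provides a native batch for tracing
    runner_type=SupervisedRunner,
    method_name="forward",
)
torch.jit.save(traced, "traced_model.pt")  # ScriptModule usable without Python sources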
f7158d1fdb4e339a2eeef76b607b2b96c5f92797
| 11,206
|
py
|
Python
|
tensorflow/python/distribute/multi_process_runner_test.py
|
Diva-Pant/tensorflow
|
f926d8c10efb07176ae559d0e098cdfdb4d03219
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/distribute/multi_process_runner_test.py
|
Diva-Pant/tensorflow
|
f926d8c10efb07176ae559d0e098cdfdb4d03219
|
[
"Apache-2.0"
] | 1
|
2020-08-12T09:47:19.000Z
|
2020-08-12T09:47:19.000Z
|
tensorflow/python/distribute/multi_process_runner_test.py
|
Diva-Pant/tensorflow
|
f926d8c10efb07176ae559d0e098cdfdb4d03219
|
[
"Apache-2.0"
] | 25
|
2020-08-31T12:21:19.000Z
|
2022-03-20T05:16:32.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import threading
import time
from absl import logging
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
def proc_func_with_barrier():
return multi_process_runner.barrier()
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_process_that_exits(self):
def func_to_exit_in_25_sec():
logging.error('foo')
time.sleep(100)
logging.error('bar')
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_25_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True,
max_run_time=25)
mpr.start()
stdout = mpr.join().stdout
self.assertLen([msg for msg in stdout if 'foo' in msg], 1)
self.assertLen([msg for msg in stdout if 'bar' in msg], 0)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(60)
mpr.terminate_all()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
mpr.join()
def test_barrier(self):
multi_process_runner.run(
proc_func_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.barrier()
def test_stdout_available_when_timeout(self):
def proc_func():
for i in range(50):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
time.sleep(1)
with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:
multi_process_runner.run(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
list_stdout=True,
timeout=5)
list_to_assert = cm.exception.mpr_result.stdout
for job in ['worker', 'ps']:
for iteration in range(0, 5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
if __name__ == '__main__':
multi_process_runner.test_main()
| 34.374233
| 80
| 0.663127
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import threading
import time
from absl import logging
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
def proc_func_with_barrier():
return multi_process_runner.barrier()
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_process_that_exits(self):
def func_to_exit_in_25_sec():
logging.error('foo')
time.sleep(100)
logging.error('bar')
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_25_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True,
max_run_time=25)
mpr.start()
stdout = mpr.join().stdout
self.assertLen([msg for msg in stdout if 'foo' in msg], 1)
self.assertLen([msg for msg in stdout if 'bar' in msg], 0)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(60)
mpr.terminate_all()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
mpr.join()
def test_barrier(self):
multi_process_runner.run(
proc_func_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.barrier()
def test_stdout_available_when_timeout(self):
def proc_func():
for i in range(50):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
time.sleep(1)
with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:
multi_process_runner.run(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
list_stdout=True,
timeout=5)
list_to_assert = cm.exception.mpr_result.stdout
for job in ['worker', 'ps']:
for iteration in range(0, 5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
if __name__ == '__main__':
multi_process_runner.test_main()
| true
| true
|
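Setting the assertions aside, the entry point these tests exercise is multi_process_runner.run with a cluster spec: the given function is launched once per task, each subprocess gets its own TF_CONFIG, and return values plus (optionally) captured stdout come back on the result object. A stripped-down sketch of that pattern, using only calls that appear in the tests above:

# One subprocess per task in the cluster spec; results collected in the parent.
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base


def proc_func():
  # Runs inside each subprocess with the task's own TF_CONFIG set.
  return multi_worker_test_base.get_task_type()


if __name__ == '__main__':
  # The guard matters because spawned subprocesses re-import this module;
  # the test file above delegates the entry point to multi_process_runner.test_main().
  mpr_result = multi_process_runner.run(
      proc_func,
      multi_worker_test_base.create_cluster_spec(num_workers=2),
      list_stdout=True)
  print(mpr_result.return_value)  # e.g. ['worker', 'worker']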
f7158d5e6cf2a8dfdb996beebce53453c96ec708
| 281
|
py
|
Python
|
lambda/index.py
|
sano307/lambda-container-demo
|
6c27c56819c9a3defb63bf26b4fd53bf6cdb71d3
|
[
"MIT"
] | null | null | null |
lambda/index.py
|
sano307/lambda-container-demo
|
6c27c56819c9a3defb63bf26b4fd53bf6cdb71d3
|
[
"MIT"
] | null | null | null |
lambda/index.py
|
sano307/lambda-container-demo
|
6c27c56819c9a3defb63bf26b4fd53bf6cdb71d3
|
[
"MIT"
] | 1
|
2021-07-18T03:52:40.000Z
|
2021-07-18T03:52:40.000Z
|
import json
import pandas as pd
def handler(event, context):
df = pd.DataFrame({"id": [1, 2], "value": ["foo", "boo"]})
print(df)
return {
"statusCode": 200,
"body": json.dumps({
"message": "This is a container lambda."
})
}
| 17.5625
| 62
| 0.512456
|
import json
import pandas as pd
def handler(event, context):
df = pd.DataFrame({"id": [1, 2], "value": ["foo", "boo"]})
print(df)
return {
"statusCode": 200,
"body": json.dumps({
"message": "This is a container lambda."
})
}
| true
| true
|
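Because the handler is a plain function of event and context, it can be smoke-tested locally before being baked into the Lambda container image. A minimal sketch, assuming the file is importable as index and that an empty event and a None context are enough for this handler (it ignores both):

# Local smoke test for the container Lambda handler above.
from index import handler

response = handler(event={}, context=None)
assert response["statusCode"] == 200
print(response["body"])  # {"message": "This is a container lambda."}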
f7158d9cf34dc0b5ca5dc19e15a61f7fd3e08c77
| 12,588
|
py
|
Python
|
test.py
|
trs123s/ModernFarming
|
28f99c090ed041486c3c3bbae1054cc9279261bd
|
[
"MIT"
] | null | null | null |
test.py
|
trs123s/ModernFarming
|
28f99c090ed041486c3c3bbae1054cc9279261bd
|
[
"MIT"
] | null | null | null |
test.py
|
trs123s/ModernFarming
|
28f99c090ed041486c3c3bbae1054cc9279261bd
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter.ttk import *
import sqlite3
from tkinter import *
'''
import speech_recognition as sr # for speech recognition to play songs
import pyttsx3 as tts # python module for speech
engine = tts.init()
volume = engine.getProperty('volume')
engine.setProperty('volume',0.75)
voices = engine.getProperty('voices')
rate = engine.getProperty('rate')
engine.setProperty('voice', voices[0].id)
engine.setProperty('rate', 150)
'''
root = tk.Tk()
root.title("DataBase Manager by Mohit Gupta")
root.geometry("800x640")
#-------------------------create text box--------------------------------------------
songs = Entry(root, width=50)
songs.grid(row=8,column=1,pady=5)
age0_2 = Entry(root, width=50)
age0_2.grid(row=9, column=1,pady=5)
age4_6 = Entry(root, width=50)
age4_6.grid(row=10, column=1,pady=5)
age8_12 = Entry(root, width=50)
age8_12.grid(row=11, column=1,pady=5)
age15_20 = Entry(root, width=50)
age15_20.grid(row=12, column=1,pady=5)
age25_32 = Entry(root, width=50)
age25_32.grid(row=13, column=1,pady=5)
age38_43 = Entry(root, width=50)
age38_43.grid(row=14, column=1,pady=5)
age48_53 = Entry(root, width=50)
age48_53.grid(row=15, column=1,pady=5)
age60_100 = Entry(root, width=50)
age60_100.grid(row=16, column=1,pady=5)
singer_name = Entry(root, width=50)
singer_name.grid(row=17, column=1,pady=5)
h = Entry(root, width=50)
h.grid(row=18, column=1,pady=5)
s = Entry(root, width=50)
s.grid(row=19, column=1,pady=5)
a = Entry(root, width=50)
a.grid(row=20, column=1,pady=5)
cr = Entry(root, width=50)
cr.grid(row=21, column=1,pady=5)
su = Entry(root, width=50)
su.grid(row=22, column=1,pady=5)
delete = Entry(root, width=20)
delete.grid(row=11, column=3, pady=5)
#--------------------------------create text box label--------------------------------------------
songs_label = Label(root, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(root, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
age4_6_label = Label(root, text="Age4_6",padx=5)
age4_6_label.grid(row=10, column=0)
age8_12_label = Label(root, text="Age8_12",padx=5)
age8_12_label.grid(row=11, column=0)
age15_20_label = Label(root, text="Age15_20",padx=5)
age15_20_label.grid(row=12, column=0)
age25_32_label = Label(root, text="Age25_32",padx=5)
age25_32_label.grid(row=13, column=0)
age38_43_label = Label(root, text="Age38_43",padx=5)
age38_43_label.grid(row=14, column=0)
age48_53_label = Label(root, text="Age48_53",padx=5)
age48_53_label.grid(row=15, column=0)
age60_100_label = Label(root, text="Age60_100",padx=5)
age60_100_label.grid(row=16, column=0)
singer_name_label = Label(root, text="singer",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(root, text="Happy",padx=5)
h_label.grid(row=18, column=0)
s_label = Label(root, text="Sad",padx=5)
s_label.grid(row=19, column=0)
a_label = Label(root, text="Angry",padx=5)
a_label.grid(row=20, column=0)
cr_label = Label(root, text="cry",padx=5)
cr_label.grid(row=21, column=0)
su_label = Label(root, text="Surprise",padx=5)
su_label.grid(row=22, column=0)
delete_label = Label(root, text="Select ID")
delete_label.grid(row=11, column=2, pady=10)
#----------------------------info---------------------------------------
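# Write the values from the pop-up editor back into the selected record.
# NOTE: as written this references the *_editor widgets that are local to edit(),
# and the pop-up's button is not bound to any command, so the update path needs
# further wiring before it can run.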
def update():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
    item_id = delete.get()  # record id typed into the "Select ID" entry (also used by the Delete button)
c.execute("""UPDATE music SET
songs = :songs,
age0_2 = :age0_2,
age4_6 = :age4_6,
age8_12 = :age8_12,
age15_20 = :age15_20,
age25_32 = :age25_32,
age38_43 = :age38_43,
age48_53 = :age48_53,
age60_100 = :age60_100,
singer_name = :singer_name,
happy = :h,
sad = :s,
angry = :a,
cry = :cr,
        surprise = :su
        WHERE oid = :oid""",
{
'songs': songs_editor.get(),
'age0_2': age0_2_editor.get(),
'age4_6': age4_6_editor.get(),
'age8_12': age8_12_editor.get(),
'age15_20': age15_20_editor.get(),
'age25_32': age25_32_editor.get(),
'age38_43': age38_43_editor.get(),
'age48_53': age48_53_editor.get(),
'age60_100': age60_100_editor.get(),
'singer_name': singer_name_editor.get(),
'h': h_editor.get(),
's': s_editor.get(),
'a': a_editor.get(),
'cr': cr_editor.get(),
'su': su_editor.get(),
'oid': item_id
})
conn.commit()
conn.close()
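# Open a pop-up window pre-filled with the current values of the record whose id
# is typed into the "Select ID" box.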
def edit():
editor = Tk()
editor.title("Information")
editor.geometry("600x640")
conn = sqlite3.connect('music4.db')
c = conn.cursor()
item_id = delete.get()
c.execute("SELECT * FROM music WHERE oid = "+ item_id )
items = c.fetchall()
songs_editor = Entry(editor, width=50)
songs_editor.grid(row=8,column=1,pady=5)
age0_2_editor = Entry(editor, width=50)
age0_2_editor.grid(row=9, column=1,pady=5)
age4_6_editor = Entry(editor, width=50)
age4_6_editor.grid(row=10, column=1,pady=5)
age8_12_editor = Entry(editor, width=50)
age8_12_editor.grid(row=11, column=1,pady=5)
age15_20_editor = Entry(editor, width=50)
age15_20_editor.grid(row=12, column=1,pady=5)
age25_32_editor = Entry(editor, width=50)
age25_32_editor.grid(row=13, column=1,pady=5)
age38_43_editor = Entry(editor, width=50)
age38_43_editor.grid(row=14, column=1,pady=5)
age48_53_editor = Entry(editor, width=50)
age48_53_editor.grid(row=15, column=1,pady=5)
age60_100_editor = Entry(editor, width=50)
age60_100_editor.grid(row=16, column=1,pady=5)
singer_name_editor = Entry(editor, width=50)
singer_name_editor.grid(row=17, column=1,pady=5)
h_editor = Entry(editor, width=50)
h_editor.grid(row=17, column=1,pady=5)
s_editor = Entry(editor, width=50)
s_editor.grid(row=18, column=1,pady=5)
a_editor = Entry(editor, width=50)
a_editor.grid(row=19, column=1,pady=5)
cr_editor = Entry(editor, width=50)
cr_editor.grid(row=20, column=1,pady=5)
su_editor= Entry(editor, width=50)
su_editor.grid(row=21, column=1,pady=5)
#--------------------------------create text box label--------------------------------------------
songs_label = Label(editor, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(editor, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
    age4_6_label = Label(editor, text="Age4_6",padx=5)
    age4_6_label.grid(row=10, column=0)
    age8_12_label = Label(editor, text="Age8_12",padx=5)
    age8_12_label.grid(row=11, column=0)
    age15_20_label = Label(editor, text="Age15_20",padx=5)
    age15_20_label.grid(row=12, column=0)
    age25_32_label = Label(editor, text="Age25_32",padx=5)
    age25_32_label.grid(row=13, column=0)
    age38_43_label = Label(editor, text="Age38_43",padx=5)
    age38_43_label.grid(row=14, column=0)
    age48_53_label = Label(editor, text="Age48_53",padx=5)
    age48_53_label.grid(row=15, column=0)
    age60_100_label = Label(editor, text="Age60_100",padx=5)
    age60_100_label.grid(row=16, column=0)
    singer_name_label = Label(editor, text="singer",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(editor, text="Happy",padx=5)
h_label.grid(row=17, column=0)
s_label = Label(editor, text="Sad",padx=5)
s_label.grid(row=18, column=0)
a_label = Label(editor, text="Angry",padx=5)
a_label.grid(row=19, column=0)
cr_label = Label(editor, text="cry",padx=5)
cr_label.grid(row=20, column=0)
su_label = Label(editor, text="Surprise",padx=5)
su_label.grid(row=21, column=0)
for item in items:
songs_editor.insert(0, item[0])
age0_2_editor.insert(0, item[1])
age4_6_editor.insert(0, item[2])
age8_12_editor.insert(0, item[3])
age15_20_editor.insert(0, item[4])
age25_32_editor.insert(0, item[5])
age38_43_editor.insert(0, item[6])
age48_53_editor.insert(0, item[7])
age60_100_editor.insert(0, item[8])
singer_name_editor.insert(0, item[9])
h_editor.insert(0, item[10])
s_editor.insert(0, item[11])
a_editor.insert(0, item[12])
cr_editor.insert(0, item[13])
su_editor.insert(0, item[14])
b4_edit = Button(editor, text = "Info",padx=50,fg="white",pady=5,bg="blue")
b4_edit.grid(row=34, column=1)
#--------------------------------ADD TO DATABASE FUNCTION----------------------------
#add a new record to the table
def add_one():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("INSERT INTO music VALUES (:songs, :age0_2, :age4_6, :age8_12, :age15_20, :age25_32, :age38_43, :age48_53, :age60_100, :singer_name, :h, :s, :a, :cr, :su)",
{
'songs': songs.get(),
'age0_2': age0_2.get(),
'age4_6': age4_6.get(),
'age8_12': age8_12.get(),
'age15_20': age15_20.get(),
'age25_32': age25_32.get(),
'age38_43': age38_43.get(),
'age48_53': age48_53.get(),
'age60_100': age60_100.get(),
'singer_name': singer_name.get(),
'h': h.get(),
's': s.get(),
'a': a.get(),
'cr': cr.get(),
'su': su.get()
})
songs.delete(0, END)
age0_2.delete(0, END)
age4_6.delete(0, END)
age8_12.delete(0, END)
age15_20.delete(0, END)
age25_32.delete(0, END)
age38_43.delete(0, END)
age48_53.delete(0, END)
age60_100.delete(0, END)
singer_name.delete(0, END)
h.delete(0, END)
s.delete(0, END)
a.delete(0, END)
cr.delete(0, END)
su.delete(0, END)
conn.commit()
conn.close()
'''
def show_allsongs():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM music WHERE age25_32 LIKE '1' AND happy LIKE '1'")
items = c.fetchall()
#print(str(items))
SELECT rowid, * FROM music
for item in items:
# print(item)
print(str(item[0]) + "\t\t" + str(item[1]) + "\t\t" + str(item[2]) + "\t\t" + str(item[3]) + "\t\t" + str(item[4])+ "\t\t" + str(item[5] + str(item[6]) + "\t\t" + str(item[7]) + "\t\t" + str(item[8]) + "\t\t" + str(item[9]) + "\t\t" + str(item[10])+ "\t\t" + str(item[11])+ str(item[12])+ "\t\t" + str(item[13]))
# conn.commit()
# conn.close()
'''
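# Dump every row of the music table to the console.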
def show_allSongs():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM music")
items = c.fetchall()
# print(items)
print_items = ''
for item in items:
print_items = str(item[0]) + " " + str(item[1]) + "\t\t\t|" + str(item[2]) + " " + str(item[3]) + " " + str(item[4])+ " " + str(item[5]) + " " + str(item[6]) + " " + str(item[7]) + " " + str(item[8]) + " " + str(item[9]) + " | " + str(item[11]) + " " + str(item[12]) + " " + str(item[13]) + " " + str(item[14]) + " " + str(item[15]) + " | " + str(item[10])
#print_items += "\n"
print(print_items)
print("---------------------------------------------------------------------------------------------------\n")
# b8_label = Label(root, text=print_items)
# b8_label.grid(row=22, column=0,columnspan=2)
conn.commit()
conn.close()
#--------------------------------------------------------------------------------------
######CREATE A TABLE FUNCTION
def create_table():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("""CREATE TABLE music(
songs text,
age0_2 text,
age4_6 text,
age8_12 text,
age15_20 text,
age25_32 text,
age38_43 text,
age48_53 text,
age60_100 text,
singer_name text,
happy text,
sad text,
cry text,
angry text,
surprise text
)""")
conn.commit()
conn.close()
#--------------------------------deleting record---------------------------------------------
def delete_one():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("DELETE from music WHERE rowid = " + delete.get())
delete.delete(0, END)
conn.commit()
conn.close()
label1 = Label(root, text = " ",pady=10)
b1 = Button(root, text = "Create table", command = create_table,padx=35,fg="white",pady=5,bg="green")
b2 = Button(root, text = "AddToDatabase", command = add_one,padx=25,fg="white",pady=5,bg="orange")
b3 = Button(root, text = "Delete", command = delete_one,padx=52.3,fg="white",pady=5,bg="Red")
b4 = Button(root, text = "Info", command = edit,padx=50,fg="white",pady=5,bg="blue")
b5 = Button(root, text = "DataBase Management System by Mohit Gupta",padx=125,pady=10,fg="White",bg="black")
b6 = Button(root, text = "Tools",state=DISABLED,padx=55,pady=10)
b7 = Button(root, text = "# USE FOR ADD TOOL ",state=DISABLED,padx=30,pady=10)
b8 = Button(root, text = "Displayall", command = show_allSongs,padx=25,fg="white",pady=5,bg="orange")
label1.grid(row=2, column=3)
b1.grid(row=6, column=3)
b2.grid(row=23, column=1)
b3.grid(row=12, column=3)
b4.grid(row=14, column=3)
b5.grid(row=1, column=1)
b6.grid(row=1, column=3)
b7.grid(row=2, column=1)
b8.grid(row=25, column=1)
# calling mainloop method which is used
# when your application is ready to run
# and it tells the code to keep displaying
root.mainloop()
| 33.03937
| 398
| 0.640133
|
import tkinter as tk
from tkinter.ttk import *
import sqlite3
from tkinter import *
root = tk.Tk()
root.title("DataBase Manager by Mohit Gupta")
root.geometry("800x640")
songs = Entry(root, width=50)
songs.grid(row=8,column=1,pady=5)
age0_2 = Entry(root, width=50)
age0_2.grid(row=9, column=1,pady=5)
age4_6 = Entry(root, width=50)
age4_6.grid(row=10, column=1,pady=5)
age8_12 = Entry(root, width=50)
age8_12.grid(row=11, column=1,pady=5)
age15_20 = Entry(root, width=50)
age15_20.grid(row=12, column=1,pady=5)
age25_32 = Entry(root, width=50)
age25_32.grid(row=13, column=1,pady=5)
age38_43 = Entry(root, width=50)
age38_43.grid(row=14, column=1,pady=5)
age48_53 = Entry(root, width=50)
age48_53.grid(row=15, column=1,pady=5)
age60_100 = Entry(root, width=50)
age60_100.grid(row=16, column=1,pady=5)
singer_name = Entry(root, width=50)
singer_name.grid(row=17, column=1,pady=5)
h = Entry(root, width=50)
h.grid(row=18, column=1,pady=5)
s = Entry(root, width=50)
s.grid(row=19, column=1,pady=5)
a = Entry(root, width=50)
a.grid(row=20, column=1,pady=5)
cr = Entry(root, width=50)
cr.grid(row=21, column=1,pady=5)
su = Entry(root, width=50)
su.grid(row=22, column=1,pady=5)
delete = Entry(root, width=20)
delete.grid(row=11, column=3, pady=5)
songs_label = Label(root, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(root, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
age4_6_label = Label(root, text="Age4_6",padx=5)
age4_6_label.grid(row=10, column=0)
age8_12_label = Label(root, text="Age8_12",padx=5)
age8_12_label.grid(row=11, column=0)
age15_20_label = Label(root, text="Age15_20",padx=5)
age15_20_label.grid(row=12, column=0)
age25_32_label = Label(root, text="Age25_32",padx=5)
age25_32_label.grid(row=13, column=0)
age38_43_label = Label(root, text="Age38_43",padx=5)
age38_43_label.grid(row=14, column=0)
age48_53_label = Label(root, text="Age48_53",padx=5)
age48_53_label.grid(row=15, column=0)
age60_100_label = Label(root, text="Age60_100",padx=5)
age60_100_label.grid(row=16, column=0)
singer_name_label = Label(root, text="singer",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(root, text="Happy",padx=5)
h_label.grid(row=18, column=0)
s_label = Label(root, text="Sad",padx=5)
s_label.grid(row=19, column=0)
a_label = Label(root, text="Angry",padx=5)
a_label.grid(row=20, column=0)
cr_label = Label(root, text="cry",padx=5)
cr_label.grid(row=21, column=0)
su_label = Label(root, text="Surprise",padx=5)
su_label.grid(row=22, column=0)
delete_label = Label(root, text="Select ID")
delete_label.grid(row=11, column=2, pady=10)
def update():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
item_id = delete.get()
c.execute("""UPDATE music SET
songs = :songs,
age0_2 = :age0_2,
age4_6 = :age4_6,
age8_12 = :age8_12,
age15_20 = :age15_20,
age25_32 = :age25_32,
age38_43 = :age38_43,
age48_53 = :age48_53,
age60_100 = :age60_100,
singer_name = :singer_name,
happy = :h,
sad = :s,
angry = :a,
cry = :cr,
        surprise = :su
        WHERE oid = :oid""",
{
'songs': songs_editor.get(),
'age0_2': age0_2_editor.get(),
'age4_6': age4_6_editor.get(),
'age8_12': age8_12_editor.get(),
'age15_20': age15_20_editor.get(),
'age25_32': age25_32_editor.get(),
'age38_43': age38_43_editor.get(),
'age48_53': age48_53_editor.get(),
'age60_100': age60_100_editor.get(),
'singer_name': singer_name_editor.get(),
'h': h_editor.get(),
's': s_editor.get(),
'a': a_editor.get(),
'cr': cr_editor.get(),
'su': su_editor.get(),
'oid': item_id
})
conn.commit()
conn.close()
def edit():
editor = Tk()
editor.title("Information")
editor.geometry("600x640")
conn = sqlite3.connect('music4.db')
c = conn.cursor()
item_id = delete.get()
c.execute("SELECT * FROM music WHERE oid = "+ item_id )
items = c.fetchall()
songs_editor = Entry(editor, width=50)
songs_editor.grid(row=8,column=1,pady=5)
age0_2_editor = Entry(editor, width=50)
age0_2_editor.grid(row=9, column=1,pady=5)
age4_6_editor = Entry(editor, width=50)
age4_6_editor.grid(row=10, column=1,pady=5)
age8_12_editor = Entry(editor, width=50)
age8_12_editor.grid(row=11, column=1,pady=5)
age15_20_editor = Entry(editor, width=50)
age15_20_editor.grid(row=12, column=1,pady=5)
age25_32_editor = Entry(editor, width=50)
age25_32_editor.grid(row=13, column=1,pady=5)
age38_43_editor = Entry(editor, width=50)
age38_43_editor.grid(row=14, column=1,pady=5)
age48_53_editor = Entry(editor, width=50)
age48_53_editor.grid(row=15, column=1,pady=5)
age60_100_editor = Entry(editor, width=50)
age60_100_editor.grid(row=16, column=1,pady=5)
singer_name_editor = Entry(editor, width=50)
singer_name_editor.grid(row=17, column=1,pady=5)
h_editor = Entry(editor, width=50)
h_editor.grid(row=17, column=1,pady=5)
s_editor = Entry(editor, width=50)
s_editor.grid(row=18, column=1,pady=5)
a_editor = Entry(editor, width=50)
a_editor.grid(row=19, column=1,pady=5)
cr_editor = Entry(editor, width=50)
cr_editor.grid(row=20, column=1,pady=5)
su_editor= Entry(editor, width=50)
su_editor.grid(row=21, column=1,pady=5)
songs_label = Label(editor, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(editor, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
    age4_6_label = Label(editor, text="Age4_6",padx=5)
    age4_6_label.grid(row=10, column=0)
    age8_12_label = Label(editor, text="Age8_12",padx=5)
    age8_12_label.grid(row=11, column=0)
    age15_20_label = Label(editor, text="Age15_20",padx=5)
    age15_20_label.grid(row=12, column=0)
    age25_32_label = Label(editor, text="Age25_32",padx=5)
    age25_32_label.grid(row=13, column=0)
    age38_43_label = Label(editor, text="Age38_43",padx=5)
    age38_43_label.grid(row=14, column=0)
    age48_53_label = Label(editor, text="Age48_53",padx=5)
    age48_53_label.grid(row=15, column=0)
    age60_100_label = Label(editor, text="Age60_100",padx=5)
    age60_100_label.grid(row=16, column=0)
    singer_name_label = Label(editor, text="singer",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(editor, text="Happy",padx=5)
h_label.grid(row=17, column=0)
s_label = Label(editor, text="Sad",padx=5)
s_label.grid(row=18, column=0)
a_label = Label(editor, text="Angry",padx=5)
a_label.grid(row=19, column=0)
cr_label = Label(editor, text="cry",padx=5)
cr_label.grid(row=20, column=0)
su_label = Label(editor, text="Surprise",padx=5)
su_label.grid(row=21, column=0)
for item in items:
songs_editor.insert(0, item[0])
age0_2_editor.insert(0, item[1])
age4_6_editor.insert(0, item[2])
age8_12_editor.insert(0, item[3])
age15_20_editor.insert(0, item[4])
age25_32_editor.insert(0, item[5])
age38_43_editor.insert(0, item[6])
age48_53_editor.insert(0, item[7])
age60_100_editor.insert(0, item[8])
singer_name_editor.insert(0, item[9])
h_editor.insert(0, item[10])
s_editor.insert(0, item[11])
a_editor.insert(0, item[12])
cr_editor.insert(0, item[13])
su_editor.insert(0, item[14])
b4_edit = Button(editor, text = "Info",padx=50,fg="white",pady=5,bg="blue")
b4_edit.grid(row=34, column=1)
def add_one():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("INSERT INTO music VALUES (:songs, :age0_2, :age4_6, :age8_12, :age15_20, :age25_32, :age38_43, :age48_53, :age60_100, :singer_name, :h, :s, :a, :cr, :su)",
{
'songs': songs.get(),
'age0_2': age0_2.get(),
'age4_6': age4_6.get(),
'age8_12': age8_12.get(),
'age15_20': age15_20.get(),
'age25_32': age25_32.get(),
'age38_43': age38_43.get(),
'age48_53': age48_53.get(),
'age60_100': age60_100.get(),
'singer_name': singer_name.get(),
'h': h.get(),
's': s.get(),
'a': a.get(),
'cr': cr.get(),
'su': su.get()
})
songs.delete(0, END)
age0_2.delete(0, END)
age4_6.delete(0, END)
age8_12.delete(0, END)
age15_20.delete(0, END)
age25_32.delete(0, END)
age38_43.delete(0, END)
age48_53.delete(0, END)
age60_100.delete(0, END)
singer_name.delete(0, END)
h.delete(0, END)
s.delete(0, END)
a.delete(0, END)
cr.delete(0, END)
su.delete(0, END)
conn.commit()
conn.close()
def show_allSongs():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM music")
items = c.fetchall()
print_items = ''
for item in items:
print_items = str(item[0]) + " " + str(item[1]) + "\t\t\t|" + str(item[2]) + " " + str(item[3]) + " " + str(item[4])+ " " + str(item[5]) + " " + str(item[6]) + " " + str(item[7]) + " " + str(item[8]) + " " + str(item[9]) + " | " + str(item[11]) + " " + str(item[12]) + " " + str(item[13]) + " " + str(item[14]) + " " + str(item[15]) + " | " + str(item[10])
print(print_items)
print("---------------------------------------------------------------------------------------------------\n")
conn.commit()
conn.close()
def create_table():
    conn = sqlite3.connect('music4.db')
    c = conn.cursor()
    c.execute("""CREATE TABLE music(
            songs text,
            age0_2 text,
age4_6 text,
age8_12 text,
age15_20 text,
age25_32 text,
age38_43 text,
age48_53 text,
age60_100 text,
singer_name text,
happy text,
sad text,
cry text,
angry text,
surprise text
)""")
conn.commit()
conn.close()
def delete_one():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("DELETE from music WHERE rowid = " + delete.get())
delete.delete(0, END)
conn.commit()
conn.close()
label1 = Label(root, text = " ",pady=10)
b1 = Button(root, text = "Create table", command = create_table,padx=35,fg="white",pady=5,bg="green")
b2 = Button(root, text = "AddToDatabase", command = add_one,padx=25,fg="white",pady=5,bg="orange")
b3 = Button(root, text = "Delete", command = delete_one,padx=52.3,fg="white",pady=5,bg="Red")
b4 = Button(root, text = "Info", command = edit,padx=50,fg="white",pady=5,bg="blue")
b5 = Button(root, text = "DataBase Management System by Mohit Gupta",padx=125,pady=10,fg="White",bg="black")
b6 = Button(root, text = "Tools",state=DISABLED,padx=55,pady=10)
b7 = Button(root, text = "# USE FOR ADD TOOL ",state=DISABLED,padx=30,pady=10)
b8 = Button(root, text = "Displayall", command = show_allSongs,padx=25,fg="white",pady=5,bg="orange")
label1.grid(row=2, column=3)
b1.grid(row=6, column=3)
b2.grid(row=23, column=1)
b3.grid(row=12, column=3)
b4.grid(row=14, column=3)
b5.grid(row=1, column=1)
b6.grid(row=1, column=3)
b7.grid(row=2, column=1)
b8.grid(row=25, column=1)
root.mainloop()
| true
| true
|
f7158e044b9155a5343668120d5af436908eaa72
| 8,428
|
py
|
Python
|
synchronization/SyncNetInstance.py
|
PlatterDataset/feature
|
2ebdc1b28498b709a0c91e60c19bfc731006bc50
|
[
"MIT"
] | null | null | null |
synchronization/SyncNetInstance.py
|
PlatterDataset/feature
|
2ebdc1b28498b709a0c91e60c19bfc731006bc50
|
[
"MIT"
] | null | null | null |
synchronization/SyncNetInstance.py
|
PlatterDataset/feature
|
2ebdc1b28498b709a0c91e60c19bfc731006bc50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#-*- coding: utf-8 -*-
# Video 25 FPS, Audio 16000HZ
import torch
import numpy
import time, pdb, argparse, subprocess, os, math, glob
import cv2
import python_speech_features
from scipy import signal
from scipy.io import wavfile
from SyncNetModel import *
from shutil import rmtree
# ==================== Get OFFSET ====================
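# Median of the input values (mean of the two middle values for an even-length list).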
def get_median(data1):
data = sorted(data1)
size = len(data)
    if size % 2 == 0:  # even number of elements
median = (data[size//2]+data[size//2-1])/2
data[0] = median
    if size % 2 == 1:  # odd number of elements
median = data[(size-1)//2]
data[0] = median
return data[0]
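# Distance between each video-frame feature and the audio features at every shift
# within +/- vshift frames (a sliding window of size 2*vshift+1).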
def calc_pdist(feat1, feat2, vshift=40):
win_size = vshift*2+1
feat2p = torch.nn.functional.pad(feat2,(0,0,vshift,vshift))
dists = []
for i in range(0,len(feat1)):
dists.append(torch.nn.functional.pairwise_distance(feat1[[i],:].repeat(win_size, 1), feat2p[i:i+win_size,:]))
return dists
# ==================== MAIN DEF ====================
class SyncNetInstance(torch.nn.Module):
def __init__(self, dropout = 0, num_layers_in_fc_layers = 1024):
super(SyncNetInstance, self).__init__();
self.__S__ = S(num_layers_in_fc_layers = num_layers_in_fc_layers).cuda();
def evaluate(self, opt, videofile, num):
self.__S__.eval();
# ========== ==========
# Convert files
# ========== ==========
if os.path.exists(os.path.join(opt.tmp_dir,opt.reference)):
rmtree(os.path.join(opt.tmp_dir,opt.reference))
os.makedirs(os.path.join(opt.tmp_dir,opt.reference))
command = ("ffmpeg -y -i %s -threads 1 -f image2 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'%06d.jpg')))
output = subprocess.call(command, shell=True, stdout=None)
command = ("ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'audio.wav')))
output = subprocess.call(command, shell=True, stdout=None)
# ========== ==========
# Load video
# ========== ==========
images = []
flist = glob.glob(os.path.join(opt.tmp_dir,opt.reference,'*.jpg'))
flist.sort()
for fname in flist:
images.append(cv2.imread(fname))
im = numpy.stack(images,axis=3)
im = numpy.expand_dims(im,axis=0)
im = numpy.transpose(im,(0,3,4,1,2))
imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
# ========== ==========
# Load audio
# ========== ==========
sample_rate, audio = wavfile.read(os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))
mfcc = zip(*python_speech_features.mfcc(audio,sample_rate))
mfcc = numpy.stack([numpy.array(i) for i in mfcc])
torch.save(mfcc,'./mfcc_saver/mfcc'+str(num)+'.pt')
ww = open('./mfcc_saver/mfcc'+str(num)+'.txt','w')
ww.write(str(mfcc))
cc = numpy.expand_dims(numpy.expand_dims(mfcc,axis=0),axis=0)
cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())
# ========== ==========
# Check audio and video input length
# ========== ==========
if (float(len(audio))/16000) != (float(len(images))/25) :
print("WARNING: Audio (%.4fs) and video (%.4fs) lengths are different."%(float(len(audio))/16000,float(len(images))/25))
min_length = min(len(images),math.floor(len(audio)/640))
# ========== ==========
# Generate video and audio feats
# ========== ==========
lastframe = min_length-5
im_feat = []
cc_feat = []
wr = open('./'+str(opt.reference)+'_'+str(num)+'_resultoff.txt','w')
tS = time.time()
for i in range(0,lastframe,opt.batch_size):
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
im_in = torch.cat(im_batch,0)
im_out = self.__S__.forward_lip(im_in.cuda());
im_feat.append(im_out.data.cpu())
cc_batch = [ cct[:,:,:,vframe*4:vframe*4+20] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
cc_in = torch.cat(cc_batch,0)
cc_out = self.__S__.forward_aud(cc_in.cuda())
cc_feat.append(cc_out.data.cpu())
im_feat = torch.cat(im_feat,0)
cc_feat = torch.cat(cc_feat,0)
# ========== ==========
# Compute offset
# ========== ==========
print('Compute time %.3f sec.' % (time.time()-tS))
dists = calc_pdist(im_feat,cc_feat,vshift=opt.vshift)
mdist = torch.mean(torch.stack(dists,1),1)
off = []
avg_dist = []
for t in range(0,len(im_feat)):
tt = 10000
offy = 0
of = 0
of_m = 0
dis_mid = 0
dis_min = 1000000000
for k in range(0,len(dists[t])):
if t == 0:
avg_dist.append(dists[t][k])
else:
avg_dist[k] += dists[t][k]
if (t+1)% 100 == 0 or t == len(im_feat)-1:
if avg_dist[k] < dis_min:
dis_min = avg_dist[k]
of = k
if dists[t][k]<tt:
tt = dists[t][k]
offy = k
if (t+1)%100 == 0 or t == len(im_feat) -1:
dis_mid = get_median(avg_dist)
for k in range(len(avg_dist)):
avg_dist[k] = 0
wr.write(str(t%100)+' ')
wr.write(str((opt.vshift-of) * 0.04)+'s ')
if (t+1)%100 != 0:
wr.write("conf = "+str((dis_mid.item()-dis_min.item())/((t+1)%100))+'\n')#confidence改成medium
else:
wr.write("conf = "+str((dis_mid.item()-dis_min.item())/100)+'\n')
off.append(opt.vshift-offy)
off = numpy.array(off)
minval, minidx = torch.min(mdist,0)
offset = opt.vshift-minidx
conf = torch.median(mdist) - minval
fdist = numpy.stack([dist[minidx].numpy() for dist in dists])
# fdist = numpy.pad(fdist, (3,3), 'constant', constant_values=15)
fconf = torch.median(mdist).numpy() - fdist
fconfm = signal.medfilt(fconf,kernel_size=9)
numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print('Framewise conf: ')
print(fconfm)
print('AV offset: \t%d \nMin dist: \t%.3f\nConfidence: \t%.3f' % (offset,minval,conf))
dists_npy = numpy.array([ dist.numpy() for dist in dists ])
return off, conf.numpy(), dists_npy
def extract_feature(self, opt, videofile):
self.__S__.eval();
# ========== ==========
# Load video
# ========== ==========
cap = cv2.VideoCapture(videofile)
frame_num = 1;
images = []
while frame_num:
frame_num += 1
ret, image = cap.read()
if ret == 0:
break
images.append(image)
im = numpy.stack(images,axis=3)
im = numpy.expand_dims(im,axis=0)
im = numpy.transpose(im,(0,3,4,1,2))
imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
# ========== ==========
# Generate video feats
# ========== ==========
lastframe = len(images)-4
im_feat = []
tS = time.time()
for i in range(0,lastframe,opt.batch_size):
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
im_in = torch.cat(im_batch,0)
im_out = self.__S__.forward_lipfeat(im_in.cuda());
im_feat.append(im_out.data.cpu())
im_feat = torch.cat(im_feat,0)
# ========== ==========
# Compute offset
# ========== ==========
print('Compute time %.3f sec.' % (time.time()-tS))
return im_feat
def loadParameters(self, path):
loaded_state = torch.load(path, map_location=lambda storage, loc: storage);
self_state = self.__S__.state_dict();
for name, param in loaded_state.items():
self_state[name].copy_(param);
| 32.666667
| 154
| 0.51495
|
import torch
import numpy
import time, pdb, argparse, subprocess, os, math, glob
import cv2
import python_speech_features
from scipy import signal
from scipy.io import wavfile
from SyncNetModel import *
from shutil import rmtree
def get_median(data1):
data = sorted(data1)
size = len(data)
if size % 2 == 0:
median = (data[size//2]+data[size//2-1])/2
data[0] = median
if size % 2 == 1:
median = data[(size-1)//2]
data[0] = median
return data[0]
def calc_pdist(feat1, feat2, vshift=40):
win_size = vshift*2+1
feat2p = torch.nn.functional.pad(feat2,(0,0,vshift,vshift))
dists = []
for i in range(0,len(feat1)):
dists.append(torch.nn.functional.pairwise_distance(feat1[[i],:].repeat(win_size, 1), feat2p[i:i+win_size,:]))
return dists
class SyncNetInstance(torch.nn.Module):
def __init__(self, dropout = 0, num_layers_in_fc_layers = 1024):
super(SyncNetInstance, self).__init__();
self.__S__ = S(num_layers_in_fc_layers = num_layers_in_fc_layers).cuda();
def evaluate(self, opt, videofile, num):
self.__S__.eval();
if os.path.exists(os.path.join(opt.tmp_dir,opt.reference)):
rmtree(os.path.join(opt.tmp_dir,opt.reference))
os.makedirs(os.path.join(opt.tmp_dir,opt.reference))
command = ("ffmpeg -y -i %s -threads 1 -f image2 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'%06d.jpg')))
output = subprocess.call(command, shell=True, stdout=None)
command = ("ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'audio.wav')))
output = subprocess.call(command, shell=True, stdout=None)
images = []
flist = glob.glob(os.path.join(opt.tmp_dir,opt.reference,'*.jpg'))
flist.sort()
for fname in flist:
images.append(cv2.imread(fname))
im = numpy.stack(images,axis=3)
im = numpy.expand_dims(im,axis=0)
im = numpy.transpose(im,(0,3,4,1,2))
imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
sample_rate, audio = wavfile.read(os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))
mfcc = zip(*python_speech_features.mfcc(audio,sample_rate))
mfcc = numpy.stack([numpy.array(i) for i in mfcc])
torch.save(mfcc,'./mfcc_saver/mfcc'+str(num)+'.pt')
ww = open('./mfcc_saver/mfcc'+str(num)+'.txt','w')
ww.write(str(mfcc))
cc = numpy.expand_dims(numpy.expand_dims(mfcc,axis=0),axis=0)
cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())
if (float(len(audio))/16000) != (float(len(images))/25) :
print("WARNING: Audio (%.4fs) and video (%.4fs) lengths are different."%(float(len(audio))/16000,float(len(images))/25))
min_length = min(len(images),math.floor(len(audio)/640))
lastframe = min_length-5
im_feat = []
cc_feat = []
wr = open('./'+str(opt.reference)+'_'+str(num)+'_resultoff.txt','w')
tS = time.time()
for i in range(0,lastframe,opt.batch_size):
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
im_in = torch.cat(im_batch,0)
im_out = self.__S__.forward_lip(im_in.cuda());
im_feat.append(im_out.data.cpu())
cc_batch = [ cct[:,:,:,vframe*4:vframe*4+20] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
cc_in = torch.cat(cc_batch,0)
cc_out = self.__S__.forward_aud(cc_in.cuda())
cc_feat.append(cc_out.data.cpu())
im_feat = torch.cat(im_feat,0)
cc_feat = torch.cat(cc_feat,0)
print('Compute time %.3f sec.' % (time.time()-tS))
dists = calc_pdist(im_feat,cc_feat,vshift=opt.vshift)
mdist = torch.mean(torch.stack(dists,1),1)
off = []
avg_dist = []
for t in range(0,len(im_feat)):
tt = 10000
offy = 0
of = 0
of_m = 0
dis_mid = 0
dis_min = 1000000000
for k in range(0,len(dists[t])):
if t == 0:
avg_dist.append(dists[t][k])
else:
avg_dist[k] += dists[t][k]
if (t+1)% 100 == 0 or t == len(im_feat)-1:
if avg_dist[k] < dis_min:
dis_min = avg_dist[k]
of = k
if dists[t][k]<tt:
tt = dists[t][k]
offy = k
if (t+1)%100 == 0 or t == len(im_feat) -1:
dis_mid = get_median(avg_dist)
for k in range(len(avg_dist)):
avg_dist[k] = 0
wr.write(str(t%100)+' ')
wr.write(str((opt.vshift-of) * 0.04)+'s ')
if (t+1)%100 != 0:
wr.write("conf = "+str((dis_mid.item()-dis_min.item())/((t+1)%100))+'\n')
else:
wr.write("conf = "+str((dis_mid.item()-dis_min.item())/100)+'\n')
off.append(opt.vshift-offy)
off = numpy.array(off)
minval, minidx = torch.min(mdist,0)
offset = opt.vshift-minidx
conf = torch.median(mdist) - minval
fdist = numpy.stack([dist[minidx].numpy() for dist in dists])
fconf = torch.median(mdist).numpy() - fdist
fconfm = signal.medfilt(fconf,kernel_size=9)
numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print('Framewise conf: ')
print(fconfm)
print('AV offset: \t%d \nMin dist: \t%.3f\nConfidence: \t%.3f' % (offset,minval,conf))
dists_npy = numpy.array([ dist.numpy() for dist in dists ])
return off, conf.numpy(), dists_npy
def extract_feature(self, opt, videofile):
self.__S__.eval();
cap = cv2.VideoCapture(videofile)
frame_num = 1;
images = []
while frame_num:
frame_num += 1
ret, image = cap.read()
if ret == 0:
break
images.append(image)
im = numpy.stack(images,axis=3)
im = numpy.expand_dims(im,axis=0)
im = numpy.transpose(im,(0,3,4,1,2))
imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
lastframe = len(images)-4
im_feat = []
tS = time.time()
for i in range(0,lastframe,opt.batch_size):
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
im_in = torch.cat(im_batch,0)
im_out = self.__S__.forward_lipfeat(im_in.cuda());
im_feat.append(im_out.data.cpu())
im_feat = torch.cat(im_feat,0)
print('Compute time %.3f sec.' % (time.time()-tS))
return im_feat
def loadParameters(self, path):
loaded_state = torch.load(path, map_location=lambda storage, loc: storage);
self_state = self.__S__.state_dict();
for name, param in loaded_state.items():
self_state[name].copy_(param);
| true
| true
|
f7158e76c232bf5249188b7a0fe3dc8f0b03f00c
| 405
|
py
|
Python
|
turtlebot3_automatic_parking_vision/setup.py
|
herb-kuta-lge/turtlebot3_applications
|
b41f06fda13bcab43800e311c8df63aa0f075445
|
[
"Apache-2.0"
] | 70
|
2017-06-14T16:48:51.000Z
|
2022-03-15T02:44:14.000Z
|
turtlebot3_automatic_parking_vision/setup.py
|
herb-kuta-lge/turtlebot3_applications
|
b41f06fda13bcab43800e311c8df63aa0f075445
|
[
"Apache-2.0"
] | 20
|
2018-06-04T12:06:30.000Z
|
2021-09-10T14:01:25.000Z
|
turtlebot3_automatic_parking_vision/setup.py
|
herb-kuta-lge/turtlebot3_applications
|
b41f06fda13bcab43800e311c8df63aa0f075445
|
[
"Apache-2.0"
] | 47
|
2017-10-31T23:51:19.000Z
|
2022-03-23T12:38:48.000Z
|
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['turtlebot3_automatic_parking_vision'],
package_dir={'': 'src'}
)
setup(**setup_args)
| 28.928571
| 70
| 0.780247
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages=['turtlebot3_automatic_parking_vision'],
package_dir={'': 'src'}
)
setup(**setup_args)
| true
| true
|
f7158e77cbc37e40ac0788e476409ce0f922c325
| 5,748
|
py
|
Python
|
src/deepspeech_training/util/config.py
|
googleinterns/deepspeech-reconstruction
|
72f28d1e9064d221b3421c302a8725a8c71859ee
|
[
"Apache-2.0"
] | 3
|
2021-08-20T16:40:09.000Z
|
2022-02-08T23:17:52.000Z
|
src/deepspeech_training/util/config.py
|
googleinterns/deepspeech-reconstruction
|
72f28d1e9064d221b3421c302a8725a8c71859ee
|
[
"Apache-2.0"
] | 1
|
2022-03-22T04:16:15.000Z
|
2022-03-22T04:26:03.000Z
|
src/deepspeech_training/util/config.py
|
googleinterns/deepspeech-reconstruction
|
72f28d1e9064d221b3421c302a8725a8c71859ee
|
[
"Apache-2.0"
] | 1
|
2021-04-28T21:51:12.000Z
|
2021-04-28T21:51:12.000Z
|
from __future__ import absolute_import, division, print_function
import os
import sys
import tensorflow.compat.v1 as tfv1
from attrdict import AttrDict
from xdg import BaseDirectory as xdg
from src.flags import FLAGS
from .gpu import get_available_gpus
from .logging import log_error
from .text import Alphabet, UTF8Alphabet
from .helpers import parse_file_size
class ConfigSingleton:
_config = None
def __getattr__(self, name):
if not ConfigSingleton._config:
raise RuntimeError("Global configuration not yet initialized.")
if not hasattr(ConfigSingleton._config, name):
raise RuntimeError("Configuration option {} not found in config.".format(name))
return ConfigSingleton._config[name]
Config = ConfigSingleton() # pylint: disable=invalid-name
def initialize_globals():
c = AttrDict()
# Read-buffer
FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)
# Set default dropout rates
if FLAGS.dropout_rate2 < 0:
FLAGS.dropout_rate2 = FLAGS.dropout_rate
if FLAGS.dropout_rate3 < 0:
FLAGS.dropout_rate3 = FLAGS.dropout_rate
if FLAGS.dropout_rate6 < 0:
FLAGS.dropout_rate6 = FLAGS.dropout_rate
# Set default checkpoint dir
if not FLAGS.checkpoint_dir:
FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))
if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:
FLAGS.load_train = 'auto'
if FLAGS.load_evaluate not in ['last', 'best', 'auto']:
FLAGS.load_evaluate = 'auto'
# Set default summary dir
if not FLAGS.summary_dir:
FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))
# Standard session configuration that'll be used for all new sessions.
c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,
inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))
# CPU device
c.cpu_device = '/cpu:0'
# Available GPU devices
c.available_devices = get_available_gpus(c.session_config)
# If there is no GPU available, we fall back to CPU based operation
if not c.available_devices:
c.available_devices = [c.cpu_device]
if FLAGS.utf8:
c.alphabet = UTF8Alphabet()
else:
c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))
# Geometric Constants
# ===================
# For an explanation of the meaning of the geometric constants, please refer to
# doc/Geometry.md
# Number of MFCC features
c.n_input = 26 # TODO: Determine this programmatically from the sample rate
# The number of frames in the context
c.n_context = 9 # TODO: Determine the optimal value using a validation data set
# Number of units in hidden layers
c.n_hidden = FLAGS.n_hidden
c.n_hidden_1 = c.n_hidden
c.n_hidden_2 = c.n_hidden
c.n_hidden_5 = c.n_hidden
# LSTM cell state dimension
c.n_cell_dim = c.n_hidden
# The number of units in the third layer, which feeds in to the LSTM
c.n_hidden_3 = c.n_cell_dim
# Units in the sixth layer = number of characters in the target language plus one
c.n_hidden_6 = c.alphabet.size() + 1 # +1 for CTC blank label
# Size of audio window in samples
if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_len value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_len value or resample your audio accordingly.'
''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)
# Stride for feature computations in samples
if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_step value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_step value or resample your audio accordingly.'
''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)
if FLAGS.one_shot_infer:
if not os.path.exists(FLAGS.one_shot_infer):
log_error('Path specified in --one_shot_infer is not a valid file.')
sys.exit(1)
if FLAGS.train_cudnn and FLAGS.load_cudnn:
log_error('Trying to use --train_cudnn, but --load_cudnn '
'was also specified. The --load_cudnn flag is only '
'needed when converting a CuDNN RNN checkpoint to '
'a CPU-capable graph. If your system is capable of '
'using CuDNN RNN, you can just specify the CuDNN RNN '
'checkpoint normally with --save_checkpoint_dir.')
sys.exit(1)
# If separate save and load flags were not specified, default to load and save
# from the same dir.
if not FLAGS.save_checkpoint_dir:
FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir
if not FLAGS.load_checkpoint_dir:
FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir
ConfigSingleton._config = c # pylint: disable=protected-access
| 38.066225
| 108
| 0.677279
|
from __future__ import absolute_import, division, print_function
import os
import sys
import tensorflow.compat.v1 as tfv1
from attrdict import AttrDict
from xdg import BaseDirectory as xdg
from src.flags import FLAGS
from .gpu import get_available_gpus
from .logging import log_error
from .text import Alphabet, UTF8Alphabet
from .helpers import parse_file_size
class ConfigSingleton:
_config = None
def __getattr__(self, name):
if not ConfigSingleton._config:
raise RuntimeError("Global configuration not yet initialized.")
if not hasattr(ConfigSingleton._config, name):
raise RuntimeError("Configuration option {} not found in config.".format(name))
return ConfigSingleton._config[name]
Config = ConfigSingleton()
def initialize_globals():
c = AttrDict()
FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)
if FLAGS.dropout_rate2 < 0:
FLAGS.dropout_rate2 = FLAGS.dropout_rate
if FLAGS.dropout_rate3 < 0:
FLAGS.dropout_rate3 = FLAGS.dropout_rate
if FLAGS.dropout_rate6 < 0:
FLAGS.dropout_rate6 = FLAGS.dropout_rate
if not FLAGS.checkpoint_dir:
FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))
if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:
FLAGS.load_train = 'auto'
if FLAGS.load_evaluate not in ['last', 'best', 'auto']:
FLAGS.load_evaluate = 'auto'
if not FLAGS.summary_dir:
FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))
c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,
inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))
# CPU device
c.cpu_device = '/cpu:0'
# Available GPU devices
c.available_devices = get_available_gpus(c.session_config)
# If there is no GPU available, we fall back to CPU based operation
if not c.available_devices:
c.available_devices = [c.cpu_device]
if FLAGS.utf8:
c.alphabet = UTF8Alphabet()
else:
c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))
# Geometric Constants
# ===================
# For an explanation of the meaning of the geometric constants, please refer to
# doc/Geometry.md
# Number of MFCC features
c.n_input = 26 # TODO: Determine this programmatically from the sample rate
# The number of frames in the context
c.n_context = 9 # TODO: Determine the optimal value using a validation data set
# Number of units in hidden layers
c.n_hidden = FLAGS.n_hidden
c.n_hidden_1 = c.n_hidden
c.n_hidden_2 = c.n_hidden
c.n_hidden_5 = c.n_hidden
# LSTM cell state dimension
c.n_cell_dim = c.n_hidden
# The number of units in the third layer, which feeds in to the LSTM
c.n_hidden_3 = c.n_cell_dim
# Units in the sixth layer = number of characters in the target language plus one
c.n_hidden_6 = c.alphabet.size() + 1 # +1 for CTC blank label
# Size of audio window in samples
if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_len value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_len value or resample your audio accordingly.'
''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)
# Stride for feature computations in samples
if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_step value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_step value or resample your audio accordingly.'
''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)
if FLAGS.one_shot_infer:
if not os.path.exists(FLAGS.one_shot_infer):
log_error('Path specified in --one_shot_infer is not a valid file.')
sys.exit(1)
if FLAGS.train_cudnn and FLAGS.load_cudnn:
log_error('Trying to use --train_cudnn, but --load_cudnn '
'was also specified. The --load_cudnn flag is only '
'needed when converting a CuDNN RNN checkpoint to '
'a CPU-capable graph. If your system is capable of '
'using CuDNN RNN, you can just specify the CuDNN RNN '
'checkpoint normally with --save_checkpoint_dir.')
sys.exit(1)
# If separate save and load flags were not specified, default to load and save
# from the same dir.
if not FLAGS.save_checkpoint_dir:
FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir
if not FLAGS.load_checkpoint_dir:
FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir
ConfigSingleton._config = c # pylint: disable=protected-access
| true
| true
|
f715901de4244e706505bcbc2ad3c07df8e07766
| 5,685
|
py
|
Python
|
lib/itertools.py
|
ralic/grumpy3
|
a471f7ba64167d5812c0f6701380f9f71fa937c3
|
[
"Apache-2.0"
] | null | null | null |
lib/itertools.py
|
ralic/grumpy3
|
a471f7ba64167d5812c0f6701380f9f71fa937c3
|
[
"Apache-2.0"
] | null | null | null |
lib/itertools.py
|
ralic/grumpy3
|
a471f7ba64167d5812c0f6701380f9f71fa937c3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for iterating over containers."""
import _collections
import sys
class chain(object):
def from_iterable(cls, iterables):
for it in iterables:
for element in it:
yield element
from_iterable = classmethod(from_iterable)
def __init__(self, *iterables):
if not iterables:
self.iterables = iter([[]])
else:
self.iterables = iter(iterables)
self.curriter = iter(next(self.iterables))
def __iter__(self):
return self
def __next__(self):
flag = True
while flag:
try:
ret = next(self.curriter)
flag = False
except StopIteration:
self.curriter = iter(next(self.iterables))
return ret
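# compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F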
def compress(data, selectors):
return (d for d,s in zip(data, selectors) if s)
def count(start=0, step=1):
n = start
while True:
yield n
n += step
def cycle(iterable):
saved = []
for element in iterable:
yield element
saved.append(element)
while saved:
for element in saved:
yield element
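# dropwhile(lambda x: x<5, [1,4,6,4,1]) --> 6 4 1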
def dropwhile(predicate, iterable):
iterable = iter(iterable)
for x in iterable:
if not predicate(x):
yield x
break
for x in iterable:
yield x
class groupby(object):
# [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
# [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
def __iter__(self):
return self
def __next__(self):
while self.currkey == self.tgtkey:
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
return (self.currkey, self._grouper(self.tgtkey))
def _grouper(self, tgtkey):
while self.currkey == tgtkey:
yield self.currvalue
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
def ifilter(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if not predicate(x):
yield x
def imap(function, *iterables):
iterables = list(map(iter, iterables))
while True:
args = [next(it) for it in iterables]
if function is None:
yield tuple(args)
else:
yield function(*args)
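# islice('ABCDEFG', 2, None) --> C D E F G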
def islice(iterable, *args):
s = slice(*args)
it = iter(range(s.start or 0, s.stop or sys.maxsize, s.step or 1))
nexti = next(it)
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
def izip(*iterables):
iterators = list(map(iter, iterables))
while iterators:
yield tuple(map(next, iterators))
class ZipExhausted(Exception):
pass
def izip_longest(*args, **kwds):
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
counter = [len(args) - 1]
def sentinel():
if not counter[0]:
raise ZipExhausted
counter[0] -= 1
yield fillvalue
fillers = repeat(fillvalue)
iterators = [chain(it, sentinel(), fillers) for it in args]
try:
while iterators:
yield tuple(map(next, iterators))
except ZipExhausted:
pass
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def permutations(iterable, r=None):
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(list(range(n)), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
def combinations(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in permutations(list(range(n)), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations_with_replacement(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in product(list(range(n)), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def repeat(object, times=None):
if times is None:
while True:
yield object
else:
for i in range(times):
yield object
def starmap(function, iterable):
for args in iterable:
yield function(*args)
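# takewhile(lambda x: x<5, [1,4,6,4,1]) --> 1 4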
def takewhile(predicate, iterable):
for x in iterable:
if predicate(x):
yield x
else:
break
def tee(iterable, n=2):
it = iter(iterable)
deques = [_collections.deque() for i in range(n)]
def gen(mydeque):
while True:
if not mydeque:
newval = next(it)
for d in deques:
d.append(newval)
yield mydeque.popleft()
return tuple(gen(d) for d in deques)
| 23.491736
| 74
| 0.65277
|
import _collections
import sys
class chain(object):
def from_iterable(cls, iterables):
for it in iterables:
for element in it:
yield element
from_iterable = classmethod(from_iterable)
def __init__(self, *iterables):
if not iterables:
self.iterables = iter([[]])
else:
self.iterables = iter(iterables)
self.curriter = iter(next(self.iterables))
def __iter__(self):
return self
def __next__(self):
flag = True
while flag:
try:
ret = next(self.curriter)
flag = False
except StopIteration:
self.curriter = iter(next(self.iterables))
return ret
def compress(data, selectors):
return (d for d,s in zip(data, selectors) if s)
def count(start=0, step=1):
n = start
while True:
yield n
n += step
def cycle(iterable):
saved = []
for element in iterable:
yield element
saved.append(element)
while saved:
for element in saved:
yield element
def dropwhile(predicate, iterable):
iterable = iter(iterable)
for x in iterable:
if not predicate(x):
yield x
break
for x in iterable:
yield x
class groupby(object):
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
def __iter__(self):
return self
def __next__(self):
while self.currkey == self.tgtkey:
self.currvalue = next(self.it)
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
return (self.currkey, self._grouper(self.tgtkey))
def _grouper(self, tgtkey):
while self.currkey == tgtkey:
yield self.currvalue
self.currvalue = next(self.it)
self.currkey = self.keyfunc(self.currvalue)
def ifilter(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if not predicate(x):
yield x
def imap(function, *iterables):
iterables = list(map(iter, iterables))
while True:
args = [next(it) for it in iterables]
if function is None:
yield tuple(args)
else:
yield function(*args)
def islice(iterable, *args):
s = slice(*args)
it = iter(range(s.start or 0, s.stop or sys.maxsize, s.step or 1))
nexti = next(it)
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
def izip(*iterables):
iterators = list(map(iter, iterables))
while iterators:
yield tuple(map(next, iterators))
class ZipExhausted(Exception):
pass
def izip_longest(*args, **kwds):
fillvalue = kwds.get('fillvalue')
counter = [len(args) - 1]
def sentinel():
if not counter[0]:
raise ZipExhausted
counter[0] -= 1
yield fillvalue
fillers = repeat(fillvalue)
iterators = [chain(it, sentinel(), fillers) for it in args]
try:
while iterators:
yield tuple(map(next, iterators))
except ZipExhausted:
pass
def product(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def permutations(iterable, r=None):
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(list(range(n)), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
def combinations(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in permutations(list(range(n)), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations_with_replacement(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in product(list(range(n)), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def repeat(object, times=None):
if times is None:
while True:
yield object
else:
for i in range(times):
yield object
def starmap(function, iterable):
for args in iterable:
yield function(*args)
def takewhile(predicate, iterable):
for x in iterable:
if predicate(x):
yield x
else:
break
def tee(iterable, n=2):
it = iter(iterable)
deques = [_collections.deque() for i in range(n)]
def gen(mydeque):
while True:
if not mydeque:
newval = next(it)
for d in deques:
d.append(newval)
yield mydeque.popleft()
return tuple(gen(d) for d in deques)
| true
| true
|
f7159091f18210b97ef9f6170f617a8643d4d010
| 1,414
|
py
|
Python
|
hbi/server/tornado_server.py
|
Glutexo/host-inventory
|
558b77eff633e5ec7cdb45393e767e4a05bca470
|
[
"Apache-2.0"
] | 1
|
2018-09-17T13:57:55.000Z
|
2018-09-17T13:57:55.000Z
|
hbi/server/tornado_server.py
|
Glutexo/host-inventory
|
558b77eff633e5ec7cdb45393e767e4a05bca470
|
[
"Apache-2.0"
] | 3
|
2018-10-02T10:05:12.000Z
|
2018-10-10T09:33:47.000Z
|
hbi/server/tornado_server.py
|
Glutexo/host-inventory
|
558b77eff633e5ec7cdb45393e767e4a05bca470
|
[
"Apache-2.0"
] | 3
|
2018-08-15T16:50:51.000Z
|
2018-09-26T08:52:44.000Z
|
import json, os
from threading import Thread
from tornado.ioloop import IOLoop
import tornado.web
from hbi.model import Host, Filter
from hbi.server import Service
class RootHandler(tornado.web.RequestHandler):
def get(self):
self.write("boop")
class EntitiesPoster(tornado.web.RequestHandler):
def post(self):
hosts_json = json.loads(self.request.body)
hosts = (Host.from_json(h) for h in hosts_json)
ret = self.application.service.create_or_update(hosts)
self.write(json.dumps([h.to_json() for h in ret]))
class EntitiesSearcher(tornado.web.RequestHandler):
def post(self):
filters_json = json.loads(self.request.body) if self.request.body else None
filters = [Filter.from_json(h) for h in filters_json] if filters_json else None
ret = self.application.service.get(filters)
self.write(json.dumps([h.to_json() for h in ret]))
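# Build the Tornado application, start its IOLoop on a background thread and
# return both so callers (and tests) can stop the loop later.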
def serve_tornado():
app = tornado.web.Application([
(r"/", RootHandler),
(r"/entities/search", EntitiesSearcher),
(r"/entities", EntitiesPoster),
])
app.listen(int(os.environ.get("PORT", "50051")))
app.service = Service()
loop = IOLoop.current()
class TornadoRunThread(Thread):
def run(self):
loop.start()
TornadoRunThread().start()
return app, loop
if __name__ == "__main__":
app, loop = serve_tornado()
| 25.709091
| 87
| 0.66761
|
import json, os
from threading import Thread
from tornado.ioloop import IOLoop
import tornado.web
from hbi.model import Host, Filter
from hbi.server import Service
class RootHandler(tornado.web.RequestHandler):
def get(self):
self.write("boop")
class EntitiesPoster(tornado.web.RequestHandler):
def post(self):
hosts_json = json.loads(self.request.body)
hosts = (Host.from_json(h) for h in hosts_json)
ret = self.application.service.create_or_update(hosts)
self.write(json.dumps([h.to_json() for h in ret]))
class EntitiesSearcher(tornado.web.RequestHandler):
def post(self):
filters_json = json.loads(self.request.body) if self.request.body else None
filters = [Filter.from_json(h) for h in filters_json] if filters_json else None
ret = self.application.service.get(filters)
self.write(json.dumps([h.to_json() for h in ret]))
def serve_tornado():
app = tornado.web.Application([
(r"/", RootHandler),
(r"/entities/search", EntitiesSearcher),
(r"/entities", EntitiesPoster),
])
app.listen(int(os.environ.get("PORT", "50051")))
app.service = Service()
loop = IOLoop.current()
class TornadoRunThread(Thread):
def run(self):
loop.start()
TornadoRunThread().start()
return app, loop
if __name__ == "__main__":
app, loop = serve_tornado()
| true
| true
|
f71590e5707ba2a3e6cb07b4a5957c674ad9a1d3
| 4,112
|
py
|
Python
|
members/management/commands/sent-invite.py
|
leonrenkema/makerspaceleiden-crm
|
36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c
|
[
"Apache-2.0"
] | 5
|
2019-03-12T21:38:32.000Z
|
2021-11-06T15:26:56.000Z
|
members/management/commands/sent-invite.py
|
leonrenkema/makerspaceleiden-crm
|
36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c
|
[
"Apache-2.0"
] | 33
|
2019-01-21T15:54:50.000Z
|
2021-05-18T17:54:52.000Z
|
members/management/commands/sent-invite.py
|
leonrenkema/makerspaceleiden-crm
|
36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c
|
[
"Apache-2.0"
] | 5
|
2019-01-21T15:47:26.000Z
|
2021-09-22T07:14:34.000Z
|
from django.core.management.base import BaseCommand, CommandError
from simple_history.models import HistoricalRecords
from members.models import User
from members.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.conf import settings
from django.core.mail import EmailMessage
import sys, os
from datetime import datetime
"""
Sent invites; to just one user, or all users
in the system,
"""
def reset_password(
email,
reset=False,
from_email=settings.DEFAULT_FROM_EMAIL,
template="members/email_invite.txt",
subject_template="members/email_invite_subject.txt",
):
try:
user = User.objects.get(email=email)
except Exception as e:
print("No user with email address <{}> found.".format(email), file=sys.stderr)
return False
if reset:
# Set it to an unguessable one - as unusable blocks email sending.
# user.set_unusable_password()
user.set_password(User.objects.make_random_password())
user.changeReason = "Locked it from the sent-invite command."
user.save()
form = PasswordResetForm({"email": email})
if not form.is_valid():
raise Exception("Eh - internal issues")
try:
form.save(
from_email=from_email,
email_template_name=template,
subject_template_name=subject_template,
)
print("{} - Email sent.".format(email))
except Exception as e:
print("Sending to <{}> failed: {}".format(email, e), file=sys.stderr)
return False
return True
class Command(BaseCommand):
help = "Sent invite to email adddress(es) provided - or read them from stdin."
def add_arguments(self, parser):
parser.add_argument("email", nargs="*", type=str)
parser.add_argument(
"--all",
action="store_true",
dest="all",
help="Sent a poll to -everyone-. Ignores anything specified on stdin/arguments",
)
parser.add_argument(
"--reset",
action="store_true",
dest="reset",
help="Also reset/block the current account. So any (old) password will not work any longer.",
)
parser.add_argument(
"--save",
dest="save",
type=str,
help="Save the message as rfc822 blobs rather than sending. Useful as we sort out dkim on the server. Pass the output directory as an argument",
)
parser.add_argument(
"--nevers",
dest="nevers",
action="store_true",
help="Skip people that have logged in at least once. Only valid in conjunction wit the --all options.",
)
def handle(self, *args, **options):
rc = 0
if options["save"]:
settings.EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
settings.EMAIL_FILE_PATH = options["save"]
if options["all"]:
if options["email"]:
print(
"The option --all cannot be used with additional emails specified as arguments.",
file=sys.stderr,
)
rc = 1
else:
for user in User.objects.all():
if options["nevers"] and user.last_login:
print(
"Skipping - login {} seen {}".format(
user.name, user.last_login.strftime("%Y-%m-%d %H:%M:%S")
)
)
continue
rc |= not reset_password(user.email, options["reset"])
elif options["email"]:
for email in options["email"]:
rc |= not reset_password(email, options["reset"])
else:
for email in sys.stdin:
rc |= not reset_password(email, options["reset"])
# if options['save']:
# for f in os.listdir(options['save']):
# print(f)
sys.exit(rc)
| 33.16129
| 156
| 0.56785
|
from django.core.management.base import BaseCommand, CommandError
from simple_history.models import HistoricalRecords
from members.models import User
from members.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.conf import settings
from django.core.mail import EmailMessage
import sys, os
from datetime import datetime
def reset_password(
email,
reset=False,
from_email=settings.DEFAULT_FROM_EMAIL,
template="members/email_invite.txt",
subject_template="members/email_invite_subject.txt",
):
try:
user = User.objects.get(email=email)
except Exception as e:
print("No user with email address <{}> found.".format(email), file=sys.stderr)
return False
if reset:
user.set_password(User.objects.make_random_password())
user.changeReason = "Locked it from the sent-invite command."
user.save()
form = PasswordResetForm({"email": email})
if not form.is_valid():
raise Exception("Eh - internal issues")
try:
form.save(
from_email=from_email,
email_template_name=template,
subject_template_name=subject_template,
)
print("{} - Email sent.".format(email))
except Exception as e:
print("Sending to <{}> failed: {}".format(email, e), file=sys.stderr)
return False
return True
class Command(BaseCommand):
help = "Sent invite to email adddress(es) provided - or read them from stdin."
def add_arguments(self, parser):
parser.add_argument("email", nargs="*", type=str)
parser.add_argument(
"--all",
action="store_true",
dest="all",
help="Sent a poll to -everyone-. Ignores anything specified on stdin/arguments",
)
parser.add_argument(
"--reset",
action="store_true",
dest="reset",
help="Also reset/block the current account. So any (old) password will not work any longer.",
)
parser.add_argument(
"--save",
dest="save",
type=str,
help="Save the message as rfc822 blobs rather than sending. Useful as we sort out dkim on the server. Pass the output directory as an argument",
)
parser.add_argument(
"--nevers",
dest="nevers",
action="store_true",
help="Skip people that have logged in at least once. Only valid in conjunction wit the --all options.",
)
def handle(self, *args, **options):
rc = 0
if options["save"]:
settings.EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
settings.EMAIL_FILE_PATH = options["save"]
if options["all"]:
if options["email"]:
print(
"The option --all cannot be used with additional emails specified as arguments.",
file=sys.stderr,
)
rc = 1
else:
for user in User.objects.all():
if options["nevers"] and user.last_login:
print(
"Skipping - login {} seen {}".format(
user.name, user.last_login.strftime("%Y-%m-%d %H:%M:%S")
)
)
continue
rc |= not reset_password(user.email, options["reset"])
elif options["email"]:
for email in options["email"]:
rc |= not reset_password(email, options["reset"])
else:
for email in sys.stdin:
rc |= not reset_password(email, options["reset"])
sys.exit(rc)
| true
| true
|
f7159115d342958270b72c812e03dd46e1a80fe8
| 23,723
|
py
|
Python
|
src/experiment_collection_core/service_pb2.py
|
AsciiShell/experiment_collection
|
86397cae1427c49f30e8af2d6dfb7a15c3f3494d
|
[
"MIT"
] | 2
|
2020-09-30T21:42:35.000Z
|
2020-11-21T17:58:40.000Z
|
src/experiment_collection_core/service_pb2.py
|
AsciiShell/experiment_collection
|
86397cae1427c49f30e8af2d6dfb7a15c3f3494d
|
[
"MIT"
] | null | null | null |
src/experiment_collection_core/service_pb2.py
|
AsciiShell/experiment_collection
|
86397cae1427c49f30e8af2d6dfb7a15c3f3494d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: experiment_collection_core/service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='experiment_collection_core/service.proto',
package='',
syntax='proto3',
serialized_options=b'\n\032experiment_collection_core',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n(experiment_collection_core/service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"e\n\nExperiment\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06params\x18\x03 \x01(\t\x12\x0f\n\x07metrics\x18\x04 \x01(\t\"H\n\x10SimpleExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\"3\n\x0fSimpleNamespace\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\x1c\n\x0bSimpleToken\x12\r\n\x05token\x18\x01 \x01(\t\",\n\x0bSimpleReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"R\n\rAddExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x1f\n\nexperiment\x18\x03 \x01(\x0b\x32\x0b.Experiment\"S\n\x10\x45xperimentsReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12 \n\x0b\x65xperiments\x18\x03 \x03(\x0b\x32\x0b.Experiment\"K\n\x12GrantAccessRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x13\n\x0bother_token\x18\x03 \x01(\t\"b\n\x18ReserveExperimentRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\x12\x10\n\x08\x64uration\x18\x04 \x01(\r2\xc3\x03\n\x11\x45xperimentService\x12\x32\n\x10\x43reateExperiment\x12\x0e.AddExperiment\x1a\x0c.SimpleReply\"\x00\x12>\n\x11ReserveExperiment\x12\x19.ReserveExperimentRequest\x1a\x0c.SimpleReply\"\x00\x12\x35\n\x10\x44\x65leteExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x34\n\x0f\x43heckExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x37\n\x0eGetExperiments\x12\x10.SimpleNamespace\x1a\x11.ExperimentsReply\"\x00\x12\x33\n\x0f\x43reateNamespace\x12\x10.SimpleNamespace\x1a\x0c.SimpleReply\"\x00\x12+\n\x0bRevokeToken\x12\x0c.SimpleToken\x1a\x0c.SimpleReply\"\x00\x12\x32\n\x0bGrantAccess\x12\x13.GrantAccessRequest\x1a\x0c.SimpleReply\"\x00\x42\x1c\n\x1a\x65xperiment_collection_coreb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Experiment.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='Experiment.time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='params', full_name='Experiment.params', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metrics', full_name='Experiment.metrics', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=178,
)
_SIMPLEEXPERIMENT = _descriptor.Descriptor(
name='SimpleExperiment',
full_name='SimpleExperiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleExperiment.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleExperiment.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='SimpleExperiment.experiment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=252,
)
_SIMPLENAMESPACE = _descriptor.Descriptor(
name='SimpleNamespace',
full_name='SimpleNamespace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleNamespace.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleNamespace.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=254,
serialized_end=305,
)
_SIMPLETOKEN = _descriptor.Descriptor(
name='SimpleToken',
full_name='SimpleToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleToken.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=307,
serialized_end=335,
)
_SIMPLEREPLY = _descriptor.Descriptor(
name='SimpleReply',
full_name='SimpleReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='SimpleReply.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='SimpleReply.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=381,
)
_ADDEXPERIMENT = _descriptor.Descriptor(
name='AddExperiment',
full_name='AddExperiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='AddExperiment.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='AddExperiment.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='AddExperiment.experiment', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=465,
)
_EXPERIMENTSREPLY = _descriptor.Descriptor(
name='ExperimentsReply',
full_name='ExperimentsReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ExperimentsReply.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='ExperimentsReply.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiments', full_name='ExperimentsReply.experiments', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=467,
serialized_end=550,
)
_GRANTACCESSREQUEST = _descriptor.Descriptor(
name='GrantAccessRequest',
full_name='GrantAccessRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='GrantAccessRequest.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='GrantAccessRequest.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='other_token', full_name='GrantAccessRequest.other_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=552,
serialized_end=627,
)
_RESERVEEXPERIMENTREQUEST = _descriptor.Descriptor(
name='ReserveExperimentRequest',
full_name='ReserveExperimentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='ReserveExperimentRequest.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='ReserveExperimentRequest.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='ReserveExperimentRequest.experiment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='ReserveExperimentRequest.duration', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=629,
serialized_end=727,
)
_EXPERIMENT.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ADDEXPERIMENT.fields_by_name['experiment'].message_type = _EXPERIMENT
_EXPERIMENTSREPLY.fields_by_name['experiments'].message_type = _EXPERIMENT
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleExperiment'] = _SIMPLEEXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleNamespace'] = _SIMPLENAMESPACE
DESCRIPTOR.message_types_by_name['SimpleToken'] = _SIMPLETOKEN
DESCRIPTOR.message_types_by_name['SimpleReply'] = _SIMPLEREPLY
DESCRIPTOR.message_types_by_name['AddExperiment'] = _ADDEXPERIMENT
DESCRIPTOR.message_types_by_name['ExperimentsReply'] = _EXPERIMENTSREPLY
DESCRIPTOR.message_types_by_name['GrantAccessRequest'] = _GRANTACCESSREQUEST
DESCRIPTOR.message_types_by_name['ReserveExperimentRequest'] = _RESERVEEXPERIMENTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), {
'DESCRIPTOR' : _EXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
})
_sym_db.RegisterMessage(Experiment)
SimpleExperiment = _reflection.GeneratedProtocolMessageType('SimpleExperiment', (_message.Message,), {
'DESCRIPTOR' : _SIMPLEEXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleExperiment)
})
_sym_db.RegisterMessage(SimpleExperiment)
SimpleNamespace = _reflection.GeneratedProtocolMessageType('SimpleNamespace', (_message.Message,), {
'DESCRIPTOR' : _SIMPLENAMESPACE,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleNamespace)
})
_sym_db.RegisterMessage(SimpleNamespace)
SimpleToken = _reflection.GeneratedProtocolMessageType('SimpleToken', (_message.Message,), {
'DESCRIPTOR' : _SIMPLETOKEN,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleToken)
})
_sym_db.RegisterMessage(SimpleToken)
SimpleReply = _reflection.GeneratedProtocolMessageType('SimpleReply', (_message.Message,), {
'DESCRIPTOR' : _SIMPLEREPLY,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleReply)
})
_sym_db.RegisterMessage(SimpleReply)
AddExperiment = _reflection.GeneratedProtocolMessageType('AddExperiment', (_message.Message,), {
'DESCRIPTOR' : _ADDEXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:AddExperiment)
})
_sym_db.RegisterMessage(AddExperiment)
ExperimentsReply = _reflection.GeneratedProtocolMessageType('ExperimentsReply', (_message.Message,), {
'DESCRIPTOR' : _EXPERIMENTSREPLY,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:ExperimentsReply)
})
_sym_db.RegisterMessage(ExperimentsReply)
GrantAccessRequest = _reflection.GeneratedProtocolMessageType('GrantAccessRequest', (_message.Message,), {
'DESCRIPTOR' : _GRANTACCESSREQUEST,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:GrantAccessRequest)
})
_sym_db.RegisterMessage(GrantAccessRequest)
ReserveExperimentRequest = _reflection.GeneratedProtocolMessageType('ReserveExperimentRequest', (_message.Message,), {
'DESCRIPTOR' : _RESERVEEXPERIMENTREQUEST,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:ReserveExperimentRequest)
})
_sym_db.RegisterMessage(ReserveExperimentRequest)
DESCRIPTOR._options = None
_EXPERIMENTSERVICE = _descriptor.ServiceDescriptor(
name='ExperimentService',
full_name='ExperimentService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=730,
serialized_end=1181,
methods=[
_descriptor.MethodDescriptor(
name='CreateExperiment',
full_name='ExperimentService.CreateExperiment',
index=0,
containing_service=None,
input_type=_ADDEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ReserveExperiment',
full_name='ExperimentService.ReserveExperiment',
index=1,
containing_service=None,
input_type=_RESERVEEXPERIMENTREQUEST,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteExperiment',
full_name='ExperimentService.DeleteExperiment',
index=2,
containing_service=None,
input_type=_SIMPLEEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CheckExperiment',
full_name='ExperimentService.CheckExperiment',
index=3,
containing_service=None,
input_type=_SIMPLEEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetExperiments',
full_name='ExperimentService.GetExperiments',
index=4,
containing_service=None,
input_type=_SIMPLENAMESPACE,
output_type=_EXPERIMENTSREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateNamespace',
full_name='ExperimentService.CreateNamespace',
index=5,
containing_service=None,
input_type=_SIMPLENAMESPACE,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RevokeToken',
full_name='ExperimentService.RevokeToken',
index=6,
containing_service=None,
input_type=_SIMPLETOKEN,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GrantAccess',
full_name='ExperimentService.GrantAccess',
index=7,
containing_service=None,
input_type=_GRANTACCESSREQUEST,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_EXPERIMENTSERVICE)
DESCRIPTOR.services_by_name['ExperimentService'] = _EXPERIMENTSERVICE
# @@protoc_insertion_point(module_scope)
| 39.21157
| 2,040
| 0.75842
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='experiment_collection_core/service.proto',
package='',
syntax='proto3',
serialized_options=b'\n\032experiment_collection_core',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n(experiment_collection_core/service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"e\n\nExperiment\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06params\x18\x03 \x01(\t\x12\x0f\n\x07metrics\x18\x04 \x01(\t\"H\n\x10SimpleExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\"3\n\x0fSimpleNamespace\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\x1c\n\x0bSimpleToken\x12\r\n\x05token\x18\x01 \x01(\t\",\n\x0bSimpleReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"R\n\rAddExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x1f\n\nexperiment\x18\x03 \x01(\x0b\x32\x0b.Experiment\"S\n\x10\x45xperimentsReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12 \n\x0b\x65xperiments\x18\x03 \x03(\x0b\x32\x0b.Experiment\"K\n\x12GrantAccessRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x13\n\x0bother_token\x18\x03 \x01(\t\"b\n\x18ReserveExperimentRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\x12\x10\n\x08\x64uration\x18\x04 \x01(\r2\xc3\x03\n\x11\x45xperimentService\x12\x32\n\x10\x43reateExperiment\x12\x0e.AddExperiment\x1a\x0c.SimpleReply\"\x00\x12>\n\x11ReserveExperiment\x12\x19.ReserveExperimentRequest\x1a\x0c.SimpleReply\"\x00\x12\x35\n\x10\x44\x65leteExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x34\n\x0f\x43heckExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x37\n\x0eGetExperiments\x12\x10.SimpleNamespace\x1a\x11.ExperimentsReply\"\x00\x12\x33\n\x0f\x43reateNamespace\x12\x10.SimpleNamespace\x1a\x0c.SimpleReply\"\x00\x12+\n\x0bRevokeToken\x12\x0c.SimpleToken\x1a\x0c.SimpleReply\"\x00\x12\x32\n\x0bGrantAccess\x12\x13.GrantAccessRequest\x1a\x0c.SimpleReply\"\x00\x42\x1c\n\x1a\x65xperiment_collection_coreb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Experiment.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='Experiment.time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='params', full_name='Experiment.params', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metrics', full_name='Experiment.metrics', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=178,
)
_SIMPLEEXPERIMENT = _descriptor.Descriptor(
name='SimpleExperiment',
full_name='SimpleExperiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleExperiment.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleExperiment.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='SimpleExperiment.experiment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=252,
)
_SIMPLENAMESPACE = _descriptor.Descriptor(
name='SimpleNamespace',
full_name='SimpleNamespace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleNamespace.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleNamespace.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=254,
serialized_end=305,
)
_SIMPLETOKEN = _descriptor.Descriptor(
name='SimpleToken',
full_name='SimpleToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleToken.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=307,
serialized_end=335,
)
_SIMPLEREPLY = _descriptor.Descriptor(
name='SimpleReply',
full_name='SimpleReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='SimpleReply.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='SimpleReply.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=381,
)
_ADDEXPERIMENT = _descriptor.Descriptor(
name='AddExperiment',
full_name='AddExperiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='AddExperiment.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='AddExperiment.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='AddExperiment.experiment', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=465,
)
_EXPERIMENTSREPLY = _descriptor.Descriptor(
name='ExperimentsReply',
full_name='ExperimentsReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ExperimentsReply.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='ExperimentsReply.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiments', full_name='ExperimentsReply.experiments', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=467,
serialized_end=550,
)
_GRANTACCESSREQUEST = _descriptor.Descriptor(
name='GrantAccessRequest',
full_name='GrantAccessRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='GrantAccessRequest.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='GrantAccessRequest.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='other_token', full_name='GrantAccessRequest.other_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=552,
serialized_end=627,
)
_RESERVEEXPERIMENTREQUEST = _descriptor.Descriptor(
name='ReserveExperimentRequest',
full_name='ReserveExperimentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='ReserveExperimentRequest.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='ReserveExperimentRequest.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='ReserveExperimentRequest.experiment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='ReserveExperimentRequest.duration', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=629,
serialized_end=727,
)
_EXPERIMENT.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ADDEXPERIMENT.fields_by_name['experiment'].message_type = _EXPERIMENT
_EXPERIMENTSREPLY.fields_by_name['experiments'].message_type = _EXPERIMENT
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleExperiment'] = _SIMPLEEXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleNamespace'] = _SIMPLENAMESPACE
DESCRIPTOR.message_types_by_name['SimpleToken'] = _SIMPLETOKEN
DESCRIPTOR.message_types_by_name['SimpleReply'] = _SIMPLEREPLY
DESCRIPTOR.message_types_by_name['AddExperiment'] = _ADDEXPERIMENT
DESCRIPTOR.message_types_by_name['ExperimentsReply'] = _EXPERIMENTSREPLY
DESCRIPTOR.message_types_by_name['GrantAccessRequest'] = _GRANTACCESSREQUEST
DESCRIPTOR.message_types_by_name['ReserveExperimentRequest'] = _RESERVEEXPERIMENTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), {
'DESCRIPTOR' : _EXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
})
_sym_db.RegisterMessage(Experiment)
SimpleExperiment = _reflection.GeneratedProtocolMessageType('SimpleExperiment', (_message.Message,), {
'DESCRIPTOR' : _SIMPLEEXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleExperiment)
})
_sym_db.RegisterMessage(SimpleExperiment)
SimpleNamespace = _reflection.GeneratedProtocolMessageType('SimpleNamespace', (_message.Message,), {
'DESCRIPTOR' : _SIMPLENAMESPACE,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleNamespace)
})
_sym_db.RegisterMessage(SimpleNamespace)
SimpleToken = _reflection.GeneratedProtocolMessageType('SimpleToken', (_message.Message,), {
'DESCRIPTOR' : _SIMPLETOKEN,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleToken)
})
_sym_db.RegisterMessage(SimpleToken)
SimpleReply = _reflection.GeneratedProtocolMessageType('SimpleReply', (_message.Message,), {
'DESCRIPTOR' : _SIMPLEREPLY,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleReply)
})
_sym_db.RegisterMessage(SimpleReply)
AddExperiment = _reflection.GeneratedProtocolMessageType('AddExperiment', (_message.Message,), {
'DESCRIPTOR' : _ADDEXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:AddExperiment)
})
_sym_db.RegisterMessage(AddExperiment)
ExperimentsReply = _reflection.GeneratedProtocolMessageType('ExperimentsReply', (_message.Message,), {
'DESCRIPTOR' : _EXPERIMENTSREPLY,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:ExperimentsReply)
})
_sym_db.RegisterMessage(ExperimentsReply)
GrantAccessRequest = _reflection.GeneratedProtocolMessageType('GrantAccessRequest', (_message.Message,), {
'DESCRIPTOR' : _GRANTACCESSREQUEST,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:GrantAccessRequest)
})
_sym_db.RegisterMessage(GrantAccessRequest)
ReserveExperimentRequest = _reflection.GeneratedProtocolMessageType('ReserveExperimentRequest', (_message.Message,), {
'DESCRIPTOR' : _RESERVEEXPERIMENTREQUEST,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:ReserveExperimentRequest)
})
_sym_db.RegisterMessage(ReserveExperimentRequest)
DESCRIPTOR._options = None
_EXPERIMENTSERVICE = _descriptor.ServiceDescriptor(
name='ExperimentService',
full_name='ExperimentService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=730,
serialized_end=1181,
methods=[
_descriptor.MethodDescriptor(
name='CreateExperiment',
full_name='ExperimentService.CreateExperiment',
index=0,
containing_service=None,
input_type=_ADDEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ReserveExperiment',
full_name='ExperimentService.ReserveExperiment',
index=1,
containing_service=None,
input_type=_RESERVEEXPERIMENTREQUEST,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteExperiment',
full_name='ExperimentService.DeleteExperiment',
index=2,
containing_service=None,
input_type=_SIMPLEEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CheckExperiment',
full_name='ExperimentService.CheckExperiment',
index=3,
containing_service=None,
input_type=_SIMPLEEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetExperiments',
full_name='ExperimentService.GetExperiments',
index=4,
containing_service=None,
input_type=_SIMPLENAMESPACE,
output_type=_EXPERIMENTSREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateNamespace',
full_name='ExperimentService.CreateNamespace',
index=5,
containing_service=None,
input_type=_SIMPLENAMESPACE,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RevokeToken',
full_name='ExperimentService.RevokeToken',
index=6,
containing_service=None,
input_type=_SIMPLETOKEN,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GrantAccess',
full_name='ExperimentService.GrantAccess',
index=7,
containing_service=None,
input_type=_GRANTACCESSREQUEST,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_EXPERIMENTSERVICE)
DESCRIPTOR.services_by_name['ExperimentService'] = _EXPERIMENTSERVICE
# @@protoc_insertion_point(module_scope)
| true
| true
|
f715925d591bd9957fdc6799ded885a4c997bb33
| 6,877
|
py
|
Python
|
p7/MIPSMicroSystem/my_files/test/auto_test.py
|
t0ush1/ComputerOrganization
|
8093949bbd3e48678cea832133e9bf8990bbdf27
|
[
"MIT"
] | 2
|
2022-03-06T06:05:24.000Z
|
2022-03-10T09:08:08.000Z
|
p7/MIPSMicroSystem/my_files/test/auto_test.py
|
t0ush1/ComputerOrganization
|
8093949bbd3e48678cea832133e9bf8990bbdf27
|
[
"MIT"
] | null | null | null |
p7/MIPSMicroSystem/my_files/test/auto_test.py
|
t0ush1/ComputerOrganization
|
8093949bbd3e48678cea832133e9bf8990bbdf27
|
[
"MIT"
] | null | null | null |
#############################################################
# win10 64bit
# python 3.9.6
#
# author: toush1 (20373944 he tianran)
#############################################################
import os
import re
# software path
xilinxPath = "G:\\ISE\\ise\\14.7\\ISE_DS\\ISE\\"
marsPath = "G:\\mars\\Mars_test.jar"
# prj path and test mode
myPrjPath = "D:\\study\\CO\\p7\\MIPSMicroSystem\\"
otherPrjPath = "D:\\study\\CO\\p7\\szxCPU\\"
start = 0
tot = 1
interrupt = 0x301c # 0: no interrupt; -1: run once per .text instruction, interrupting at each address; any other value (e.g. 0x301c): interrupt once at that PC
# use Mars to dump the .text segment and the exception handler (the direct Mars run for a reference log is left commented out below)
def runMars(asm, codeFilePath, out):
path = os.path.dirname(codeFilePath) + "\\"
code = path + "code.tmp"
handler = path + "handler.tmp"
os.system("java -jar " + marsPath + " db nc mc CompactDataAtZero a dump .text HexText " + code + " " + asm)
os.system("java -jar " + marsPath + " db nc mc CompactDataAtZero a dump 0x00004180-0x00005180 HexText " + handler + " " + asm)
# os.system("java -jar " + marsPath + " " + asm + " 4096 db nc mc CompactDataAtZero > " + out)
with open(code, "r") as codeSrc, open(handler, "r") as handlerSrc, open(codeFilePath, "w") as codeDst:
codeText = codeSrc.read()
textLen = len(codeText.splitlines())
codeDst.write(codeText)
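        # pad the .text image to 1120 words (0x1180 bytes) so the appended
        # handler dump lands at 0x3000 + 0x1180 = 0x4180, matching the
        # 0x00004180 dump range requested from Mars above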
for i in range(len(codeText.splitlines()), 1120):
codeDst.write("00000000\n")
codeDst.write(handlerSrc.read())
os.remove(code)
os.remove(handler)
return textLen
# generate the ISE project (.prj) file list and the simulation tcl batch file
def initISE(prj):
verilogPath = prj + "my_files\\cpu\\"
prjFilePath = prj + "mips.prj"
tclFilePath = prj + "mips.tcl"
with open(prjFilePath, "w") as prjFile, open(tclFilePath, "w") as tclFile:
for root, dirs, files in os.walk(verilogPath):
for fileName in files:
if re.match(r"[\w]*\.v", fileName):
prjFile.write("Verilog work " + root + "\\" + fileName + "\n")
tclFile.write("run 200us" + "\n" + "exit")
# change interrupt position in testbench
def changeIntPos(tbPath, intPos):
text = ""
with open(tbPath, "r") as testbench:
text = testbench.read()
if intPos == 0:
text = text.replace("need_interrupt = 1", "need_interrupt = 0")
else:
text = text.replace("need_interrupt = 0", "need_interrupt = 1")
text = re.sub(r"fixed_macroscopic_pc == 32'h[0-9a-f]+",
"fixed_macroscopic_pc == 32'h" + str(hex(intPos)).removeprefix("0x"), text)
with open(tbPath, "w") as testbench:
testbench.write(text)
# compile and run in ISE
def runISE(prj, out):
prjFilePath = prj + "mips.prj"
tclFilePath = prj + "mips.tcl"
exeFilePath = prj + "mips.exe"
logFilePath = prj + "log.txt"
os.chdir(prj)
os.environ['XILINX'] = xilinxPath
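    # fuse compiles the project sources into a standalone simulation executable;
    # the tcl batch ("run 200us" / "exit") then drives it and the log goes to out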
os.system(xilinxPath + "bin\\nt64\\fuse -nodebug -prj " + prjFilePath + " -o " + exeFilePath + " mips_tb > " + logFilePath)
os.system(exeFilePath + " -nolog -tclbatch " + tclFilePath + " > " + out)
# compare my answer log with the standard (reference) answer log
def cmp(interrupt, my, std, cmpRes):
with open(my, "r") as myFile, open(std, "r") as stdFile, open(cmpRes, "a") as out:
        myLogs = re.findall(r"@[^\n]*", myFile.read())
        stdLogs = re.findall(r"@[^\n]*", stdFile.read())
if interrupt != 0:
out.write("interrupt at " + str(hex(interrupt)) + " : \n")
print("interrupt at " + str(hex(interrupt)) + " : ")
else:
out.write("no interrupt : \n")
print("no interrupt : ")
for i in range(len(stdLogs)):
if i < len(myLogs) and myLogs[i] != stdLogs[i]:
out.write("\tOn Line " + str(i+1) + "\n")
out.write("\tGet\t\t: " + myLogs[i] + "\n")
out.write("\tExpect\t: " + stdLogs[i] + "\n")
print("\tOn Line " + str(i+1))
print("\tGet\t: " + myLogs[i])
print("\tExpect\t: " + stdLogs[i])
return False
elif i >= len(myLogs):
out.write("\tmyLogs is too short\n")
print("\tmyLogs is too short")
return False
if len(myLogs) > len(stdLogs):
out.write("\tmyLogs is too long\n")
print("\tmyLogs is too long")
return False
return True
# main
initISE(myPrjPath)
initISE(otherPrjPath)
testdataPath = myPrjPath + "my_files\\test\\data\\"
cmpResPath = testdataPath + "cmp_res.txt"
myTbPath = myPrjPath + "my_files\\cpu\\mips_tb.v"
otherTbPath = otherPrjPath + "my_files\\cpu\\mips_tb.v"
if os.path.exists(cmpResPath):
os.remove(cmpResPath)
for i in range(start, start + tot):
testpointPath = testdataPath + "testpoint\\testpoint" + str(i) + ".asm"
codePath = testdataPath + "code\\code" + str(i) + ".txt"
stdAnsPath = testdataPath + "std_ans\\std_ans" + str(i) + ".txt"
testAnsPath = testdataPath + "test_ans\\test_ans" + str(i) + ".txt"
textLen = runMars(testpointPath, codePath, stdAnsPath) - 4
with open(codePath, "r") as codeSrc, open(myPrjPath + "code.txt", "w") as codeDst1, open(otherPrjPath + "code.txt", "w") as codeDst2:
code = codeSrc.read()
codeDst1.write(code)
codeDst2.write(code)
with open(cmpResPath, "a") as out:
out.write("\n----------------------------------------------------------------\n")
out.write("\nin testpoint" + str(i) + " : \n\n")
print("\n----------------------------------------------------------------")
print("\nin testpoint" + str(i) + " : \n")
isAC = True
if interrupt == 0:
changeIntPos(myTbPath, 0)
changeIntPos(otherTbPath, 0)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
isAC = cmp(0, testAnsPath, stdAnsPath, cmpResPath)
elif interrupt == -1:
for j in range(1, textLen):
intPos = j * 4 + 0x3000
changeIntPos(myTbPath, intPos)
changeIntPos(otherTbPath, intPos)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
if not cmp(intPos, testAnsPath, stdAnsPath, cmpResPath):
isAC = False
break
else:
changeIntPos(myTbPath, interrupt)
changeIntPos(otherTbPath, interrupt)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
isAC = cmp(interrupt, testAnsPath, stdAnsPath, cmpResPath)
if isAC:
with open(cmpResPath, "a") as out:
out.write("\n\tAll Accepted\n")
print("\n\tAll Accepted")
print("\n----------------------------------------------------------------")
| 40.452941
| 138
| 0.54137
|
out.write("\tmyLogs is too long\n")
print("\tmyLogs is too long")
return False
return True
initISE(myPrjPath)
initISE(otherPrjPath)
testdataPath = myPrjPath + "my_files\\test\\data\\"
cmpResPath = testdataPath + "cmp_res.txt"
myTbPath = myPrjPath + "my_files\\cpu\\mips_tb.v"
otherTbPath = otherPrjPath + "my_files\\cpu\\mips_tb.v"
if os.path.exists(cmpResPath):
os.remove(cmpResPath)
for i in range(start, start + tot):
testpointPath = testdataPath + "testpoint\\testpoint" + str(i) + ".asm"
codePath = testdataPath + "code\\code" + str(i) + ".txt"
stdAnsPath = testdataPath + "std_ans\\std_ans" + str(i) + ".txt"
testAnsPath = testdataPath + "test_ans\\test_ans" + str(i) + ".txt"
textLen = runMars(testpointPath, codePath, stdAnsPath) - 4
with open(codePath, "r") as codeSrc, open(myPrjPath + "code.txt", "w") as codeDst1, open(otherPrjPath + "code.txt", "w") as codeDst2:
code = codeSrc.read()
codeDst1.write(code)
codeDst2.write(code)
with open(cmpResPath, "a") as out:
out.write("\n----------------------------------------------------------------\n")
out.write("\nin testpoint" + str(i) + " : \n\n")
print("\n----------------------------------------------------------------")
print("\nin testpoint" + str(i) + " : \n")
isAC = True
if interrupt == 0:
changeIntPos(myTbPath, 0)
changeIntPos(otherTbPath, 0)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
isAC = cmp(0, testAnsPath, stdAnsPath, cmpResPath)
elif interrupt == -1:
for j in range(1, textLen):
intPos = j * 4 + 0x3000
changeIntPos(myTbPath, intPos)
changeIntPos(otherTbPath, intPos)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
if not cmp(intPos, testAnsPath, stdAnsPath, cmpResPath):
isAC = False
break
else:
changeIntPos(myTbPath, interrupt)
changeIntPos(otherTbPath, interrupt)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
isAC = cmp(interrupt, testAnsPath, stdAnsPath, cmpResPath)
if isAC:
with open(cmpResPath, "a") as out:
out.write("\n\tAll Accepted\n")
print("\n\tAll Accepted")
print("\n----------------------------------------------------------------")
| true
| true
|
f715928065109e697649bf15722ccc0e6c0edfa4
| 7,114
|
py
|
Python
|
test/functional/tests/fault_injection/test_cache_insert_error.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | null | null | null |
test/functional/tests/fault_injection/test_cache_insert_error.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-21T22:05:26.000Z
|
2022-03-21T22:05:26.000Z
|
test/functional/tests/fault_injection/test_cache_insert_error.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from api.cas import casadm
from api.cas.cache_config import (
CacheMode,
CacheLineSize,
SeqCutOffPolicy,
CleaningPolicy,
CacheStatus,
)
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.device_mapper import ErrorDevice, DmTable
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine, ErrorFilter, VerifyMethod
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_insert_error(cache_mode, cache_line_size):
"""
title: Cache insert test with error device
description: |
Validate CAS ability to handle write errors while it tries to insert
cache lines. For lazy writes cache modes (WO, WB) issue only reads.
pass_criteria:
- No I/O errors returned to the user
- Cache write error statistics are counted properly
- No cache line gets inserted into cache
"""
with TestRun.step("Prepare core and cache"):
cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)
fio_cmd = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(core.size)
.block_size(cache_line_size)
.target(core)
.direct()
)
if cache_mode in [CacheMode.WB, CacheMode.WO]:
fio_cmd = fio_cmd.read_write(ReadWrite.randread)
else:
fio_cmd = fio_cmd.read_write(ReadWrite.randrw).verify_pattern().verify(VerifyMethod.pattern)
with TestRun.step("Run fio and verify no errors present"):
fio_errors = fio_cmd.run()[0].total_errors()
if fio_errors != 0:
TestRun.fail(f"Some I/O ended with errors {fio_errors}")
with TestRun.step("Check error statistics on cache"):
stats = cache.get_statistics()
occupancy = cache.get_occupancy().get_value()
if occupancy != 0:
TestRun.fail(f"Occupancy is not zero, but {occupancy}")
cache_writes = stats.block_stats.cache.writes / cache_line_size.value
cache_errors = stats.error_stats.cache.total
if cache_writes != cache_errors:
TestRun.fail(
f"Cache errors ({cache_errors}) should equal to number of"
f" requests to cache ({cache_writes})"
)
if cache_mode not in [CacheMode.WB, CacheMode.WO]:
with TestRun.step("Verify core device contents for non-lazy-writes cache modes"):
cache.stop()
fio_cmd.target(core_device).verify_only().run()
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WO])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
"""
title: Cache insert test with error device for writes on lazy writes cache mode
description: |
Validate CAS ability to handle write errors while it tries to insert
cache lines. This test is exclusively for lazy writes cache modes.
pass_criteria:
- I/O errors returned to user
- Cache automatically stops after encountering errors
- No cache line gets inserted into cache
"""
with TestRun.step("Prepare core and cache"):
cache, core, _ = prepare_configuration(cache_mode, cache_line_size)
with TestRun.step("Run fio and verify errors are present"):
fio_errors = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(core.size)
.block_size(cache_line_size)
.read_write(ReadWrite.randwrite)
.target(core)
.continue_on_error(ErrorFilter.io)
.direct()
.run()[0]
.total_errors()
)
if fio_errors == 0:
TestRun.fail(f"No I/O ended with error")
with TestRun.step("Check error statistics and state on cache"):
stats = cache.get_statistics()
occupancy = cache.get_occupancy().get_value()
if occupancy != 0:
TestRun.fail(f"Occupancy is not zero, but {occupancy}")
cache_writes = stats.block_stats.cache.writes / cache_line_size.value
cache_errors = stats.error_stats.cache.total
if cache_writes != 1:
TestRun.fail(f"There only should be one cache write attempt before cache stop")
if cache_writes != cache_errors:
TestRun.fail(
f"Cache errors ({cache_errors}) should equal to number of requests to"
f" cache ({cache_writes})"
)
state = cache.get_status()
if state != CacheStatus.not_running:
TestRun.fail(f"Cache should be in 'Not running' state, and it's {state}")
def prepare_configuration(cache_mode, cache_line_size):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
with TestRun.step("Creating cache partition"):
cache_device.create_partitions([Size(50, Unit.MebiByte)])
with TestRun.step("Creating cache error device"):
error_device = ErrorDevice("error", cache_device.partitions[0])
with TestRun.step("Starting cache to check metadata offset"):
cache = casadm.start_cache(error_device, cache_line_size=cache_line_size, force=True)
cache_size = cache.size
cache.stop()
with TestRun.step("Setting errors on non-metadata area"):
error_device.change_table(
DmTable.error_table(
offset=(cache_device.partitions[0].size - cache_size).get_value(Unit.Blocks512),
size=cache_size,
).fill_gaps(cache_device.partitions[0])
)
with TestRun.step("Create core partition with size of usable cache space"):
core_device.create_partitions([cache_size])
with TestRun.step("Starting and configuring cache"):
cache = casadm.start_cache(
error_device, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
)
result = cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
if result.exit_code:
TestRun.LOGGER.exception("Couldn't set seq cutoff policy")
result = cache.set_cleaning_policy(CleaningPolicy.nop)
if result.exit_code:
TestRun.LOGGER.exception("Couldn't set cleaning policy")
with TestRun.step("Stopping udev"):
Udev.disable()
with TestRun.step("Adding core device"):
core = cache.add_core(core_dev=core_device.partitions[0])
return cache, core, core_device.partitions[0]
| 38.247312
| 100
| 0.66826
|
import pytest
from api.cas import casadm
from api.cas.cache_config import (
CacheMode,
CacheLineSize,
SeqCutOffPolicy,
CleaningPolicy,
CacheStatus,
)
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.device_mapper import ErrorDevice, DmTable
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine, ErrorFilter, VerifyMethod
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_insert_error(cache_mode, cache_line_size):
with TestRun.step("Prepare core and cache"):
cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)
fio_cmd = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(core.size)
.block_size(cache_line_size)
.target(core)
.direct()
)
if cache_mode in [CacheMode.WB, CacheMode.WO]:
fio_cmd = fio_cmd.read_write(ReadWrite.randread)
else:
fio_cmd = fio_cmd.read_write(ReadWrite.randrw).verify_pattern().verify(VerifyMethod.pattern)
with TestRun.step("Run fio and verify no errors present"):
fio_errors = fio_cmd.run()[0].total_errors()
if fio_errors != 0:
TestRun.fail(f"Some I/O ended with errors {fio_errors}")
with TestRun.step("Check error statistics on cache"):
stats = cache.get_statistics()
occupancy = cache.get_occupancy().get_value()
if occupancy != 0:
TestRun.fail(f"Occupancy is not zero, but {occupancy}")
cache_writes = stats.block_stats.cache.writes / cache_line_size.value
cache_errors = stats.error_stats.cache.total
if cache_writes != cache_errors:
TestRun.fail(
f"Cache errors ({cache_errors}) should equal to number of"
f" requests to cache ({cache_writes})"
)
if cache_mode not in [CacheMode.WB, CacheMode.WO]:
with TestRun.step("Verify core device contents for non-lazy-writes cache modes"):
cache.stop()
fio_cmd.target(core_device).verify_only().run()
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WO])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
with TestRun.step("Prepare core and cache"):
cache, core, _ = prepare_configuration(cache_mode, cache_line_size)
with TestRun.step("Run fio and verify errors are present"):
fio_errors = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(core.size)
.block_size(cache_line_size)
.read_write(ReadWrite.randwrite)
.target(core)
.continue_on_error(ErrorFilter.io)
.direct()
.run()[0]
.total_errors()
)
if fio_errors == 0:
TestRun.fail(f"No I/O ended with error")
with TestRun.step("Check error statistics and state on cache"):
stats = cache.get_statistics()
occupancy = cache.get_occupancy().get_value()
if occupancy != 0:
TestRun.fail(f"Occupancy is not zero, but {occupancy}")
cache_writes = stats.block_stats.cache.writes / cache_line_size.value
cache_errors = stats.error_stats.cache.total
if cache_writes != 1:
TestRun.fail(f"There only should be one cache write attempt before cache stop")
if cache_writes != cache_errors:
TestRun.fail(
f"Cache errors ({cache_errors}) should equal to number of requests to"
f" cache ({cache_writes})"
)
state = cache.get_status()
if state != CacheStatus.not_running:
TestRun.fail(f"Cache should be in 'Not running' state, and it's {state}")
def prepare_configuration(cache_mode, cache_line_size):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
with TestRun.step("Creating cache partition"):
cache_device.create_partitions([Size(50, Unit.MebiByte)])
with TestRun.step("Creating cache error device"):
error_device = ErrorDevice("error", cache_device.partitions[0])
with TestRun.step("Starting cache to check metadata offset"):
cache = casadm.start_cache(error_device, cache_line_size=cache_line_size, force=True)
cache_size = cache.size
cache.stop()
with TestRun.step("Setting errors on non-metadata area"):
error_device.change_table(
DmTable.error_table(
offset=(cache_device.partitions[0].size - cache_size).get_value(Unit.Blocks512),
size=cache_size,
).fill_gaps(cache_device.partitions[0])
)
with TestRun.step("Create core partition with size of usable cache space"):
core_device.create_partitions([cache_size])
with TestRun.step("Starting and configuring cache"):
cache = casadm.start_cache(
error_device, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
)
result = cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
if result.exit_code:
TestRun.LOGGER.exception("Couldn't set seq cutoff policy")
result = cache.set_cleaning_policy(CleaningPolicy.nop)
if result.exit_code:
TestRun.LOGGER.exception("Couldn't set cleaning policy")
with TestRun.step("Stopping udev"):
Udev.disable()
with TestRun.step("Adding core device"):
core = cache.add_core(core_dev=core_device.partitions[0])
return cache, core, core_device.partitions[0]
| true
| true
|
f71592ac0589f8a0a4e9faf12a0a0f6c0ac061b2
| 2,240
|
py
|
Python
|
importo/fields/html.py
|
torchbox/django-importo
|
57c96951af624d2f6c9128c5689d55f1cc1f7019
|
[
"BSD-3-Clause"
] | 1
|
2021-12-09T15:10:50.000Z
|
2021-12-09T15:10:50.000Z
|
importo/fields/html.py
|
torchbox/django-importo
|
57c96951af624d2f6c9128c5689d55f1cc1f7019
|
[
"BSD-3-Clause"
] | null | null | null |
importo/fields/html.py
|
torchbox/django-importo
|
57c96951af624d2f6c9128c5689d55f1cc1f7019
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any, Mapping, Sequence
from urllib.parse import unquote_plus
import bleach
from importo.fields.base import Field
from importo.utils.html import tidy_html
class HTMLField(Field):
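    """
    Field that sanitises incoming HTML: the raw value is URL-unquoted, stripped
    of any tags/attributes outside the allow-lists below (via bleach), and then
    tidied (optionally dropping empty paragraphs, excess whitespace and
    line breaks).
    """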
allowed_tags = [
"a",
"abbr",
"acronym",
"b",
"bdi",
"blockquote",
"cite",
"code",
"dd",
"dl",
"dt",
"em",
"h2",
"h3",
"h4",
"h5",
"i",
"li",
"ol",
"p",
"small",
"span",
"strong",
"ul",
]
allowed_attrs = {
"a": ["class", "href", "target", "title"],
"abbr": ["title"],
"acronym": ["title"],
"cite": ["dir", "lang", "title"],
"span": ["dir", "class", "lang", "title"],
"h2": ["dir", "class", "lang", "title"],
"h3": ["dir", "class", "lang", "title"],
"h4": ["dir", "class", "lang", "title"],
"h5": ["dir", "class", "lang", "title"],
}
def __init__(
self,
*args,
allowed_tags: Sequence[str] = None,
allowed_attrs: Mapping[str, str] = None,
remove_empty_paragraphs: bool = True,
remove_excess_whitespace: bool = True,
remove_linebreaks: bool = False,
**kwargs,
):
if allowed_tags is not None:
self.allowed_tags = allowed_tags
if allowed_attrs is not None:
self.allowed_attrs = allowed_attrs
self.remove_empty_paragraphs = remove_empty_paragraphs
self.remove_excess_whitespace = remove_excess_whitespace
self.remove_linebreaks = remove_linebreaks
super().__init__(*args, **kwargs)
def to_python(self, value: Any) -> str:
value = unquote_plus(str(value))
# TODO: Add some way for the field to highlight/log when HTML is stripped
value = bleach.clean(
value, tags=self.allowed_tags, attributes=self.allowed_attrs, strip=True
)
return tidy_html(
value,
remove_empty_paragraphs=self.remove_empty_paragraphs,
remove_excess_whitespace=self.remove_excess_whitespace,
remove_linebreaks=self.remove_linebreaks,
)
| 27.317073
| 84
| 0.537054
|
from typing import Any, Mapping, Sequence
from urllib.parse import unquote_plus
import bleach
from importo.fields.base import Field
from importo.utils.html import tidy_html
class HTMLField(Field):
allowed_tags = [
"a",
"abbr",
"acronym",
"b",
"bdi",
"blockquote",
"cite",
"code",
"dd",
"dl",
"dt",
"em",
"h2",
"h3",
"h4",
"h5",
"i",
"li",
"ol",
"p",
"small",
"span",
"strong",
"ul",
]
allowed_attrs = {
"a": ["class", "href", "target", "title"],
"abbr": ["title"],
"acronym": ["title"],
"cite": ["dir", "lang", "title"],
"span": ["dir", "class", "lang", "title"],
"h2": ["dir", "class", "lang", "title"],
"h3": ["dir", "class", "lang", "title"],
"h4": ["dir", "class", "lang", "title"],
"h5": ["dir", "class", "lang", "title"],
}
def __init__(
self,
*args,
allowed_tags: Sequence[str] = None,
allowed_attrs: Mapping[str, str] = None,
remove_empty_paragraphs: bool = True,
remove_excess_whitespace: bool = True,
remove_linebreaks: bool = False,
**kwargs,
):
if allowed_tags is not None:
self.allowed_tags = allowed_tags
if allowed_attrs is not None:
self.allowed_attrs = allowed_attrs
self.remove_empty_paragraphs = remove_empty_paragraphs
self.remove_excess_whitespace = remove_excess_whitespace
self.remove_linebreaks = remove_linebreaks
super().__init__(*args, **kwargs)
def to_python(self, value: Any) -> str:
value = unquote_plus(str(value))
value = bleach.clean(
value, tags=self.allowed_tags, attributes=self.allowed_attrs, strip=True
)
return tidy_html(
value,
remove_empty_paragraphs=self.remove_empty_paragraphs,
remove_excess_whitespace=self.remove_excess_whitespace,
remove_linebreaks=self.remove_linebreaks,
)
| true
| true
|
f71595154e1ed423c34fbdbea424fd5fd9cd6d53
| 1,245
|
py
|
Python
|
myroot/global_config.py
|
pinoylearnpython/dev
|
3fd904c594a8c5cab7fd1fe2ad775fd519410a8a
|
[
"MIT"
] | 2
|
2019-10-29T07:41:38.000Z
|
2020-01-31T16:46:15.000Z
|
myroot/global_config.py
|
pinoylearnpython/dev
|
3fd904c594a8c5cab7fd1fe2ad775fd519410a8a
|
[
"MIT"
] | null | null | null |
myroot/global_config.py
|
pinoylearnpython/dev
|
3fd904c594a8c5cab7fd1fe2ad775fd519410a8a
|
[
"MIT"
] | 2
|
2019-04-23T04:40:07.000Z
|
2020-02-17T09:11:48.000Z
|
from django.conf import settings
def global_settings(request):
""" Return custom constant global variables to be
used widely for all of our apps. """
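    # This only takes effect when registered as a template context processor,
    # e.g. in settings.py:
    #   TEMPLATES[0]["OPTIONS"]["context_processors"].append(
    #       "myroot.global_config.global_settings")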
# Current user logged in info
cur_user_id = 0
cur_user_name = ''
cur_user_full_name = ''
if request.user.is_authenticated:
# Get user info
cur_user_id = request.user.id
cur_user_name = request.user.username
cur_user_full_name = request.user.first_name + " " + request.user.last_name
return{
'BASE_URL': settings.BASE_URL,
'SITE_SHORT_NAME': settings.SITE_SHORT_NAME,
'SITE_FULL_NAME': settings.SITE_FULL_NAME,
'SITE_YEAR_STARTED': settings.SITE_YEAR_STARTED,
'SITE_URL_HOME': settings.SITE_URL_HOME,
'SITE_SLOGAN': settings.SITE_SLOGAN,
'SITE_CONTACT_US': settings.SITE_CONTACT_US,
'MIN_CHARS_SEARCH': settings.MIN_CHARS_SEARCH,
'APP_URL_TOP_LOGO': settings.APP_URL_TOP_LOGO,
'GRECAP_SITE_KEY': settings.GRECAP_SITE_KEY,
'DEFAULT_AVATAR': settings.DEFAULT_AVATAR,
'CUR_USER_ID': cur_user_id,
'CUR_USER_name': cur_user_name,
'CUR_USER_full_name': cur_user_full_name.strip(),
}
| 35.571429
| 84
| 0.665863
|
from django.conf import settings
def global_settings(request):
cur_user_id = 0
cur_user_name = ''
cur_user_full_name = ''
if request.user.is_authenticated:
cur_user_id = request.user.id
cur_user_name = request.user.username
cur_user_full_name = request.user.first_name + " " + request.user.last_name
return{
'BASE_URL': settings.BASE_URL,
'SITE_SHORT_NAME': settings.SITE_SHORT_NAME,
'SITE_FULL_NAME': settings.SITE_FULL_NAME,
'SITE_YEAR_STARTED': settings.SITE_YEAR_STARTED,
'SITE_URL_HOME': settings.SITE_URL_HOME,
'SITE_SLOGAN': settings.SITE_SLOGAN,
'SITE_CONTACT_US': settings.SITE_CONTACT_US,
'MIN_CHARS_SEARCH': settings.MIN_CHARS_SEARCH,
'APP_URL_TOP_LOGO': settings.APP_URL_TOP_LOGO,
'GRECAP_SITE_KEY': settings.GRECAP_SITE_KEY,
'DEFAULT_AVATAR': settings.DEFAULT_AVATAR,
'CUR_USER_ID': cur_user_id,
'CUR_USER_name': cur_user_name,
'CUR_USER_full_name': cur_user_full_name.strip(),
}
| true
| true
|
f7159641e3e977f8f51e5cc647c57a31d0966efe
| 1,025
|
py
|
Python
|
server/src/models.py
|
Jobegiar99/Garden-Palooza
|
694acaf42a56f3ecfb2fa3912e3777ad44e3126e
|
[
"MIT"
] | 1
|
2021-08-02T23:33:50.000Z
|
2021-08-02T23:33:50.000Z
|
server/src/models.py
|
Jobegiar99/Garden-Palooza
|
694acaf42a56f3ecfb2fa3912e3777ad44e3126e
|
[
"MIT"
] | 61
|
2021-08-03T00:13:24.000Z
|
2021-08-20T17:38:36.000Z
|
server/src/models.py
|
Jobegiar99/Garden-Palooza
|
694acaf42a56f3ecfb2fa3912e3777ad44e3126e
|
[
"MIT"
] | 1
|
2021-08-22T03:32:42.000Z
|
2021-08-22T03:32:42.000Z
|
# flake8: noqa
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
db = SQLAlchemy()
class UserModel(db.Model):
__tablename__ = "user"
username = db.Column(db.String(36), primary_key=True)
password = db.Column(db.String(30))
def __init__(self, username, password):
self.username = username
self.password = password
def __repr__(self):
return f"<User {self.username}>"
class GardenModel(db.Model):
__tablename__ = "garden"
gardenName = db.Column(db.String(30), primary_key=True)
ownerName = db.Column(db.String(36), db.ForeignKey("user.username"))
# will improve this if we have enough time
firstLayer = db.Column(postgresql.ARRAY(db.Integer()))
secondLayer = db.Column(postgresql.ARRAY(db.Integer()))
    def __init__(self, gardenName, ownerName, firstLayer, secondLayer):
self.gardenName = gardenName
self.ownerName = ownerName
self.firstLayer = firstLayer
self.secondLayer = secondLayer
| 27.702703
| 72
| 0.693659
|
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
db = SQLAlchemy()
class UserModel(db.Model):
__tablename__ = "user"
username = db.Column(db.String(36), primary_key=True)
password = db.Column(db.String(30))
def __init__(self, username, password):
self.username = username
self.password = password
def __repr__(self):
return f"<User {self.username}>"
class GardenModel(db.Model):
__tablename__ = "garden"
gardenName = db.Column(db.String(30), primary_key=True)
ownerName = db.Column(db.String(36), db.ForeignKey("user.username"))
firstLayer = db.Column(postgresql.ARRAY(db.Integer()))
secondLayer = db.Column(postgresql.ARRAY(db.Integer()))
    def __init__(self, gardenName, ownerName, firstLayer, secondLayer):
self.gardenName = gardenName
self.ownerName = ownerName
self.firstLayer = firstLayer
self.secondLayer = secondLayer
| true
| true
|
f715967f3c28b129f56ec6481c8bda553b44d472
| 963
|
py
|
Python
|
lpot/ux/components/model/tensorflow/frozen_pb.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
lpot/ux/components/model/tensorflow/frozen_pb.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
lpot/ux/components/model/tensorflow/frozen_pb.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow frozen pb model."""
from ..model_type_getter import get_model_type
from .model import TensorflowModel as TFModel
class FrozenPbModel(TFModel):
"""Frozen pb model."""
@staticmethod
def supports_path(path: str) -> bool:
"""Check if given path is of supported model."""
return "frozen_pb" == get_model_type(path)
| 34.392857
| 74
| 0.73001
|
from ..model_type_getter import get_model_type
from .model import TensorflowModel as TFModel
class FrozenPbModel(TFModel):
@staticmethod
def supports_path(path: str) -> bool:
return "frozen_pb" == get_model_type(path)
| true
| true
|
f715970fd90159b33cf104a6f896c9d635be8d7d
| 981
|
py
|
Python
|
kafka_to_elastic/kafka_historique_montants_to_elastic.py
|
Neemys/BigCoin
|
13d76eaccf66fd8a50820bb835fe8b69c39a28af
|
[
"Apache-2.0"
] | null | null | null |
kafka_to_elastic/kafka_historique_montants_to_elastic.py
|
Neemys/BigCoin
|
13d76eaccf66fd8a50820bb835fe8b69c39a28af
|
[
"Apache-2.0"
] | 10
|
2018-03-22T09:21:11.000Z
|
2018-04-11T08:50:58.000Z
|
kafka_to_elastic/kafka_historique_montants_to_elastic.py
|
Neemys/BigCoin
|
13d76eaccf66fd8a50820bb835fe8b69c39a28af
|
[
"Apache-2.0"
] | 2
|
2018-03-30T09:52:48.000Z
|
2018-04-11T13:13:36.000Z
|
from bigcoin import bc_kafka,bc_elasticsearch
import json
import datetime
import signal
def generate_elastic_insert_from_messages(messages):
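    # Yields one bulk-style action dict per Kafka message; bc_es.send_messages()
    # consumes the generator and indexes the documents into Elasticsearch.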
for message in messages:
json_message = json.loads(message)
#value are in satoshi
yield {
'_index' : 'transaction_idx',
'_type': 'transaction',
'_id': json_message['index'],
'_source': {
'date': datetime.datetime.utcfromtimestamp(json_message['timestamp']),
'value': float(json_message["value"])/ 100000000,
'data_type': 'historique'
}
}
def main():
bc_consumer = bc_kafka.BCKafkaConsumer("historique_montants","python_historique_montants_consumer")
bc_es = bc_elasticsearch.BCElasticsearch()
while True:
messages = bc_consumer.get_messages()
if len(messages) == 0:
break
bc_es.send_messages(generate_elastic_insert_from_messages(messages))
bc_consumer.set_messages_read()
#Wait forever for a restart (will be killed then restarted)
signal.pause()
if __name__ == '__main__':
main()
| 26.513514
| 100
| 0.746177
|
from bigcoin import bc_kafka,bc_elasticsearch
import json
import datetime
import signal
def generate_elastic_insert_from_messages(messages):
for message in messages:
json_message = json.loads(message)
yield {
'_index' : 'transaction_idx',
'_type': 'transaction',
'_id': json_message['index'],
'_source': {
'date': datetime.datetime.utcfromtimestamp(json_message['timestamp']),
'value': float(json_message["value"])/ 100000000,
'data_type': 'historique'
}
}
def main():
bc_consumer = bc_kafka.BCKafkaConsumer("historique_montants","python_historique_montants_consumer")
bc_es = bc_elasticsearch.BCElasticsearch()
while True:
messages = bc_consumer.get_messages()
if len(messages) == 0:
break
bc_es.send_messages(generate_elastic_insert_from_messages(messages))
bc_consumer.set_messages_read()
signal.pause()
if __name__ == '__main__':
main()
| true
| true
|
f715986ba969fafbf1bb6c8a7b6a6295ca3828db
| 1,546
|
py
|
Python
|
mltemplate/ci/stages.py
|
vmarkovtsev/ml-repo-template
|
bf3596e2a1c319166092c1fd263ec28ceacc1dd1
|
[
"MIT"
] | null | null | null |
mltemplate/ci/stages.py
|
vmarkovtsev/ml-repo-template
|
bf3596e2a1c319166092c1fd263ec28ceacc1dd1
|
[
"MIT"
] | null | null | null |
mltemplate/ci/stages.py
|
vmarkovtsev/ml-repo-template
|
bf3596e2a1c319166092c1fd263ec28ceacc1dd1
|
[
"MIT"
] | null | null | null |
from mltemplate.ci.core import Stage
from mltemplate.ci.jobs import BumpVersionJob, PypiDeployJob, RunTestsJob, StyleCheckJob
class BumpVersionStage(Stage):
def __init__(self, name="bump-version", **kwargs):
super(BumpVersionStage, self).__init__(
name=name, jobs=[BumpVersionJob(stage=name, **kwargs)]
)
self.set_job_stages(name)
class StyleCheckStage(Stage):
def __init__(self, name="style", **kwargs):
super(StyleCheckStage, self).__init__(
name=name, jobs=[StyleCheckJob(stage=name, **kwargs)]
)
self.set_job_stages(name)
class PytestStage(Stage):
def __init__(self, name="test", python_versions=None, **kwargs):
self.python_versions = [3.6, 3.7, 3.8] if python_versions is None else python_versions
jobs = self._init_jobs(stage=name, **kwargs)
super(PytestStage, self).__init__(name=name, jobs=jobs)
def _init_jobs(self, stage, **kwargs):
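        # One RunTestsJob per Python version; only the job for the last version
        # uploads coverage ("codecov") in its after_success step.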
def init_test(v, codecov):
job = RunTestsJob(python_version=v, stage=stage, **kwargs)
if codecov:
job["after_success"] = ["codecov"]
return job
last_item = len(self.python_versions) - 1
return [init_test(v, i == last_item) for i, v in enumerate(self.python_versions)]
class PypiDeployStage(Stage):
def __init__(self, name="deploy", **kwargs):
super(PypiDeployStage, self).__init__(
name=name, jobs=[PypiDeployJob(stage=name, **kwargs)]
)
self.set_job_stages(name)
| 35.136364
| 94
| 0.650065
|
from mltemplate.ci.core import Stage
from mltemplate.ci.jobs import BumpVersionJob, PypiDeployJob, RunTestsJob, StyleCheckJob
class BumpVersionStage(Stage):
def __init__(self, name="bump-version", **kwargs):
super(BumpVersionStage, self).__init__(
name=name, jobs=[BumpVersionJob(stage=name, **kwargs)]
)
self.set_job_stages(name)
class StyleCheckStage(Stage):
def __init__(self, name="style", **kwargs):
super(StyleCheckStage, self).__init__(
name=name, jobs=[StyleCheckJob(stage=name, **kwargs)]
)
self.set_job_stages(name)
class PytestStage(Stage):
def __init__(self, name="test", python_versions=None, **kwargs):
self.python_versions = [3.6, 3.7, 3.8] if python_versions is None else python_versions
jobs = self._init_jobs(stage=name, **kwargs)
super(PytestStage, self).__init__(name=name, jobs=jobs)
def _init_jobs(self, stage, **kwargs):
def init_test(v, codecov):
job = RunTestsJob(python_version=v, stage=stage, **kwargs)
if codecov:
job["after_success"] = ["codecov"]
return job
last_item = len(self.python_versions) - 1
return [init_test(v, i == last_item) for i, v in enumerate(self.python_versions)]
class PypiDeployStage(Stage):
def __init__(self, name="deploy", **kwargs):
super(PypiDeployStage, self).__init__(
name=name, jobs=[PypiDeployJob(stage=name, **kwargs)]
)
self.set_job_stages(name)
| true
| true
|
f7159a946ae2267a79e3a78a56dd34aec97345e1
| 1,130
|
py
|
Python
|
simulator/event.py
|
djpetti/molecube
|
b7267803f080ed62e158fc5c1cfcff6beb709de7
|
[
"MIT"
] | 2
|
2018-09-11T21:09:22.000Z
|
2018-10-05T08:35:58.000Z
|
simulator/event.py
|
djpetti/molecube
|
b7267803f080ed62e158fc5c1cfcff6beb709de7
|
[
"MIT"
] | 24
|
2018-09-09T22:51:26.000Z
|
2018-11-29T22:49:57.000Z
|
simulator/event.py
|
djpetti/molecube
|
b7267803f080ed62e158fc5c1cfcff6beb709de7
|
[
"MIT"
] | 1
|
2018-10-16T20:01:20.000Z
|
2018-10-16T20:01:20.000Z
|
class Event(object):
""" Represents a GUI event. """
def __init__(self, tk_event):
"""
Args:
tk_event: The underlying Tkinter event to wrap. """
self._tk_event = tk_event
@classmethod
def get_identifier(cls):
"""
Returns:
The Tkinter identifier for this event. """
raise NotImplementedError("Must be implemented by subclass.")
class MouseEvent(Event):
""" Event involving the mouse. """
def get_pos(self):
"""
Returns:
The position of the mouse during the event, as (x, y). """
return (self._tk_event.x, self._tk_event.y)
class MouseDragEvent(MouseEvent):
""" Emitted every time the mouse is dragged with the primary button held down.
"""
@classmethod
def get_identifier(cls):
return "<B1-Motion>"
class MousePressEvent(MouseEvent):
""" Emitted every time the primary mouse button is pressed. """
@classmethod
def get_identifier(cls):
return "<Button-1>"
class MouseReleaseEvent(MouseEvent):
""" Emitted every time the primary mouse button is released. """
@classmethod
def get_identifier(cls):
return "<ButtonRelease-1>"
| 24.042553
| 80
| 0.676106
|
class Event(object):
def __init__(self, tk_event):
self._tk_event = tk_event
@classmethod
def get_identifier(cls):
raise NotImplementedError("Must be implemented by subclass.")
class MouseEvent(Event):
def get_pos(self):
return (self._tk_event.x, self._tk_event.y)
class MouseDragEvent(MouseEvent):
@classmethod
def get_identifier(cls):
return "<B1-Motion>"
class MousePressEvent(MouseEvent):
@classmethod
def get_identifier(cls):
return "<Button-1>"
class MouseReleaseEvent(MouseEvent):
@classmethod
def get_identifier(cls):
return "<ButtonRelease-1>"
| true
| true
|
f7159b75d0cdb78ddc25a9f3959376ef6d82d188
| 16,663
|
py
|
Python
|
connexion/operations/abstract.py
|
eyalkaspi/connexion
|
9e07c9d5ba554119c38e17d3afc120eec0c1e390
|
[
"Apache-2.0"
] | null | null | null |
connexion/operations/abstract.py
|
eyalkaspi/connexion
|
9e07c9d5ba554119c38e17d3afc120eec0c1e390
|
[
"Apache-2.0"
] | null | null | null |
connexion/operations/abstract.py
|
eyalkaspi/connexion
|
9e07c9d5ba554119c38e17d3afc120eec0c1e390
|
[
"Apache-2.0"
] | null | null | null |
import abc
import logging
from connexion.operations.secure import SecureOperation
from ..decorators.metrics import UWSGIMetricsCollector
from ..decorators.parameter import parameter_to_arg
from ..decorators.produces import BaseSerializer, Produces
from ..decorators.response import ResponseValidator
from ..decorators.validation import ParameterValidator, RequestBodyValidator
from ..utils import all_json, is_nullable, make_type
logger = logging.getLogger('connexion.operations.abstract')
DEFAULT_MIMETYPE = 'application/json'
VALIDATOR_MAP = {
'parameter': ParameterValidator,
'body': RequestBodyValidator,
'response': ResponseValidator,
}
class AbstractOperation(SecureOperation, metaclass=abc.ABCMeta):
"""
An API routes requests to an Operation by a (path, method) pair.
The operation uses a resolver to resolve its handler function.
We use the provided spec to do a bunch of heavy lifting before
(and after) we call security_schemes handler.
The registered handler function ends up looking something like:
@secure_endpoint
@validate_inputs
@deserialize_function_inputs
@serialize_function_outputs
@validate_outputs
def user_provided_handler_function(important, stuff):
if important:
serious_business(stuff)
"""
def __init__(self, api, method, path, operation, resolver,
app_security=None, security_schemes=None,
validate_responses=False, strict_validation=False,
randomize_endpoint=None, validator_map=None,
format_converters=None, pythonic_params=False,
uri_parser_class=None, pass_context_arg_name=None):
"""
:param api: api that this operation is attached to
:type api: apis.AbstractAPI
:param method: HTTP method
:type method: str
:param path:
:type path: str
:param operation: swagger operation object
:type operation: dict
:param resolver: Callable that maps operationID to a function
:param app_security: list of security rules the application uses by default
:type app_security: list
:param security_schemes: `Security Definitions Object
<https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#security-definitions-object>`_
:type security_schemes: dict
:param validate_responses: True enables validation. Validation errors generate HTTP 500 responses.
:type validate_responses: bool
:param strict_validation: True enables validation on invalid request parameters
:type strict_validation: bool
:param randomize_endpoint: number of random characters to append to operation name
:type randomize_endpoint: integer
:param validator_map: Custom validators for the types "parameter", "body" and "response".
:type validator_map: dict
:param format_converters: Custom value converters based on the schema format of properties.
:type format_converters: dict
:param pythonic_params: When True CamelCase parameters are converted to snake_case and an underscore is appended
to any shadowed built-ins
:type pythonic_params: bool
        :param uri_parser_class: class to use for uri parsing
:type uri_parser_class: AbstractURIParser
:param pass_context_arg_name: If not None will try to inject the request context to the function using this
name.
:type pass_context_arg_name: str|None
"""
self._api = api
self._method = method
self._path = path
self._operation = operation
self._resolver = resolver
self._security = app_security
self._security_schemes = security_schemes
self._validate_responses = validate_responses
self._strict_validation = strict_validation
self._pythonic_params = pythonic_params
self._uri_parser_class = uri_parser_class
self._pass_context_arg_name = pass_context_arg_name
self._randomize_endpoint = randomize_endpoint
self._operation_id = self._operation.get("operationId")
self._resolution = resolver.resolve(self)
self._operation_id = self._resolution.operation_id
self._responses = self._operation.get("responses", {})
self._validator_map = dict(VALIDATOR_MAP)
self._validator_map.update(validator_map or {})
self._format_converters = format_converters or {}
@property
def method(self):
"""
The HTTP method for this operation (ex. GET, POST)
"""
return self._method
@property
def path(self):
"""
The path of the operation, relative to the API base path
"""
return self._path
@property
def responses(self):
"""
Returns the responses for this operation
"""
return self._responses
@property
def validator_map(self):
"""
Validators to use for parameter, body, and response validation
"""
return self._validator_map
@property
def format_converters(self):
"""
Converters to use to convert input type based on the schema format
attribute.
"""
return self._format_converters
@property
def operation_id(self):
"""
        The operation id used to identify the operation internally to the app
"""
return self._operation_id
@property
def randomize_endpoint(self):
"""
number of random digits to generate and append to the operation_id.
"""
return self._randomize_endpoint
@property
def router_controller(self):
"""
The router controller to use (python module where handler functions live)
"""
return self._router_controller
@property
def strict_validation(self):
"""
If True, validate all requests against the spec
"""
return self._strict_validation
@property
def pythonic_params(self):
"""
If True, convert CamelCase into pythonic_variable_names
"""
return self._pythonic_params
@property
def validate_responses(self):
"""
If True, check the response against the response schema, and return an
error if the response does not validate.
"""
return self._validate_responses
@staticmethod
def _get_file_arguments(files, arguments, has_kwargs=False):
return {k: v for k, v in files.items() if k in arguments or has_kwargs}
@abc.abstractmethod
def _get_val_from_param(self, value, query_defn):
"""
Convert input parameters into the correct type
"""
def _query_args_helper(self, query_defns, query_arguments,
function_arguments, has_kwargs, sanitize):
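        # Convert raw query values into typed values for every parameter the
        # handler accepts; unknown keys are skipped (unless **kwargs is taken)
        # and keys missing from the spec are logged as errors.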
res = {}
for key, value in query_arguments.items():
key = sanitize(key)
if not has_kwargs and key not in function_arguments:
logger.debug("Query Parameter '%s' not in function arguments", key)
else:
logger.debug("Query Parameter '%s' in function arguments", key)
try:
query_defn = query_defns[key]
except KeyError: # pragma: no cover
logger.error("Function argument '{}' not defined in specification".format(key))
else:
logger.debug('%s is a %s', key, query_defn)
res.update({key: self._get_val_from_param(value, query_defn)})
return res
@abc.abstractmethod
def _get_query_arguments(self, query, arguments, has_kwargs, sanitize):
"""
extract handler function arguments from the query parameters
"""
@abc.abstractmethod
def _get_body_argument(self, body, arguments, has_kwargs, sanitize):
"""
extract handler function arguments from the request body
"""
def _get_path_arguments(self, path_params, sanitize):
"""
extract handler function arguments from path parameters
"""
kwargs = {}
path_defns = {p["name"]: p for p in self.parameters if p["in"] == "path"}
for key, value in path_params.items():
sanitized_key = sanitize(key)
if key in path_defns:
kwargs[sanitized_key] = self._get_val_from_param(value, path_defns[key])
else: # Assume path params mechanism used for injection
kwargs[sanitized_key] = value
return kwargs
@abc.abstractproperty
def parameters(self):
"""
Returns the parameters for this operation
"""
@abc.abstractproperty
def produces(self):
"""
Content-Types that the operation produces
"""
@abc.abstractproperty
def consumes(self):
"""
Content-Types that the operation consumes
"""
@abc.abstractproperty
def body_schema(self):
"""
The body schema definition for this operation.
"""
@abc.abstractproperty
def body_definition(self):
"""
The body definition for this operation.
:rtype: dict
"""
def get_arguments(self, path_params, query_params, body, files, arguments,
has_kwargs, sanitize):
"""
get arguments for handler function
"""
ret = {}
ret.update(self._get_path_arguments(path_params, sanitize))
ret.update(self._get_query_arguments(query_params, arguments,
has_kwargs, sanitize))
if self.method.upper() in ["PATCH", "POST", "PUT"]:
ret.update(self._get_body_argument(body, arguments,
has_kwargs, sanitize))
ret.update(self._get_file_arguments(files, arguments, has_kwargs))
return ret
def response_definition(self, status_code=None,
content_type=None):
"""
response definition for this endpoint
"""
content_type = content_type or self.get_mimetype()
response_definition = self.responses.get(
str(status_code),
self.responses.get("default", {})
)
return response_definition
@abc.abstractmethod
def response_schema(self, status_code=None, content_type=None):
"""
response schema for this endpoint
"""
@abc.abstractmethod
def example_response(self, status_code=None, content_type=None):
"""
Returns an example from the spec
"""
@abc.abstractmethod
def get_path_parameter_types(self):
"""
Returns the types for parameters in the path
"""
@abc.abstractmethod
def with_definitions(self, schema):
"""
Returns the given schema, but with the definitions from the spec
attached. This allows any remaining references to be resolved by a
validator (for example).
"""
def get_mimetype(self):
"""
If the endpoint has no 'produces' then the default is
'application/json'.
        :rtype: str
"""
if all_json(self.produces):
try:
return self.produces[0]
except IndexError:
return DEFAULT_MIMETYPE
elif len(self.produces) == 1:
return self.produces[0]
else:
return DEFAULT_MIMETYPE
@property
def _uri_parsing_decorator(self):
"""
Returns a decorator that parses request data and handles things like
array types, and duplicate parameter definitions.
"""
return self._uri_parser_class(self.parameters, self.body_definition)
@property
def function(self):
"""
Operation function with decorators
:rtype: types.FunctionType
"""
function = parameter_to_arg(
self, self._resolution.function, self.pythonic_params,
self._pass_context_arg_name
)
if self.validate_responses:
logger.debug('... Response validation enabled.')
response_decorator = self.__response_validation_decorator
logger.debug('... Adding response decorator (%r)', response_decorator)
function = response_decorator(function)
produces_decorator = self.__content_type_decorator
logger.debug('... Adding produces decorator (%r)', produces_decorator)
function = produces_decorator(function)
for validation_decorator in self.__validation_decorators:
function = validation_decorator(function)
uri_parsing_decorator = self._uri_parsing_decorator
function = uri_parsing_decorator(function)
# NOTE: the security decorator should be applied last to check auth before anything else :-)
security_decorator = self.security_decorator
logger.debug('... Adding security decorator (%r)', security_decorator)
function = security_decorator(function)
function = self._request_response_decorator(function)
if UWSGIMetricsCollector.is_available(): # pragma: no cover
decorator = UWSGIMetricsCollector(self.path, self.method)
function = decorator(function)
return function
@property
def __content_type_decorator(self):
"""
Get produces decorator.
If the operation mimetype format is json then the function return value is jsonified
From Swagger Specification:
**Produces**
A list of MIME types the operation can produce. This overrides the produces definition at the Swagger Object.
An empty value MAY be used to clear the global definition.
:rtype: types.FunctionType
"""
logger.debug('... Produces: %s', self.produces, extra=vars(self))
mimetype = self.get_mimetype()
if all_json(self.produces): # endpoint will return json
logger.debug('... Produces json', extra=vars(self))
# TODO: Refactor this.
return lambda f: f
elif len(self.produces) == 1:
logger.debug('... Produces %s', mimetype, extra=vars(self))
decorator = Produces(mimetype)
return decorator
else:
return BaseSerializer()
@property
def __validation_decorators(self):
"""
:rtype: types.FunctionType
"""
ParameterValidator = self.validator_map['parameter']
RequestBodyValidator = self.validator_map['body']
if self.parameters:
yield ParameterValidator(self.parameters,
self.api,
strict_validation=self.strict_validation)
if self.body_schema:
yield RequestBodyValidator(self.body_schema, self.consumes, self.api,
is_nullable(self.body_definition),
strict_validation=self.strict_validation)
@property
def __response_validation_decorator(self):
"""
Get a decorator for validating the generated Response.
:rtype: types.FunctionType
"""
ResponseValidator = self.validator_map['response']
return ResponseValidator(self, self.get_mimetype())
def convert_type(self, value, _type, _format=None):
"""
Convert the input value to the corresponding python type.
:param value: The raw input value from the HTTP request.
:param _type: The type of the property as defined in the schema.
:param _format: The optional format of the property as defined in the schema.
:return: The input value converted to the python type.
"""
typed_value = make_type(value, _type)
type_converters = self.format_converters.get(_type)
if not type_converters:
return typed_value
format_converter = type_converters.get(_format)
if not format_converter:
return typed_value
return format_converter(_type, _format, value)
def json_loads(self, data):
"""
A wrapper for calling the API specific JSON loader.
:param data: The JSON data in textual form.
:type data: bytes
"""
return self.api.json_loads(data)
| 34.932914
| 120
| 0.63434
|
import abc
import logging
from connexion.operations.secure import SecureOperation
from ..decorators.metrics import UWSGIMetricsCollector
from ..decorators.parameter import parameter_to_arg
from ..decorators.produces import BaseSerializer, Produces
from ..decorators.response import ResponseValidator
from ..decorators.validation import ParameterValidator, RequestBodyValidator
from ..utils import all_json, is_nullable, make_type
logger = logging.getLogger('connexion.operations.abstract')
DEFAULT_MIMETYPE = 'application/json'
VALIDATOR_MAP = {
'parameter': ParameterValidator,
'body': RequestBodyValidator,
'response': ResponseValidator,
}
class AbstractOperation(SecureOperation, metaclass=abc.ABCMeta):
def __init__(self, api, method, path, operation, resolver,
app_security=None, security_schemes=None,
validate_responses=False, strict_validation=False,
randomize_endpoint=None, validator_map=None,
format_converters=None, pythonic_params=False,
uri_parser_class=None, pass_context_arg_name=None):
self._api = api
self._method = method
self._path = path
self._operation = operation
self._resolver = resolver
self._security = app_security
self._security_schemes = security_schemes
self._validate_responses = validate_responses
self._strict_validation = strict_validation
self._pythonic_params = pythonic_params
self._uri_parser_class = uri_parser_class
self._pass_context_arg_name = pass_context_arg_name
self._randomize_endpoint = randomize_endpoint
self._operation_id = self._operation.get("operationId")
self._resolution = resolver.resolve(self)
self._operation_id = self._resolution.operation_id
self._responses = self._operation.get("responses", {})
self._validator_map = dict(VALIDATOR_MAP)
self._validator_map.update(validator_map or {})
self._format_converters = format_converters or {}
@property
def method(self):
return self._method
@property
def path(self):
return self._path
@property
def responses(self):
return self._responses
@property
def validator_map(self):
return self._validator_map
@property
def format_converters(self):
return self._format_converters
@property
def operation_id(self):
return self._operation_id
@property
def randomize_endpoint(self):
return self._randomize_endpoint
@property
def router_controller(self):
return self._router_controller
@property
def strict_validation(self):
return self._strict_validation
@property
def pythonic_params(self):
return self._pythonic_params
@property
def validate_responses(self):
return self._validate_responses
@staticmethod
def _get_file_arguments(files, arguments, has_kwargs=False):
return {k: v for k, v in files.items() if k in arguments or has_kwargs}
@abc.abstractmethod
    def _get_val_from_param(self, value, query_defn):
        ...
def _query_args_helper(self, query_defns, query_arguments,
function_arguments, has_kwargs, sanitize):
res = {}
for key, value in query_arguments.items():
key = sanitize(key)
if not has_kwargs and key not in function_arguments:
logger.debug("Query Parameter '%s' not in function arguments", key)
else:
logger.debug("Query Parameter '%s' in function arguments", key)
try:
query_defn = query_defns[key]
except KeyError:
logger.error("Function argument '{}' not defined in specification".format(key))
else:
logger.debug('%s is a %s', key, query_defn)
res.update({key: self._get_val_from_param(value, query_defn)})
return res
@abc.abstractmethod
    def _get_query_arguments(self, query, arguments, has_kwargs, sanitize):
        ...
    @abc.abstractmethod
    def _get_body_argument(self, body, arguments, has_kwargs, sanitize):
        ...
def _get_path_arguments(self, path_params, sanitize):
kwargs = {}
path_defns = {p["name"]: p for p in self.parameters if p["in"] == "path"}
for key, value in path_params.items():
sanitized_key = sanitize(key)
if key in path_defns:
kwargs[sanitized_key] = self._get_val_from_param(value, path_defns[key])
else:
kwargs[sanitized_key] = value
return kwargs
@abc.abstractproperty
    def parameters(self):
        ...
    @abc.abstractproperty
    def produces(self):
        ...
    @abc.abstractproperty
    def consumes(self):
        ...
    @abc.abstractproperty
    def body_schema(self):
        ...
    @abc.abstractproperty
    def body_definition(self):
        ...
def get_arguments(self, path_params, query_params, body, files, arguments,
has_kwargs, sanitize):
ret = {}
ret.update(self._get_path_arguments(path_params, sanitize))
ret.update(self._get_query_arguments(query_params, arguments,
has_kwargs, sanitize))
if self.method.upper() in ["PATCH", "POST", "PUT"]:
ret.update(self._get_body_argument(body, arguments,
has_kwargs, sanitize))
ret.update(self._get_file_arguments(files, arguments, has_kwargs))
return ret
def response_definition(self, status_code=None,
content_type=None):
content_type = content_type or self.get_mimetype()
response_definition = self.responses.get(
str(status_code),
self.responses.get("default", {})
)
return response_definition
@abc.abstractmethod
    def response_schema(self, status_code=None, content_type=None):
        ...
    @abc.abstractmethod
    def example_response(self, status_code=None, content_type=None):
        ...
    @abc.abstractmethod
    def get_path_parameter_types(self):
        ...
    @abc.abstractmethod
    def with_definitions(self, schema):
        ...
def get_mimetype(self):
if all_json(self.produces):
try:
return self.produces[0]
except IndexError:
return DEFAULT_MIMETYPE
elif len(self.produces) == 1:
return self.produces[0]
else:
return DEFAULT_MIMETYPE
@property
def _uri_parsing_decorator(self):
return self._uri_parser_class(self.parameters, self.body_definition)
@property
def function(self):
function = parameter_to_arg(
self, self._resolution.function, self.pythonic_params,
self._pass_context_arg_name
)
if self.validate_responses:
logger.debug('... Response validation enabled.')
response_decorator = self.__response_validation_decorator
logger.debug('... Adding response decorator (%r)', response_decorator)
function = response_decorator(function)
produces_decorator = self.__content_type_decorator
logger.debug('... Adding produces decorator (%r)', produces_decorator)
function = produces_decorator(function)
for validation_decorator in self.__validation_decorators:
function = validation_decorator(function)
uri_parsing_decorator = self._uri_parsing_decorator
function = uri_parsing_decorator(function)
security_decorator = self.security_decorator
logger.debug('... Adding security decorator (%r)', security_decorator)
function = security_decorator(function)
function = self._request_response_decorator(function)
if UWSGIMetricsCollector.is_available():
decorator = UWSGIMetricsCollector(self.path, self.method)
function = decorator(function)
return function
@property
def __content_type_decorator(self):
logger.debug('... Produces: %s', self.produces, extra=vars(self))
mimetype = self.get_mimetype()
if all_json(self.produces):
logger.debug('... Produces json', extra=vars(self))
return lambda f: f
elif len(self.produces) == 1:
logger.debug('... Produces %s', mimetype, extra=vars(self))
decorator = Produces(mimetype)
return decorator
else:
return BaseSerializer()
@property
def __validation_decorators(self):
ParameterValidator = self.validator_map['parameter']
RequestBodyValidator = self.validator_map['body']
if self.parameters:
yield ParameterValidator(self.parameters,
self.api,
strict_validation=self.strict_validation)
if self.body_schema:
yield RequestBodyValidator(self.body_schema, self.consumes, self.api,
is_nullable(self.body_definition),
strict_validation=self.strict_validation)
@property
def __response_validation_decorator(self):
ResponseValidator = self.validator_map['response']
return ResponseValidator(self, self.get_mimetype())
def convert_type(self, value, _type, _format=None):
typed_value = make_type(value, _type)
type_converters = self.format_converters.get(_type)
if not type_converters:
return typed_value
format_converter = type_converters.get(_format)
if not format_converter:
return typed_value
return format_converter(_type, _format, value)
def json_loads(self, data):
return self.api.json_loads(data)
| true
| true
|
f7159bc7a6e447bf791158449870039af24b7945
| 2,451
|
py
|
Python
|
examples/python/lis2ds12.py
|
moredu/upm
|
d6f76ff8c231417666594214679c49399513112e
|
[
"MIT"
] | 619
|
2015-01-14T23:50:18.000Z
|
2019-11-08T14:04:33.000Z
|
examples/python/lis2ds12.py
|
moredu/upm
|
d6f76ff8c231417666594214679c49399513112e
|
[
"MIT"
] | 576
|
2015-01-02T09:55:14.000Z
|
2019-11-12T15:31:10.000Z
|
examples/python/lis2ds12.py
|
moredu/upm
|
d6f76ff8c231417666594214679c49399513112e
|
[
"MIT"
] | 494
|
2015-01-14T18:33:56.000Z
|
2019-11-07T10:08:15.000Z
|
#!/usr/bin/env python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016-2017 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_lis2ds12 as sensorObj
def main():
# Instantiate a LIS2DS12 instance using default i2c bus and address
sensor = sensorObj.LIS2DS12()
# For SPI, bus 0, you would pass -1 as the address, and a valid pin for CS:
# LIS2DS12(0, -1, 10);
## Exit handlers ##
# This function stops python from printing a stacktrace when you
# hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# now output data every 250 milliseconds
while (1):
sensor.update()
data = sensor.getAccelerometer()
print("Accelerometer x:", data[0], end=' ')
print(" y:", data[1], end=' ')
print(" z:", data[2], end=' ')
print(" g")
# we show both C and F for temperature
print("Compensation Temperature:", sensor.getTemperature(), "C /", end=' ')
print(sensor.getTemperature(True), "F")
print()
time.sleep(.250)
if __name__ == '__main__':
main()
| 35.521739
| 83
| 0.696042
|
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_lis2ds12 as sensorObj
def main():
sensor = sensorObj.LIS2DS12()
    def SIGINTHandler(signum, frame):
raise SystemExit
def exitHandler():
print("Exiting")
sys.exit(0)
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while (1):
sensor.update()
data = sensor.getAccelerometer()
print("Accelerometer x:", data[0], end=' ')
print(" y:", data[1], end=' ')
print(" z:", data[2], end=' ')
print(" g")
print("Compensation Temperature:", sensor.getTemperature(), "C /", end=' ')
print(sensor.getTemperature(True), "F")
print()
time.sleep(.250)
if __name__ == '__main__':
main()
| true
| true
|
f7159c0f90f16cb4e374669e5d3e907a7304876f
| 8,515
|
py
|
Python
|
pcdet/datasets/augmentor/data_augmentor.py
|
Jasonkks/mlcnet
|
8f89c860c709733c8baa663607004fc48d76291d
|
[
"Apache-2.0"
] | 18
|
2021-11-30T15:19:53.000Z
|
2022-03-30T15:15:57.000Z
|
pcdet/datasets/augmentor/data_augmentor.py
|
Jasonkks/mlcnet
|
8f89c860c709733c8baa663607004fc48d76291d
|
[
"Apache-2.0"
] | 2
|
2021-12-10T06:38:18.000Z
|
2022-03-27T21:45:53.000Z
|
pcdet/datasets/augmentor/data_augmentor.py
|
Jasonkks/mlcnet
|
8f89c860c709733c8baa663607004fc48d76291d
|
[
"Apache-2.0"
] | 3
|
2021-12-01T06:25:52.000Z
|
2022-01-21T14:13:51.000Z
|
from functools import partial
import torch
import random
import numpy as np
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
def __init__(self, root_path, augmentor_configs, class_names, logger=None):
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.data_augmentor_queue = []
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.AUG_CONFIG_LIST
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def gt_sampling(self, config=None):
db_sampler = database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def object_size_normalization(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.object_size_normalization, config=config)
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
if gt_boxes.shape[1] > 7:
gt_boxes = gt_boxes[:,:7]
offset = np.array(config['OFFSET'])
# get masks of points inside boxes
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)).numpy()
num_obj = gt_boxes.shape[0]
obj_points_list = []
gt_boxes_size = gt_boxes[:, 3:6]
new_gt_boxes_size = gt_boxes_size + offset
scale_factor = new_gt_boxes_size / gt_boxes_size
# scale the objects
for i in range(num_obj):
point_mask = point_masks[i]
obj_points = points[point_mask > 0] # get object points within the gt box
obj_points[:, :3] -= gt_boxes[i, :3] # relative to box center
obj_points[:, :3] *= scale_factor[i] # scale
obj_points[:, :3] += gt_boxes[i, :3] # back to global coordinate
obj_points_list.append(obj_points)
# remove points inside boxes
points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
# scale the boxes
gt_boxes[:, 3:6] *= scale_factor
# remove points inside boxes
points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
# merge points
# points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
obj_points = np.concatenate(obj_points_list, axis=0)
points = np.concatenate([points, obj_points], axis=0)
data_dict['points'] = points
data_dict['gt_boxes'][:,:7] = gt_boxes
return data_dict
def random_world_flip(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_flip, config=config)
gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
points = data_dict['points']
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y']
if 'gt_boxes' in data_dict:
gt_boxes, points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
gt_boxes, points, return_enable=True
)
else:
points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s_points' % cur_axis)(
points, return_enable=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_flip_enabled'] = world_flip_enabled
return data_dict
def random_world_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
if 'gt_boxes' in data_dict:
gt_boxes, points, world_rotation = augmentor_utils.global_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rotation=True
)
else:
points, world_rotation = augmentor_utils.global_rotation_points(
data_dict['points'], rot_range=rot_range, return_rotation=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_rotation'] = world_rotation
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling, config=config)
if 'gt_boxes' in data_dict:
gt_boxes, points, scale_ratio = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']
)
else:
points, scale_ratio = augmentor_utils.global_scaling_points(data_dict['points'], config['WORLD_SCALE_RANGE'])
data_dict['world_scaling'] = scale_ratio
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_scaling_xyz(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling_xyz, config=config)
gt_boxes = data_dict['gt_boxes']
points = data_dict['points']
scale_range = config['SCALE_RANGE']
noise_scale = np.random.uniform(scale_range[0], scale_range[1], 3)
points[:, :3] *= noise_scale
gt_boxes[:, :3] *= noise_scale
gt_boxes[:, 3:6] *= noise_scale
data_dict['points'] = points
data_dict['gt_boxes'] = gt_boxes
data_dict['world_scaling_xyz'] = noise_scale
return data_dict
def jitter_point_cloud(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.jitter_point_cloud, config=config)
sigma = config['SIGMA']
clip = config['CLIP']
assert(clip > 0)
points = data_dict['points']
jittered_data = np.clip(sigma * np.random.randn(points.shape[0], points.shape[1]), -1*clip, clip)
points += jittered_data
data_dict['points'] = points
data_dict['jittered'] = True
data_dict['jitter_values'] = jittered_data
return data_dict
def random_world_shift(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_shift, config=config)
shift_range = config['RANGE']
shifts = np.random.uniform(-shift_range, shift_range, 3)
data_dict['points'] += shifts
data_dict['world_shifts'] = shifts
return data_dict
def forward(self, data_dict, augment=True):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
gt_names: optional, (N), string
...
Returns:
"""
if augment:
for cur_augmentor in self.data_augmentor_queue:
data_dict = cur_augmentor(data_dict=data_dict)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes' in data_dict and 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
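# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical driver for DataAugmentor. The config objects only
# need the attributes read above (NAME plus the per-augmentor keys); the
# EasyDict wrapper, class list and random point cloud are assumptions made
# purely for illustration.
def _example_augmentation_pipeline():
    from easydict import EasyDict  # assumed helper for attribute-style configs
    aug_cfgs = [
        EasyDict(NAME='random_world_flip', ALONG_AXIS_LIST=['x']),
        EasyDict(NAME='random_world_scaling', WORLD_SCALE_RANGE=[0.95, 1.05]),
    ]
    augmentor = DataAugmentor(root_path=None, augmentor_configs=aug_cfgs,
                              class_names=['Car'], logger=None)
    # a fake (N, 4) point cloud: x, y, z, intensity
    data_dict = {'points': np.random.rand(1000, 4).astype(np.float32)}
    return augmentor.forward(data_dict)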
| 40.165094
| 121
| 0.622548
|
from functools import partial
import torch
import random
import numpy as np
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
def __init__(self, root_path, augmentor_configs, class_names, logger=None):
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.data_augmentor_queue = []
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.AUG_CONFIG_LIST
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def gt_sampling(self, config=None):
db_sampler = database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def object_size_normalization(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.object_size_normalization, config=config)
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
if gt_boxes.shape[1] > 7:
gt_boxes = gt_boxes[:,:7]
offset = np.array(config['OFFSET'])
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)).numpy()
num_obj = gt_boxes.shape[0]
obj_points_list = []
gt_boxes_size = gt_boxes[:, 3:6]
new_gt_boxes_size = gt_boxes_size + offset
scale_factor = new_gt_boxes_size / gt_boxes_size
for i in range(num_obj):
point_mask = point_masks[i]
obj_points = points[point_mask > 0]
obj_points[:, :3] -= gt_boxes[i, :3]
obj_points[:, :3] *= scale_factor[i]
obj_points[:, :3] += gt_boxes[i, :3]
obj_points_list.append(obj_points)
points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
gt_boxes[:, 3:6] *= scale_factor
points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
obj_points = np.concatenate(obj_points_list, axis=0)
points = np.concatenate([points, obj_points], axis=0)
data_dict['points'] = points
data_dict['gt_boxes'][:,:7] = gt_boxes
return data_dict
def random_world_flip(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_flip, config=config)
gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
points = data_dict['points']
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y']
if 'gt_boxes' in data_dict:
gt_boxes, points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
gt_boxes, points, return_enable=True
)
else:
points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s_points' % cur_axis)(
points, return_enable=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_flip_enabled'] = world_flip_enabled
return data_dict
def random_world_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
if 'gt_boxes' in data_dict:
gt_boxes, points, world_rotation = augmentor_utils.global_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rotation=True
)
else:
points, world_rotation = augmentor_utils.global_rotation_points(
data_dict['points'], rot_range=rot_range, return_rotation=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_rotation'] = world_rotation
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling, config=config)
if 'gt_boxes' in data_dict:
gt_boxes, points, scale_ratio = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']
)
else:
points, scale_ratio = augmentor_utils.global_scaling_points(data_dict['points'], config['WORLD_SCALE_RANGE'])
data_dict['world_scaling'] = scale_ratio
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_scaling_xyz(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling_xyz, config=config)
gt_boxes = data_dict['gt_boxes']
points = data_dict['points']
scale_range = config['SCALE_RANGE']
noise_scale = np.random.uniform(scale_range[0], scale_range[1], 3)
points[:, :3] *= noise_scale
gt_boxes[:, :3] *= noise_scale
gt_boxes[:, 3:6] *= noise_scale
data_dict['points'] = points
data_dict['gt_boxes'] = gt_boxes
data_dict['world_scaling_xyz'] = noise_scale
return data_dict
def jitter_point_cloud(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.jitter_point_cloud, config=config)
sigma = config['SIGMA']
clip = config['CLIP']
assert(clip > 0)
points = data_dict['points']
jittered_data = np.clip(sigma * np.random.randn(points.shape[0], points.shape[1]), -1*clip, clip)
points += jittered_data
data_dict['points'] = points
data_dict['jittered'] = True
data_dict['jitter_values'] = jittered_data
return data_dict
def random_world_shift(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_shift, config=config)
shift_range = config['RANGE']
shifts = np.random.uniform(-shift_range, shift_range, 3)
data_dict['points'] += shifts
data_dict['world_shifts'] = shifts
return data_dict
def forward(self, data_dict, augment=True):
if augment:
for cur_augmentor in self.data_augmentor_queue:
data_dict = cur_augmentor(data_dict=data_dict)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes' in data_dict and 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
| true
| true
|
f7159c350fdf2aa74b7565b424ed07b5ef99b118
| 733
|
py
|
Python
|
services/migrations/0010_auto_20170729_0711.py
|
iesteban/bitcoin_bazaar_backend
|
2aa7c61d8727dae3a9be4b19c4b2aa49ec7ecaa0
|
[
"MIT"
] | 18
|
2017-03-08T06:30:55.000Z
|
2020-05-08T17:30:20.000Z
|
services/migrations/0010_auto_20170729_0711.py
|
iesteban/bitcoin_bazaar_backend
|
2aa7c61d8727dae3a9be4b19c4b2aa49ec7ecaa0
|
[
"MIT"
] | 871
|
2017-03-06T21:03:59.000Z
|
2022-03-28T19:46:44.000Z
|
services/migrations/0010_auto_20170729_0711.py
|
iesteban/bitcoin_bazaar_backend
|
2aa7c61d8727dae3a9be4b19c4b2aa49ec7ecaa0
|
[
"MIT"
] | 5
|
2017-07-07T12:10:47.000Z
|
2020-05-13T15:57:56.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-29 07:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0009_auto_20170617_1557'),
]
operations = [
migrations.AddField(
model_name='category',
name='name_en',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
migrations.AddField(
model_name='category',
name='name_es',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
]
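# --- Illustrative note (not part of the original file) ---
# Applying this migration adds the two nullable, unique translated name columns
# to the category table; it would typically be applied with:
#   python manage.py migrate services 0010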
| 28.192308
| 113
| 0.618008
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0009_auto_20170617_1557'),
]
operations = [
migrations.AddField(
model_name='category',
name='name_en',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
migrations.AddField(
model_name='category',
name='name_es',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
]
| true
| true
|
f7159d3af512db4cfc343827849b64501a5eca32
| 2,770
|
py
|
Python
|
apps/project/subviews/bug.py
|
gvizquel/pyerp
|
c859f7293cabd1003f79112463cee93ac89fccba
|
[
"MIT"
] | null | null | null |
apps/project/subviews/bug.py
|
gvizquel/pyerp
|
c859f7293cabd1003f79112463cee93ac89fccba
|
[
"MIT"
] | 11
|
2020-06-05T22:50:37.000Z
|
2022-02-10T09:05:56.000Z
|
apps/project/subviews/bug.py
|
gvizquel/pyerp
|
c859f7293cabd1003f79112463cee93ac89fccba
|
[
"MIT"
] | null | null | null |
# Django libraries
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
# Libraries in local folders
from ..submodels.bug import PyBug
""" BEGIN BUG """
BUG_FIELDS = [
{'string': 'Nombre', 'field': 'name'},
{'string': 'Estado', 'field': 'state'},
{'string': 'Usuario', 'field': 'user_id'},
{'string': 'Notas', 'field': 'note'},
]
BUG_FIELDS_SHORT = ['name','state','user_id','note']
class BugListView(LoginRequiredMixin, ListView):
model = PyBug
template_name = 'erp/list.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugListView, self).get_context_data(**kwargs)
context['title'] = 'Errores'
context['detail_url'] = 'bug-detail'
context['add_url'] = 'bug-add'
context['fields'] = BUG_FIELDS
return context
class BugDetailView(LoginRequiredMixin, DetailView):
model = PyBug
template_name = 'erp/detail.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugDetailView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['update_url'] = 'bug-update'
context['delete_url'] = 'bug-delete'
context['fields'] = BUG_FIELDS
return context
class BugCreateView(LoginRequiredMixin, CreateView):
model = PyBug
fields = BUG_FIELDS_SHORT
template_name = 'erp/form.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugCreateView, self).get_context_data(**kwargs)
context['title'] = 'Crear Error'
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['back_url'] = reverse('bug')
return context
class BugUpdateView(LoginRequiredMixin, UpdateView):
model = PyBug
fields = BUG_FIELDS_SHORT
template_name = 'erp/form.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugUpdateView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['back_url'] = reverse('bug-detail', kwargs={'pk': context['object'].pk})
return context
@login_required(login_url="/erp/login")
def DeleteBug(self, pk):
bug = PyBug.objects.get(id=pk)
bug.delete()
return redirect(reverse('bug'))
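# --- Illustrative URL wiring sketch (not part of the original file) ---
# The views above resolve the names 'bug', 'bug-add', 'bug-detail', 'bug-update'
# and 'bug-delete' via reverse(); a urls.py along these lines would satisfy
# them.  The import path and URL prefixes are assumptions for illustration only.
#
#   from django.urls import path
#   from .subviews.bug import (BugListView, BugDetailView, BugCreateView,
#                              BugUpdateView, DeleteBug)
#
#   urlpatterns = [
#       path('bug/', BugListView.as_view(), name='bug'),
#       path('bug/add/', BugCreateView.as_view(), name='bug-add'),
#       path('bug/<int:pk>/', BugDetailView.as_view(), name='bug-detail'),
#       path('bug/<int:pk>/update/', BugUpdateView.as_view(), name='bug-update'),
#       path('bug/<int:pk>/delete/', DeleteBug, name='bug-delete'),
#   ]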
| 33.780488
| 88
| 0.648014
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from ..submodels.bug import PyBug
BUG_FIELDS = [
{'string': 'Nombre', 'field': 'name'},
{'string': 'Estado', 'field': 'state'},
{'string': 'Usuario', 'field': 'user_id'},
{'string': 'Notas', 'field': 'note'},
]
BUG_FIELDS_SHORT = ['name','state','user_id','note']
class BugListView(LoginRequiredMixin, ListView):
model = PyBug
template_name = 'erp/list.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugListView, self).get_context_data(**kwargs)
context['title'] = 'Errores'
context['detail_url'] = 'bug-detail'
context['add_url'] = 'bug-add'
context['fields'] = BUG_FIELDS
return context
class BugDetailView(LoginRequiredMixin, DetailView):
model = PyBug
template_name = 'erp/detail.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugDetailView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['update_url'] = 'bug-update'
context['delete_url'] = 'bug-delete'
context['fields'] = BUG_FIELDS
return context
class BugCreateView(LoginRequiredMixin, CreateView):
model = PyBug
fields = BUG_FIELDS_SHORT
template_name = 'erp/form.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugCreateView, self).get_context_data(**kwargs)
context['title'] = 'Crear Error'
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['back_url'] = reverse('bug')
return context
class BugUpdateView(LoginRequiredMixin, UpdateView):
model = PyBug
fields = BUG_FIELDS_SHORT
template_name = 'erp/form.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugUpdateView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['back_url'] = reverse('bug-detail', kwargs={'pk': context['object'].pk})
return context
@login_required(login_url="/erp/login")
def DeleteBug(self, pk):
bug = PyBug.objects.get(id=pk)
bug.delete()
return redirect(reverse('bug'))
| true
| true
|
f7159d5a2d920dc9cc5bb8cc18005b68413166a5
| 2,828
|
py
|
Python
|
Efficient-3DCNNs/thop/count_hooks.py
|
reetikaag/human-activity-recognition
|
1e6760a88ca52fe9a8a8ca60d000cd3426851156
|
[
"MIT"
] | null | null | null |
Efficient-3DCNNs/thop/count_hooks.py
|
reetikaag/human-activity-recognition
|
1e6760a88ca52fe9a8a8ca60d000cd3426851156
|
[
"MIT"
] | null | null | null |
Efficient-3DCNNs/thop/count_hooks.py
|
reetikaag/human-activity-recognition
|
1e6760a88ca52fe9a8a8ca60d000cd3426851156
|
[
"MIT"
] | null | null | null |
import argparse
import torch
import torch.nn as nn
multiply_adds = 1
def count_conv2d(m, x, y):
# TODO: add support for pad and dilation
x = x[0]
cin = m.in_channels
cout = m.out_channels
kh, kw = m.kernel_size
batch_size = x.size()[0]
out_w = y.size(2)
out_h = y.size(3)
# ops per output element
# kernel_mul = kh * kw * cin
# kernel_add = kh * kw * cin - 1
kernel_ops = multiply_adds * kh * kw * cin // m.groups
bias_ops = 1 if m.bias is not None else 0
ops_per_element = kernel_ops + bias_ops
# total ops
# num_out_elements = y.numel()
output_elements = batch_size * out_w * out_h * cout
total_ops = output_elements * ops_per_element
# in case same conv is used multiple times
m.total_ops += torch.Tensor([int(total_ops)])
def count_conv3d(m, x, y):
# TODO: add support for pad and dilation
x = x[0]
cin = m.in_channels
cout = m.out_channels
kd, kh, kw = m.kernel_size
batch_size = x.size()[0]
out_d = y.size(2)
out_w = y.size(3)
out_h = y.size(4)
# ops per output element
# kernel_mul = kh * kw * cin
# kernel_add = kh * kw * cin - 1
kernel_ops = multiply_adds * kd * kh * kw * cin // m.groups
bias_ops = 1 if m.bias is not None else 0
ops_per_element = kernel_ops + bias_ops
# total ops
# num_out_elements = y.numel()
output_elements = batch_size * out_d * out_w * out_h * cout
total_ops = output_elements * ops_per_element
# in case same conv is used multiple times
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_bn2d(m, x, y):
x = x[0]
nelements = x.numel()
total_sub = nelements
total_div = nelements
total_ops = total_sub + total_div
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_relu(m, x, y):
x = x[0]
nelements = x.numel()
total_ops = nelements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_softmax(m, x, y):
x = x[0]
batch_size, nfeatures = x.size()
total_exp = nfeatures
total_add = nfeatures - 1
total_div = nfeatures
total_ops = batch_size * (total_exp + total_add + total_div)
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_maxpool(m, x, y):
kernel_ops = torch.prod(torch.Tensor([m.kernel_size])) - 1
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_avgpool(m, x, y):
total_add = torch.prod(torch.Tensor([m.kernel_size])) - 1
total_div = 1
kernel_ops = total_add + total_div
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_linear(m, x, y):
# per output element
total_mul = m.in_features
total_add = m.in_features - 1
num_elements = y.numel()
total_ops = (total_mul + total_add) * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
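# --- Illustrative usage sketch (not part of the original file) ---
# The counters above are meant to be attached as forward hooks; each module is
# expected to carry a `total_ops` buffer before the hook fires.  The layer and
# input sizes below are arbitrary assumptions for illustration.
def _example_conv_profile():
    layer = nn.Conv2d(3, 16, kernel_size=3, padding=1)
    layer.total_ops = torch.zeros(1)           # buffer the hook accumulates into
    layer.register_forward_hook(count_conv2d)  # counts multiply-adds per forward pass
    layer(torch.randn(1, 3, 32, 32))
    return layer.total_ops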
| 22.624
| 61
| 0.684936
|
import argparse
import torch
import torch.nn as nn
multiply_adds = 1
def count_conv2d(m, x, y):
x = x[0]
cin = m.in_channels
cout = m.out_channels
kh, kw = m.kernel_size
batch_size = x.size()[0]
out_w = y.size(2)
out_h = y.size(3)
kernel_ops = multiply_adds * kh * kw * cin // m.groups
bias_ops = 1 if m.bias is not None else 0
ops_per_element = kernel_ops + bias_ops
output_elements = batch_size * out_w * out_h * cout
total_ops = output_elements * ops_per_element
m.total_ops += torch.Tensor([int(total_ops)])
def count_conv3d(m, x, y):
x = x[0]
cin = m.in_channels
cout = m.out_channels
kd, kh, kw = m.kernel_size
batch_size = x.size()[0]
out_d = y.size(2)
out_w = y.size(3)
out_h = y.size(4)
kernel_ops = multiply_adds * kd * kh * kw * cin // m.groups
bias_ops = 1 if m.bias is not None else 0
ops_per_element = kernel_ops + bias_ops
output_elements = batch_size * out_d * out_w * out_h * cout
total_ops = output_elements * ops_per_element
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_bn2d(m, x, y):
x = x[0]
nelements = x.numel()
total_sub = nelements
total_div = nelements
total_ops = total_sub + total_div
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_relu(m, x, y):
x = x[0]
nelements = x.numel()
total_ops = nelements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_softmax(m, x, y):
x = x[0]
batch_size, nfeatures = x.size()
total_exp = nfeatures
total_add = nfeatures - 1
total_div = nfeatures
total_ops = batch_size * (total_exp + total_add + total_div)
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_maxpool(m, x, y):
kernel_ops = torch.prod(torch.Tensor([m.kernel_size])) - 1
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_avgpool(m, x, y):
total_add = torch.prod(torch.Tensor([m.kernel_size])) - 1
total_div = 1
kernel_ops = total_add + total_div
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_linear(m, x, y):
total_mul = m.in_features
total_add = m.in_features - 1
num_elements = y.numel()
total_ops = (total_mul + total_add) * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
| true
| true
|
f7159d9791448f06c36331ba8b9839a880d17d19
| 4,281
|
py
|
Python
|
ambari-agent/src/test/python/ambari_agent/TestClusterConfigurationCache.py
|
tqrg-bot/ambari
|
05cd35982b30f424cec0b5b9d93bc4709880a3bc
|
[
"Apache-2.0"
] | null | null | null |
ambari-agent/src/test/python/ambari_agent/TestClusterConfigurationCache.py
|
tqrg-bot/ambari
|
05cd35982b30f424cec0b5b9d93bc4709880a3bc
|
[
"Apache-2.0"
] | null | null | null |
ambari-agent/src/test/python/ambari_agent/TestClusterConfigurationCache.py
|
tqrg-bot/ambari
|
05cd35982b30f424cec0b5b9d93bc4709880a3bc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
from ambari_agent.ClusterConfigurationCache import ClusterConfigurationCache
from mock.mock import MagicMock, patch, mock_open, ANY
from unittest import TestCase
class TestClusterConfigurationCache(TestCase):
o_flags = os.O_WRONLY | os.O_CREAT
perms = 0o600
def setUp(self):
# save original open() method for later use
self.original_open = open
def tearDown(self):
        sys.stdout = sys.__stdout__
@patch("os.path.exists", new = MagicMock(return_value=True))
@patch("os.path.isfile", new = MagicMock(return_value=True))
def test_cluster_configuration_cache_initialization(self):
configuration_json = '{ "c1" : { "foo-site" : { "foo" : "bar", "foobar" : "baz" } } }'
open_mock = mock_open(read_data=configuration_json)
with patch("__builtin__.open", open_mock):
cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
open_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json", 'r')
self.assertEqual('bar', cluster_configuration.get_configuration_value('c1', 'foo-site/foo') )
self.assertEqual('baz', cluster_configuration.get_configuration_value('c1', 'foo-site/foobar') )
self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID') )
self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID/INVALID') )
self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foo') )
self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foobar') )
pass
@patch("ambari_simplejson.dump")
def test_cluster_configuration_update(self, json_dump_mock):
cluster_configuration = self.__get_cluster_configuration()
configuration = {'foo-site' :
{ 'bar': 'rendered-bar', 'baz' : 'rendered-baz' }
}
osopen_mock, osfdopen_mock = self.__update_cluster_configuration(cluster_configuration, configuration)
osopen_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json",
TestClusterConfigurationCache.o_flags,
TestClusterConfigurationCache.perms);
osfdopen_mock.assert_called_with(11, "w")
json_dump_mock.assert_called_with({'c1': {'foo-site': {'baz': 'rendered-baz', 'bar': 'rendered-bar'}}}, ANY, indent=2)
pass
def __get_cluster_configuration(self):
"""
Gets an instance of the cluster cache where the file read and write
operations have been mocked out
:return:
"""
with patch("__builtin__.open") as open_mock:
open_mock.side_effect = self.open_side_effect
cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
return cluster_configuration
@patch("os.open")
@patch("os.fdopen")
def __update_cluster_configuration(self, cluster_configuration, configuration, osfdopen_mock, osopen_mock):
"""
Updates the configuration cache, using as mock file as the disk based
cache so that a file is not created during tests
:return:
"""
osopen_mock.return_value = 11
cluster_configuration.update_cache("c1", configuration)
return osopen_mock, osfdopen_mock
def open_side_effect(self, file, mode):
if mode == 'w':
file_mock = MagicMock()
return file_mock
else:
return self.original_open(file, mode)
| 38.918182
| 122
| 0.723896
|
import os
import sys
from ambari_agent.ClusterConfigurationCache import ClusterConfigurationCache
from mock.mock import MagicMock, patch, mock_open, ANY
from unittest import TestCase
class TestClusterConfigurationCache(TestCase):
o_flags = os.O_WRONLY | os.O_CREAT
perms = 0o600
def setUp(self):
self.original_open = open
def tearDown(self):
        sys.stdout = sys.__stdout__
@patch("os.path.exists", new = MagicMock(return_value=True))
@patch("os.path.isfile", new = MagicMock(return_value=True))
def test_cluster_configuration_cache_initialization(self):
configuration_json = '{ "c1" : { "foo-site" : { "foo" : "bar", "foobar" : "baz" } } }'
open_mock = mock_open(read_data=configuration_json)
with patch("__builtin__.open", open_mock):
cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
open_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json", 'r')
self.assertEqual('bar', cluster_configuration.get_configuration_value('c1', 'foo-site/foo') )
self.assertEqual('baz', cluster_configuration.get_configuration_value('c1', 'foo-site/foobar') )
self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID') )
self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID/INVALID') )
self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foo') )
self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foobar') )
pass
@patch("ambari_simplejson.dump")
def test_cluster_configuration_update(self, json_dump_mock):
cluster_configuration = self.__get_cluster_configuration()
configuration = {'foo-site' :
{ 'bar': 'rendered-bar', 'baz' : 'rendered-baz' }
}
osopen_mock, osfdopen_mock = self.__update_cluster_configuration(cluster_configuration, configuration)
osopen_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json",
TestClusterConfigurationCache.o_flags,
TestClusterConfigurationCache.perms);
osfdopen_mock.assert_called_with(11, "w")
json_dump_mock.assert_called_with({'c1': {'foo-site': {'baz': 'rendered-baz', 'bar': 'rendered-bar'}}}, ANY, indent=2)
pass
def __get_cluster_configuration(self):
with patch("__builtin__.open") as open_mock:
open_mock.side_effect = self.open_side_effect
cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
return cluster_configuration
@patch("os.open")
@patch("os.fdopen")
def __update_cluster_configuration(self, cluster_configuration, configuration, osfdopen_mock, osopen_mock):
osopen_mock.return_value = 11
cluster_configuration.update_cache("c1", configuration)
return osopen_mock, osfdopen_mock
def open_side_effect(self, file, mode):
if mode == 'w':
file_mock = MagicMock()
return file_mock
else:
return self.original_open(file, mode)
| true
| true
|
f7159dc06a6352dac967128fe0aa532b3e17b5a1
| 355
|
py
|
Python
|
nsd1802/python/day04/seqop.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1802/python/day04/seqop.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1802/python/day04/seqop.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
from random import randint
alist = list() # []
list('hello') # ['h', 'e', 'l', 'l', 'o']
list((10, 20, 30))             # [10, 20, 30]  convert a tuple to a list
astr = str() # ''
str(10) # '10'
str(['h', 'e', 'l', 'l', 'o']) # convert a list to a string
atuple = tuple() # ()
tuple('hello') # ('h', 'e', 'l', 'l', 'o')
num_list = [randint(1, 100) for i in range(10)]
max(num_list)
min(num_list)
| 25.357143
| 47
| 0.498592
|
from random import randint
alist = list()
list('hello')
list((10, 20, 30))
astr = str()
str(10)
str(['h', 'e', 'l', 'l', 'o'])
atuple = tuple()
tuple('hello')
num_list = [randint(1, 100) for i in range(10)]
max(num_list)
min(num_list)
| true
| true
|
f7159dfd3e5220cdf838857e63b85ecb77e79ba3
| 11,938
|
py
|
Python
|
venv/Lib/site-packages/praw/endpoints.py
|
Dartok-SD/Dartok-SD-s-reddit-bot
|
dc7a3215c062ed95b9f44bc207383e776c1692ea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/praw/endpoints.py
|
Dartok-SD/Dartok-SD-s-reddit-bot
|
dc7a3215c062ed95b9f44bc207383e776c1692ea
|
[
"MIT"
] | 1
|
2020-11-26T18:38:13.000Z
|
2020-11-27T15:25:49.000Z
|
praw/endpoints.py
|
leviroth/praw
|
8f05dd2a9188cbaf1fba067e429ad6552d952059
|
[
"BSD-2-Clause"
] | null | null | null |
"""List of API endpoints PRAW knows about."""
# flake8: noqa
# fmt: off
API_PATH = {
"about_edited": "r/{subreddit}/about/edited/",
"about_log": "r/{subreddit}/about/log/",
"about_modqueue": "r/{subreddit}/about/modqueue/",
"about_reports": "r/{subreddit}/about/reports/",
"about_spam": "r/{subreddit}/about/spam/",
"about_sticky": "r/{subreddit}/about/sticky/",
"about_stylesheet": "r/{subreddit}/about/stylesheet/",
"about_traffic": "r/{subreddit}/about/traffic/",
"about_unmoderated": "r/{subreddit}/about/unmoderated/",
"accept_mod_invite": "r/{subreddit}/api/accept_moderator_invite",
"approve": "api/approve/",
"block": "api/block",
"block_user": "/api/block_user/",
"blocked": "prefs/blocked/",
"collapse": "api/collapse_message/",
"collection": "api/v1/collections/collection",
"collection_add_post": "api/v1/collections/add_post_to_collection",
"collection_create": "api/v1/collections/create_collection",
"collection_delete": "api/v1/collections/delete_collection",
"collection_desc": "api/v1/collections/update_collection_description",
"collection_follow": "api/v1/collections/follow_collection",
"collection_remove_post": "api/v1/collections/remove_post_in_collection",
"collection_reorder": "api/v1/collections/reorder_collection",
"collection_subreddit": "api/v1/collections/subreddit_collections",
"collection_title": "api/v1/collections/update_collection_title",
"comment": "api/comment/",
"comment_replies": "message/comments/",
"compose": "api/compose/",
"contest_mode": "api/set_contest_mode/",
"del": "api/del/",
"delete_message": "api/del_msg",
"delete_sr_banner": "r/{subreddit}/api/delete_sr_banner",
"delete_sr_header": "r/{subreddit}/api/delete_sr_header",
"delete_sr_icon": "r/{subreddit}/api/delete_sr_icon",
"delete_sr_image": "r/{subreddit}/api/delete_sr_img",
"deleteflair": "r/{subreddit}/api/deleteflair",
"distinguish": "api/distinguish/",
"domain": "domain/{domain}/",
"duplicates": "duplicates/{submission_id}/",
"edit": "api/editusertext/",
"emoji_delete": "api/v1/{subreddit}/emoji/{emoji_name}",
"emoji_lease": "api/v1/{subreddit}/emoji_asset_upload_s3.json",
"emoji_list": "api/v1/{subreddit}/emojis/all",
"emoji_upload": "api/v1/{subreddit}/emoji.json",
"flair": "r/{subreddit}/api/flair/",
"flairconfig": "r/{subreddit}/api/flairconfig/",
"flaircsv": "r/{subreddit}/api/flaircsv/",
"flairlist": "r/{subreddit}/api/flairlist/",
"flairselector": "r/{subreddit}/api/flairselector/",
"flairtemplate": "r/{subreddit}/api/flairtemplate/",
"flairtemplate_v2": "r/{subreddit}/api/flairtemplate_v2",
"flairtemplateclear": "r/{subreddit}/api/clearflairtemplates/",
"flairtemplatedelete": "r/{subreddit}/api/deleteflairtemplate/",
"friend": "r/{subreddit}/api/friend/",
"friend_v1": "api/v1/me/friends/{user}",
"friends": "api/v1/me/friends/",
"gild_thing": "api/v1/gold/gild/{fullname}/",
"gild_user": "api/v1/gold/give/{username}/",
"hide": "api/hide/",
"ignore_reports": "api/ignore_reports/",
"inbox": "message/inbox/",
"info": "api/info/",
"karma": "api/v1/me/karma",
"leavecontributor": "api/leavecontributor",
"link_flair": "r/{subreddit}/api/link_flair_v2",
"list_banned": "r/{subreddit}/about/banned/",
"list_contributor": "r/{subreddit}/about/contributors/",
"list_moderator": "r/{subreddit}/about/moderators/",
"list_muted": "r/{subreddit}/about/muted/",
"list_wikibanned": "r/{subreddit}/about/wikibanned/",
"list_wikicontributor": "r/{subreddit}/about/wikicontributors/",
"live_accept_invite": "api/live/{id}/accept_contributor_invite",
"live_add_update": "api/live/{id}/update",
"live_close": "api/live/{id}/close_thread",
"live_contributors": "live/{id}/contributors",
"live_discussions": "live/{id}/discussions",
"live_focus": "live/{thread_id}/updates/{update_id}",
"live_info": "api/live/by_id/{ids}",
"live_invite": "api/live/{id}/invite_contributor",
"live_leave": "api/live/{id}/leave_contributor",
"live_now": "api/live/happening_now",
"live_remove_contrib": "api/live/{id}/rm_contributor",
"live_remove_invite": "api/live/{id}/rm_contributor_invite",
"live_remove_update": "api/live/{id}/delete_update",
"live_report": "api/live/{id}/report",
"live_strike": "api/live/{id}/strike_update",
"live_update_perms": "api/live/{id}/set_contributor_permissions",
"live_update_thread": "api/live/{id}/edit",
"live_updates": "live/{id}",
"liveabout": "api/live/{id}/about/",
"livecreate": "api/live/create",
"lock": "api/lock/",
"marknsfw": "api/marknsfw/",
"me": "api/v1/me",
"media_asset": "api/media/asset.json",
"mentions": "message/mentions",
"message": "message/messages/{id}/",
"messages": "message/messages/",
"moderator_messages": "r/{subreddit}/message/moderator/",
"moderator_unread": "r/{subreddit}/message/moderator/unread/",
"modmail_archive": "api/mod/conversations/{id}/archive",
"modmail_bulk_read": "api/mod/conversations/bulk/read",
"modmail_conversation": "api/mod/conversations/{id}",
"modmail_conversations": "api/mod/conversations/",
"modmail_highlight": "api/mod/conversations/{id}/highlight",
"modmail_mute": "api/mod/conversations/{id}/mute",
"modmail_read": "api/mod/conversations/read",
"modmail_subreddits": "api/mod/conversations/subreddits",
"modmail_unarchive": "api/mod/conversations/{id}/unarchive",
"modmail_unmute": "api/mod/conversations/{id}/unmute",
"modmail_unread": "api/mod/conversations/unread",
"modmail_unread_count": "api/mod/conversations/unread/count",
"morechildren": "api/morechildren/",
"multireddit": "user/{user}/m/{multi}/",
"multireddit_api": "api/multi/user/{user}/m/{multi}/",
"multireddit_base": "api/multi/",
"multireddit_copy": "api/multi/copy/",
"multireddit_rename": "api/multi/rename/",
"multireddit_update": "api/multi/user/{user}/m/{multi}/r/{subreddit}",
"multireddit_user": "api/multi/user/{user}/",
"mute_sender": "api/mute_message_author/",
"my_contributor": "subreddits/mine/contributor/",
"my_moderator": "subreddits/mine/moderator/",
"my_multireddits": "api/multi/mine/",
"my_subreddits": "subreddits/mine/subscriber/",
"preferences": "api/v1/me/prefs",
"quarantine_opt_in": "api/quarantine_optin",
"quarantine_opt_out": "api/quarantine_optout",
"read_message": "api/read_message/",
"removal_comment_message": "api/v1/modactions/removal_comment_message",
"removal_link_message": "api/v1/modactions/removal_link_message",
"remove": "api/remove/",
"report": "api/report/",
"rules": "r/{subreddit}/about/rules",
"save": "api/save/",
"search": "r/{subreddit}/search/",
"select_flair": "r/{subreddit}/api/selectflair/",
"sendreplies": "api/sendreplies",
"sent": "message/sent/",
"setpermissions": "r/{subreddit}/api/setpermissions/",
"site_admin": "api/site_admin/",
"spoiler": "api/spoiler/",
"sticky_submission": "api/set_subreddit_sticky/",
"store_visits": "api/store_visits",
"structured_styles": "api/v1/structured_styles/{subreddit}",
"style_asset_lease": "api/v1/style_asset_upload_s3/{subreddit}",
"sub_recommended": "api/recommend/sr/{subreddits}",
"submission": "comments/{id}/",
"submission_replies": "message/selfreply/",
"submit": "api/submit/",
"subreddit": "r/{subreddit}/",
"subreddit_about": "r/{subreddit}/about/",
"subreddit_filter": "api/filter/user/{user}/f/{special}/r/{subreddit}",
"subreddit_filter_list": "api/filter/user/{user}/f/{special}",
"subreddit_random": "r/{subreddit}/random/",
"subreddit_settings": "r/{subreddit}/about/edit/",
"subreddit_stylesheet": "r/{subreddit}/api/subreddit_stylesheet/",
"subreddits_by_topic": "api/subreddits_by_topic",
"subreddits_default": "subreddits/default/",
"subreddits_gold": "subreddits/gold/",
"subreddits_name_search": "api/search_reddit_names/",
"subreddits_new": "subreddits/new/",
"subreddits_popular": "subreddits/popular/",
"subreddits_search": "subreddits/search/",
"subscribe": "api/subscribe/",
"suggested_sort": "api/set_suggested_sort/",
"trophies": "api/v1/user/{user}/trophies",
"uncollapse": "api/uncollapse_message/",
"unfriend": "r/{subreddit}/api/unfriend/",
"unhide": "api/unhide/",
"unignore_reports": "api/unignore_reports/",
"unlock": "api/unlock/",
"unmarknsfw": "api/unmarknsfw/",
"unmute_sender": "api/unmute_message_author/",
"unread": "message/unread/",
"unread_message": "api/unread_message/",
"unsave": "api/unsave/",
"unspoiler": "api/unspoiler/",
"upload_image": "r/{subreddit}/api/upload_sr_img",
"user": "user/{user}/",
"user_about": "user/{user}/about/",
"user_flair": "r/{subreddit}/api/user_flair_v2",
"users_new": "users/new",
"users_popular": "users/popular",
"users_search": "users/search",
"vote": "api/vote/",
"widget_create": "r/{subreddit}/api/widget",
"widget_lease": "r/{subreddit}/api/widget_image_upload_s3",
"widget_modify": "r/{subreddit}/api/widget/{widget_id}",
"widget_order": "r/{subreddit}/api/widget_order/{section}",
"widgets": "r/{subreddit}/api/widgets",
"wiki_edit": "r/{subreddit}/api/wiki/edit/",
"wiki_page": "r/{subreddit}/wiki/{page}",
"wiki_page_editor": "r/{subreddit}/api/wiki/alloweditor/{method}",
"wiki_page_revisions": "r/{subreddit}/wiki/revisions/{page}",
"wiki_page_settings": "r/{subreddit}/wiki/settings/{page}",
"wiki_pages": "r/{subreddit}/wiki/pages/",
"wiki_revisions": "r/{subreddit}/wiki/revisions/",
}
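# --- Illustrative usage note (not part of the original file) ---
# Each value is a str.format() template; callers fill in the placeholders before
# issuing a request.  The subreddit name below is an arbitrary assumption.
_EXAMPLE_PATH = API_PATH["subreddit_about"].format(subreddit="learnpython")
# _EXAMPLE_PATH == "r/learnpython/about/"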
| 58.234146
| 82
| 0.554951
|
API_PATH = {
"about_edited": "r/{subreddit}/about/edited/",
"about_log": "r/{subreddit}/about/log/",
"about_modqueue": "r/{subreddit}/about/modqueue/",
"about_reports": "r/{subreddit}/about/reports/",
"about_spam": "r/{subreddit}/about/spam/",
"about_sticky": "r/{subreddit}/about/sticky/",
"about_stylesheet": "r/{subreddit}/about/stylesheet/",
"about_traffic": "r/{subreddit}/about/traffic/",
"about_unmoderated": "r/{subreddit}/about/unmoderated/",
"accept_mod_invite": "r/{subreddit}/api/accept_moderator_invite",
"approve": "api/approve/",
"block": "api/block",
"block_user": "/api/block_user/",
"blocked": "prefs/blocked/",
"collapse": "api/collapse_message/",
"collection": "api/v1/collections/collection",
"collection_add_post": "api/v1/collections/add_post_to_collection",
"collection_create": "api/v1/collections/create_collection",
"collection_delete": "api/v1/collections/delete_collection",
"collection_desc": "api/v1/collections/update_collection_description",
"collection_follow": "api/v1/collections/follow_collection",
"collection_remove_post": "api/v1/collections/remove_post_in_collection",
"collection_reorder": "api/v1/collections/reorder_collection",
"collection_subreddit": "api/v1/collections/subreddit_collections",
"collection_title": "api/v1/collections/update_collection_title",
"comment": "api/comment/",
"comment_replies": "message/comments/",
"compose": "api/compose/",
"contest_mode": "api/set_contest_mode/",
"del": "api/del/",
"delete_message": "api/del_msg",
"delete_sr_banner": "r/{subreddit}/api/delete_sr_banner",
"delete_sr_header": "r/{subreddit}/api/delete_sr_header",
"delete_sr_icon": "r/{subreddit}/api/delete_sr_icon",
"delete_sr_image": "r/{subreddit}/api/delete_sr_img",
"deleteflair": "r/{subreddit}/api/deleteflair",
"distinguish": "api/distinguish/",
"domain": "domain/{domain}/",
"duplicates": "duplicates/{submission_id}/",
"edit": "api/editusertext/",
"emoji_delete": "api/v1/{subreddit}/emoji/{emoji_name}",
"emoji_lease": "api/v1/{subreddit}/emoji_asset_upload_s3.json",
"emoji_list": "api/v1/{subreddit}/emojis/all",
"emoji_upload": "api/v1/{subreddit}/emoji.json",
"flair": "r/{subreddit}/api/flair/",
"flairconfig": "r/{subreddit}/api/flairconfig/",
"flaircsv": "r/{subreddit}/api/flaircsv/",
"flairlist": "r/{subreddit}/api/flairlist/",
"flairselector": "r/{subreddit}/api/flairselector/",
"flairtemplate": "r/{subreddit}/api/flairtemplate/",
"flairtemplate_v2": "r/{subreddit}/api/flairtemplate_v2",
"flairtemplateclear": "r/{subreddit}/api/clearflairtemplates/",
"flairtemplatedelete": "r/{subreddit}/api/deleteflairtemplate/",
"friend": "r/{subreddit}/api/friend/",
"friend_v1": "api/v1/me/friends/{user}",
"friends": "api/v1/me/friends/",
"gild_thing": "api/v1/gold/gild/{fullname}/",
"gild_user": "api/v1/gold/give/{username}/",
"hide": "api/hide/",
"ignore_reports": "api/ignore_reports/",
"inbox": "message/inbox/",
"info": "api/info/",
"karma": "api/v1/me/karma",
"leavecontributor": "api/leavecontributor",
"link_flair": "r/{subreddit}/api/link_flair_v2",
"list_banned": "r/{subreddit}/about/banned/",
"list_contributor": "r/{subreddit}/about/contributors/",
"list_moderator": "r/{subreddit}/about/moderators/",
"list_muted": "r/{subreddit}/about/muted/",
"list_wikibanned": "r/{subreddit}/about/wikibanned/",
"list_wikicontributor": "r/{subreddit}/about/wikicontributors/",
"live_accept_invite": "api/live/{id}/accept_contributor_invite",
"live_add_update": "api/live/{id}/update",
"live_close": "api/live/{id}/close_thread",
"live_contributors": "live/{id}/contributors",
"live_discussions": "live/{id}/discussions",
"live_focus": "live/{thread_id}/updates/{update_id}",
"live_info": "api/live/by_id/{ids}",
"live_invite": "api/live/{id}/invite_contributor",
"live_leave": "api/live/{id}/leave_contributor",
"live_now": "api/live/happening_now",
"live_remove_contrib": "api/live/{id}/rm_contributor",
"live_remove_invite": "api/live/{id}/rm_contributor_invite",
"live_remove_update": "api/live/{id}/delete_update",
"live_report": "api/live/{id}/report",
"live_strike": "api/live/{id}/strike_update",
"live_update_perms": "api/live/{id}/set_contributor_permissions",
"live_update_thread": "api/live/{id}/edit",
"live_updates": "live/{id}",
"liveabout": "api/live/{id}/about/",
"livecreate": "api/live/create",
"lock": "api/lock/",
"marknsfw": "api/marknsfw/",
"me": "api/v1/me",
"media_asset": "api/media/asset.json",
"mentions": "message/mentions",
"message": "message/messages/{id}/",
"messages": "message/messages/",
"moderator_messages": "r/{subreddit}/message/moderator/",
"moderator_unread": "r/{subreddit}/message/moderator/unread/",
"modmail_archive": "api/mod/conversations/{id}/archive",
"modmail_bulk_read": "api/mod/conversations/bulk/read",
"modmail_conversation": "api/mod/conversations/{id}",
"modmail_conversations": "api/mod/conversations/",
"modmail_highlight": "api/mod/conversations/{id}/highlight",
"modmail_mute": "api/mod/conversations/{id}/mute",
"modmail_read": "api/mod/conversations/read",
"modmail_subreddits": "api/mod/conversations/subreddits",
"modmail_unarchive": "api/mod/conversations/{id}/unarchive",
"modmail_unmute": "api/mod/conversations/{id}/unmute",
"modmail_unread": "api/mod/conversations/unread",
"modmail_unread_count": "api/mod/conversations/unread/count",
"morechildren": "api/morechildren/",
"multireddit": "user/{user}/m/{multi}/",
"multireddit_api": "api/multi/user/{user}/m/{multi}/",
"multireddit_base": "api/multi/",
"multireddit_copy": "api/multi/copy/",
"multireddit_rename": "api/multi/rename/",
"multireddit_update": "api/multi/user/{user}/m/{multi}/r/{subreddit}",
"multireddit_user": "api/multi/user/{user}/",
"mute_sender": "api/mute_message_author/",
"my_contributor": "subreddits/mine/contributor/",
"my_moderator": "subreddits/mine/moderator/",
"my_multireddits": "api/multi/mine/",
"my_subreddits": "subreddits/mine/subscriber/",
"preferences": "api/v1/me/prefs",
"quarantine_opt_in": "api/quarantine_optin",
"quarantine_opt_out": "api/quarantine_optout",
"read_message": "api/read_message/",
"removal_comment_message": "api/v1/modactions/removal_comment_message",
"removal_link_message": "api/v1/modactions/removal_link_message",
"remove": "api/remove/",
"report": "api/report/",
"rules": "r/{subreddit}/about/rules",
"save": "api/save/",
"search": "r/{subreddit}/search/",
"select_flair": "r/{subreddit}/api/selectflair/",
"sendreplies": "api/sendreplies",
"sent": "message/sent/",
"setpermissions": "r/{subreddit}/api/setpermissions/",
"site_admin": "api/site_admin/",
"spoiler": "api/spoiler/",
"sticky_submission": "api/set_subreddit_sticky/",
"store_visits": "api/store_visits",
"structured_styles": "api/v1/structured_styles/{subreddit}",
"style_asset_lease": "api/v1/style_asset_upload_s3/{subreddit}",
"sub_recommended": "api/recommend/sr/{subreddits}",
"submission": "comments/{id}/",
"submission_replies": "message/selfreply/",
"submit": "api/submit/",
"subreddit": "r/{subreddit}/",
"subreddit_about": "r/{subreddit}/about/",
"subreddit_filter": "api/filter/user/{user}/f/{special}/r/{subreddit}",
"subreddit_filter_list": "api/filter/user/{user}/f/{special}",
"subreddit_random": "r/{subreddit}/random/",
"subreddit_settings": "r/{subreddit}/about/edit/",
"subreddit_stylesheet": "r/{subreddit}/api/subreddit_stylesheet/",
"subreddits_by_topic": "api/subreddits_by_topic",
"subreddits_default": "subreddits/default/",
"subreddits_gold": "subreddits/gold/",
"subreddits_name_search": "api/search_reddit_names/",
"subreddits_new": "subreddits/new/",
"subreddits_popular": "subreddits/popular/",
"subreddits_search": "subreddits/search/",
"subscribe": "api/subscribe/",
"suggested_sort": "api/set_suggested_sort/",
"trophies": "api/v1/user/{user}/trophies",
"uncollapse": "api/uncollapse_message/",
"unfriend": "r/{subreddit}/api/unfriend/",
"unhide": "api/unhide/",
"unignore_reports": "api/unignore_reports/",
"unlock": "api/unlock/",
"unmarknsfw": "api/unmarknsfw/",
"unmute_sender": "api/unmute_message_author/",
"unread": "message/unread/",
"unread_message": "api/unread_message/",
"unsave": "api/unsave/",
"unspoiler": "api/unspoiler/",
"upload_image": "r/{subreddit}/api/upload_sr_img",
"user": "user/{user}/",
"user_about": "user/{user}/about/",
"user_flair": "r/{subreddit}/api/user_flair_v2",
"users_new": "users/new",
"users_popular": "users/popular",
"users_search": "users/search",
"vote": "api/vote/",
"widget_create": "r/{subreddit}/api/widget",
"widget_lease": "r/{subreddit}/api/widget_image_upload_s3",
"widget_modify": "r/{subreddit}/api/widget/{widget_id}",
"widget_order": "r/{subreddit}/api/widget_order/{section}",
"widgets": "r/{subreddit}/api/widgets",
"wiki_edit": "r/{subreddit}/api/wiki/edit/",
"wiki_page": "r/{subreddit}/wiki/{page}",
"wiki_page_editor": "r/{subreddit}/api/wiki/alloweditor/{method}",
"wiki_page_revisions": "r/{subreddit}/wiki/revisions/{page}",
"wiki_page_settings": "r/{subreddit}/wiki/settings/{page}",
"wiki_pages": "r/{subreddit}/wiki/pages/",
"wiki_revisions": "r/{subreddit}/wiki/revisions/",
}
| true
| true
|
f7159f9ba44b38e5dac9b34e58d1d994a96098c0
| 5,561
|
py
|
Python
|
autotorrent/clients/tests/test_transmission.py
|
jyggen/autotorrent
|
5a8f2b40ccc8c66c73dc520f98b886d21e163afa
|
[
"MIT"
] | 278
|
2015-02-12T19:19:53.000Z
|
2022-03-22T21:17:28.000Z
|
autotorrent/clients/tests/test_transmission.py
|
jyggen/autotorrent
|
5a8f2b40ccc8c66c73dc520f98b886d21e163afa
|
[
"MIT"
] | 56
|
2015-03-27T00:38:37.000Z
|
2022-03-26T17:52:58.000Z
|
autotorrent/clients/tests/test_transmission.py
|
jyggen/autotorrent
|
5a8f2b40ccc8c66c73dc520f98b886d21e163afa
|
[
"MIT"
] | 48
|
2015-03-10T16:50:19.000Z
|
2022-03-20T12:11:50.000Z
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from ...bencode import bdecode
from ..transmission import TransmissionClient as RealTransmissionClient
current_path = os.path.dirname(__file__)
class TransmissionClient(RealTransmissionClient):
def __init__(self, *args, **kwargs):
super(TransmissionClient, self).__init__(*args, **kwargs)
self._torrents = {}
self._torrent_id = 1
def call(self, method, **kwargs):
_ = json.dumps(kwargs)
if method == 'session-get':
return {'version': 'version: 2.82 (14160)',
'config-dir': '/home/autotorrent/.config/transmission-daemon',
'download-dir': '/home/autotorrent/Downloads',
'rpc-version': 15}
elif method == 'torrent-add':
self._torrent_id += 1
self._torrents[self._torrent_id] = kwargs
return {'torrent-added': {'id': self._torrent_id}}
elif method == 'torrent-rename-path':
self._torrents[kwargs['ids'][0]].update(kwargs)
return {}
elif method == 'torrent-start':
self._torrents[kwargs['ids'][0]]['paused'] = False
return {}
else:
raise Exception(method, kwargs)
class TestTransmissionClient(TestCase):
def setUp(self):
self.client = TransmissionClient('http://127.0.0.1:9091')
self._temp_path = tempfile.mkdtemp()
def tearDown(self):
if self._temp_path.startswith('/tmp'): # paranoid-mon, the best pokemon.
shutil.rmtree(self._temp_path)
def test_test_connection(self):
self.assertEqual(self.client.test_connection(), "version: 2.82 (14160), config-dir: /home/autotorrent/.config/transmission-daemon, download-dir: /home/autotorrent/Downloads")
def _add_torrent_with_links(self, letters):
with open(os.path.join(current_path, 'test.torrent'), 'rb') as f:
torrent = bdecode(f.read())
files = []
for letter in ['a', 'b', 'c']:
filename = 'file_%s.txt' % letter
files.append({
'completed': (letter in letters),
'length': 11,
'path': ['tmp', filename],
})
return self.client.add_torrent(torrent, '/tmp/', files)
def test_add_torrent_complete(self):
self.assertTrue(self._add_torrent_with_links(['a', 'b', 'c']))
self.assertTrue((2 in self.client._torrents))
self.assertEqual(self.client._torrents[2]['paused'], False)
def test_auto_config_successful_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '0.0.0.0',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.0.0.1:12312/transmission/rpc'
})
def test_auto_config_successful_differnet_bind_ip_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.22.54.99:12312/transmission/rpc'
})
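    # Note (illustrative summary of the two passing cases above): a
    # settings.json such as
    #   {"rpc-bind-address": "0.0.0.0", "rpc-port": 12312}
    # is expected to yield the RPC URL http://127.0.0.1:12312/transmission/rpc
    # (0.0.0.0 is mapped to 127.0.0.1), while a concrete bind address like
    # 127.22.54.99 is used verbatim in the URL.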
def test_auto_config_unsuccessful_missing_ip(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_missing_port(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_problematic_file(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
os.chmod(os.path.join(config_path, 'settings.json'), 0)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
| 36.827815
| 182
| 0.58209
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from ...bencode import bdecode
from ..transmission import TransmissionClient as RealTransmissionClient
current_path = os.path.dirname(__file__)
class TransmissionClient(RealTransmissionClient):
def __init__(self, *args, **kwargs):
super(TransmissionClient, self).__init__(*args, **kwargs)
self._torrents = {}
self._torrent_id = 1
def call(self, method, **kwargs):
_ = json.dumps(kwargs)
if method == 'session-get':
return {'version': 'version: 2.82 (14160)',
'config-dir': '/home/autotorrent/.config/transmission-daemon',
'download-dir': '/home/autotorrent/Downloads',
'rpc-version': 15}
elif method == 'torrent-add':
self._torrent_id += 1
self._torrents[self._torrent_id] = kwargs
return {'torrent-added': {'id': self._torrent_id}}
elif method == 'torrent-rename-path':
self._torrents[kwargs['ids'][0]].update(kwargs)
return {}
elif method == 'torrent-start':
self._torrents[kwargs['ids'][0]]['paused'] = False
return {}
else:
raise Exception(method, kwargs)
class TestTransmissionClient(TestCase):
def setUp(self):
self.client = TransmissionClient('http://127.0.0.1:9091')
self._temp_path = tempfile.mkdtemp()
def tearDown(self):
if self._temp_path.startswith('/tmp'):
shutil.rmtree(self._temp_path)
def test_test_connection(self):
self.assertEqual(self.client.test_connection(), "version: 2.82 (14160), config-dir: /home/autotorrent/.config/transmission-daemon, download-dir: /home/autotorrent/Downloads")
def _add_torrent_with_links(self, letters):
with open(os.path.join(current_path, 'test.torrent'), 'rb') as f:
torrent = bdecode(f.read())
files = []
for letter in ['a', 'b', 'c']:
filename = 'file_%s.txt' % letter
files.append({
'completed': (letter in letters),
'length': 11,
'path': ['tmp', filename],
})
return self.client.add_torrent(torrent, '/tmp/', files)
def test_add_torrent_complete(self):
self.assertTrue(self._add_torrent_with_links(['a', 'b', 'c']))
self.assertTrue((2 in self.client._torrents))
self.assertEqual(self.client._torrents[2]['paused'], False)
def test_auto_config_successful_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '0.0.0.0',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.0.0.1:12312/transmission/rpc'
})
def test_auto_config_successful_differnet_bind_ip_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.22.54.99:12312/transmission/rpc'
})
def test_auto_config_unsuccessful_missing_ip(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_missing_port(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_problematic_file(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
os.chmod(os.path.join(config_path, 'settings.json'), 0)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
| true
| true
|
f7159fe0ba18267bd61a13e1aaa108634eb930fb
| 18,619
|
py
|
Python
|
src/test/ClusterClassifier.py
|
fermi-lat/CalRecon
|
69e123b523770baa1fc9e8f3b78e211b1064b0c0
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/ClusterClassifier.py
|
fermi-lat/CalRecon
|
69e123b523770baa1fc9e8f3b78e211b1064b0c0
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/ClusterClassifier.py
|
fermi-lat/CalRecon
|
69e123b523770baa1fc9e8f3b78e211b1064b0c0
|
[
"BSD-3-Clause"
] | null | null | null |
import ROOT
import time
import os
from array import array
from math import sqrt
from pXmlWriter import *
from ClusterConfig import *
class ClusterClassifier:
def __init__(self, varBins=True):
print 'Opening files...'
self.RootTreeDict = {}
for (topology, filePathList) in TRAIN_FILE_PATH_DICT.items():
self.RootTreeDict[topology] = ROOT.TChain('MeritTuple')
for file in filePathList:
self.RootTreeDict[topology].Add(file)
print 'Creating histograms for pdfs...'
self.PdfHistDict = {}
self.PdfHistSliceDict = {}
self.PdfVarBinsDict = {}
for topology in TRAIN_FILE_PATH_DICT.keys():
self.PdfHistDict[topology] = {}
self.PdfHistSliceDict[topology] = {}
self.PdfVarBinsDict[topology] = {}
for var in VARIABLE_LIST:
if varBins:
print 'Processing %s for %s with varBins' % (var, topology)
self.__createHistSlice(var, topology)
else:
print 'Processing %s for %s with fixedBins'%(var, topology)
self.__createPdfHist(var, topology)
print 'Done.'
def __setupTrainDict(self):
self.TrainDict = {}
for (topology,filePath) in FILE_PATH_DICT.items():
TRAIN_FILE_LIST = []
fileName = os.path.basename(filePath)
fileName = fileName.split('-')[0]
print fileName
TRAIN_FILE_LIST = getTrainFilePath(fileName)
            self.TrainDict[topology] = TRAIN_FILE_LIST
#print self.TrainDict
def __createHistSlice(self, var, topology):
# Create the equal populated bin histogram...
#Get a precut of needed for the topology.
cut = getCut(topology)
self.PdfHistSliceDict[topology][var.Label] = {}
self.PdfVarBinsDict[topology][var.Label] = {}
#Project info to hist for each energy range and add to dict.
#Call variableBinning method and re-project to histo.
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
Ecut = "&&(log10(CalEnergyRaw)>=%s&&log10(CalEnergyRaw)<=%s)"\
%(Emin,Emax)
fullCut = cut+Ecut
## Get the min and max value for this variable in this energy
## range and for this topology. I then use this range to
## project the meritTuple onto a histo with this xrange. By
## using different xranges for each energy, topology and variable
## I hope to be able to pick better binning in each case.
if self.RootTreeDict[topology].GetEntries(str(fullCut)) > 0:
(xmin,xmax) = \
self.getBoundValues(topology,var.Expression, fullCut)
else:
xmin = var.MinValue
xmax = var.MaxValue
print "Bounds for var %s are (%s,%s)"%(var.Expression,xmin,xmax)
hName = hname(var.Label, topology,i)
hTitle = '%s P.D.F. (%s) logE (%s,%s)' %\
(var.Expression, topology,Emin,Emax)
h = ROOT.TH1F(hName, hTitle, INI_NUM_BINS, xmin, xmax)
#Here I project without specifying the binning.
h.SetTitle(var.Expression)
self.RootTreeDict[topology].Project(hName, var.Expression, fullCut)
self.PdfHistDict[topology][var.Label] = h
h.SetTitle('Projection_equalBins')
h.SetLineColor(getColor(topology))
h.GetXaxis().SetLabelSize(0.06)
h.GetYaxis().SetLabelSize(0.06)
h.GetXaxis().SetTitleSize(0.06)
h.GetXaxis().SetTitleOffset(0.80)
#h.Draw()
print "%s cut:%s xrange: (%s,%s)"%(topology,fullCut,xmin,xmax)
#ROOT.gPad.Update()
# raw_input()
self.PdfHistSliceDict[topology][var.Label][i] = h
self.getVariableBinning(var,topology,i,xmin,xmax)
Varbinning = self.PdfVarBinsDict[topology][var.Label][i]
numBins = len(Varbinning) - 1
print Varbinning,numBins
NewhName = hVarname(var.Label, topology,i)
NewhTitle = '%s varBin P.D.F. (%s) logE (%s,%s)' %\
(var.Expression, topology,Emin,Emax)
PdfSlice = ROOT.TH1F(NewhName, NewhTitle, numBins, Varbinning)
if numBins > 1:
self.RootTreeDict[topology].Project(NewhName,var.Expression, fullCut)
else:
print "Only one bin! Setting histo to zero!"
PdfSlice.SetBinContent(1,0)
PdfSlice.Draw()
ROOT.gPad.Update()
#raw_input()
self.PdfVarBinsDict[topology][var.Label][i] = PdfSlice
def __createPdfHist(self, var, topology):
# Create the two-dimensional histogram...
hName = hname(var.Label, topology)
hTitle = '%s P.D.F. (%s)' % (var.Expression, topology)
h = ROOT.TH2F(hName, hTitle, NUM_E_BINS, LOG_E_MIN, LOG_E_MAX,
var.NumBins, var.MinValue, var.MaxValue)
h.SetXTitle('log10(CalEnergyRaw)')
h.SetYTitle(var.Expression)
self.PdfHistDict[topology][var.Label] = h
expr = '%s:log10(CalEnergyRaw)' % var.Expression
cut = getCut(topology)
self.RootTreeDict[topology].Project(hName, expr, cut)
# ... then normalize the vertical slices.
normalizeSlices(h)
# ... finally create a TH1 for each slice.
self.PdfHistSliceDict[topology][var.Label] = {}
for i in range(NUM_E_BINS):
hSlice = h.ProjectionY('%s_slice%d' % (h.GetName(), i), i+1, i+1)
hSlice.SetTitle('P.D.F.')
hSlice.SetLineColor(getColor(topology))
hSlice.GetXaxis().SetLabelSize(0.06)
hSlice.GetYaxis().SetLabelSize(0.06)
hSlice.GetXaxis().SetTitleSize(0.06)
hSlice.GetXaxis().SetTitleOffset(0.80)
self.PdfHistSliceDict[topology][var.Label][i] = hSlice
def getBoundValues(self, topology, expr, cut = ''):
""" Retrieve the maximum and minimum values for a generic expression.
This is unelegant in that, in order not to loop over the event,
which is slow in python, the chain is projected over a temporary
histogram wich is then deleted. Unfortunately for a generic expression
we cannot handle this with ROOT.TTree.GetMaximum/MinValue().
"""
print 'Retrieving bound values for "%s"...' % expr
self.RootTreeDict[topology].Project('temphist', expr, str(cut))
htemp = ROOT.gDirectory.Get('temphist')
numBins = htemp.GetNbinsX()
minValue = htemp.GetBinCenter(1) - 0.5*htemp.GetBinWidth(1)
maxValue = htemp.GetBinCenter(numBins) + 0.5*htemp.GetBinWidth(numBins)
print "Initial min and max from getBoundValues (%s,%s)"%(minValue,maxValue)
"""
##Here I am cutting out a given amount from the tail of the
##distributions (you can set it via frac). I found that it was
        ##messing up the classification. So for now I am commenting this part
##and taking everything, most likely I will need to think of a more
##clever way to bin my histos.
#htemp.Draw()
#ROOT.gPad.Update()
#raw_input()
Nbins = htemp.GetNbinsX()
totalEntries = htemp.GetEntries()
tot = 0
for i in range(1,Nbins + 1):
tot += htemp.GetBinContent(i)
try:
frac = tot/totalEntries
except ZeroDivisionError:
frac = 0
binLowEdge = htemp.GetBinLowEdge(i)
binWidth = htemp.GetBinWidth(i)
binHighEdge = binLowEdge + binWidth
if frac<0.99:
maxValue = binHighEdge
#ROOT.gPad.Update()
"""
htemp.Delete()
##Final check that if the variable is defined between 0 and 1, the upper and lower
#bounds of the variable should not be different from 0 and 1.
if expr=="Cal1CoreEneFrac":
minValue = 0.0
maxValue = 1.0
# logger.debug('Done, returning (%.3f, %.3f)' % (minValue, maxValue))
return (minValue, maxValue)
def getBinInfo(self,var,topology,i):
#Decide the number of bins to use based on the statistics in the
#histo. A histo with less than 200 entries should not have more
## than one bin.
histo = self.PdfHistSliceDict[topology][var.Label][i]
TotalEntries = histo.GetEntries()
if TotalEntries >200.0:
Numbins = min(var.NumBins,sqrt(TotalEntries))
print "Tot:%s, going to use %s bins "%\
(TotalEntries,Numbins)
elif TotalEntries<=200.0:
print "** Total entries less than 200 (%s)! Going to use 1 bin!"%\
TotalEntries
Numbins = 1.0
NeededEntries = TotalEntries/Numbins
PADDING = sqrt(NeededEntries)
print "Needed entries %s" % NeededEntries
return NeededEntries, PADDING,Numbins
def getVariableBinning(self,var,topology,i,xmin,xmax):
binList = []
counter = 0
tot = 0
histo = self.PdfHistSliceDict[topology][var.Label][i]
TotalEntries = histo.GetEntries()
(NeededEntries, PADDING,Numbins) = self.getBinInfo(var,topology,i)
#If there are less than 200 entries in histo, use one single bin over
#entire range, otherwise calculate variable bins. NumBins, NeededEntries,
        #PADDING decided in self.getBinInfo.
if Numbins>1:
binList.append(xmin)
for bin in range(1,INI_NUM_BINS + 1):
entriesPerBin = histo.GetBinContent(bin)
binLowEdge = histo.GetBinLowEdge(bin)
binWidth = histo.GetBinWidth(bin)
binHighEdge = binLowEdge + binWidth
counter+=entriesPerBin
#Check that the amount of entries is equal to needed
#entries and define the new bin width.
if counter>=NeededEntries - PADDING:
totFrac = counter/TotalEntries
print "***totFrac = %s\t counter:%s"%(totFrac,counter)
#I want to make sure that I have not reached the total
#amount of entries.
if counter!=(TotalEntries):
print "%s.) %s %s %.4f"%\
(bin,counter,NeededEntries,binHighEdge)
binList.append(binHighEdge)
counter = 0
#If I have reached the end of the histo, make sure to append to
#the binlist the max xrange so that the full range containing
#the 98% (this comes from the fact that in getBoundValues() I cut
#out 2% of the tails of the distributions) of the events is
#covered in the binning. -----This is not being used at the moment!!!
if binList[-1]<xmax:
print "Last value in the list is ",binList[-1]
binList.append(xmax)
print "Adding max to bin list!"
else:
binList = [xmin,xmax]
print "NumBins is equal to 1, taking %s as bins!"%binList
NewBins = array('f',binList)
self.PdfVarBinsDict[topology][var.Label][i] = NewBins
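    # Sketch of the equal-population idea implemented above (illustrative
    # only, nothing here is executed): starting from the finely binned
    # histogram, a new edge is emitted every time roughly
    # NeededEntries = TotalEntries/Numbins counts have accumulated, e.g. with
    #   counts = [5, 5, 10, 20, 10]   (50 entries, Numbins = 2 -> ~25 per bin)
    # the running sum reaches 25 only after the 4th fine bin, so its high edge
    # becomes the single interior edge and the remainder forms the last bin.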
def drawAllPdfHists(self):
for var in VARIABLE_LIST:
self.drawPdfHists(var)
def drawPdfHists(self, var):
cName = '%s_2d' % var.Label
cTitle = '%s (2d)' % var.Expression
c = ROOT.TCanvas(cName, cTitle, 1000, 800)
toPool(c)
c.Divide(2, 2)
for (i, topology) in enumerate(FILE_PATH_DICT.keys()):
c.cd(i + 1)
ROOT.gPad.SetRightMargin(0.15)
self.getPdfHist(topology, var).Draw('colz,text')
ROOT.gPad.SetLogz(True)
c.cd()
#c.Update()
cName = '%s_slices' % var.Label
cTitle = '%s (slices)' % var.Expression
c = ROOT.TCanvas(cName, cTitle, 1000, 600)
toPool(c)
c.Divide(4, 3)
for i in range(NUM_E_BINS):
legend = ROOT.TLegend(0.65, 0.67, 0.90, 0.85)
legend.SetName('%s_legend_slice%d' % (var.Label, i))
legend.SetFillStyle(0)
legend.SetLineStyle(0)
legend.SetLineWidth(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.08)
toPool(legend)
c.cd(i + 1)
ymax = 0
for (j, topology) in enumerate(FILE_PATH_DICT.keys()):
hSlice = self.getPdfSliceHist(topology, var, i)
y = hSlice.GetMaximum()
if y > ymax:
ymax = y
for (j, topology) in enumerate(FILE_PATH_DICT.keys()):
hSlice = self.getPdfSliceHist(topology, var, i)
hSlice.SetMaximum(1.2*ymax)
hSlice.Draw('same'*(j!=0))
legend.AddEntry(hSlice, topology)
legend.Draw()
logemin = LOG_E_MIN +\
i*float(LOG_E_MAX - LOG_E_MIN)/float(NUM_E_BINS)
logemax = LOG_E_MIN +\
(i + 1)*float(LOG_E_MAX - LOG_E_MIN)/float(NUM_E_BINS)
emin = (10**logemin)
emax = (10**logemax)
label = ROOT.TLatex(0.15, 0.8, '%.d--%d MeV' %\
(emin, emax))
label.SetName('%s_label_slice%d' % (var.Label, i))
label.SetTextSize(0.06)
label.SetNDC()
toPool(label)
label.Draw()
c.cd()
#c.Update()
def getPdfHist(self, topology, var):
return self.PdfHistDict[topology][var.Label]
def getVarBinHistSlice(self,topology,var,i):
return self.PdfVarBinsDict[topology][var.Label][i]
def getPdfSliceHist(self, topology, var, i):
return self.PdfHistSliceDict[topology][var.Label][i]
def getSliceInfo(self,histo):
infoList = []
numBins = histo.GetNbinsX()
numEntries = histo.GetEntries()
print "Number of bins:",numBins, numEntries
sumProb = 0
for i in xrange(1, numBins + 1):
binWidth = histo.GetBinWidth(i)
binVal = histo.GetBinContent(i)
binLowEdge = histo.GetBinLowEdge(i)
binHighEdge = binLowEdge + binWidth
try:
prob = binVal/(float(numEntries)*binWidth)
sumProb += prob
except ZeroDivisionError:
prob = 0.0
print "Zero Division Error!"
info = tuple(['%.5f'%binLowEdge,'%.5f'%binHighEdge,'%.5f'%prob])
infoList.append(info)
histo.SetBinContent(i, prob)
# print "%s.)Bin Width:%.2f\tBinContent:%d\t Prob:%.2f"\
# %(i,binWidth,binVal,(prob))
return infoList
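    # Worked example of the normalization above (illustrative): a bin holding
    # 20 of numEntries = 200 entries with binWidth = 0.5 gets
    #   prob = 20 / (200 * 0.5) = 0.2
    # so that sum(prob_i * binWidth_i) over the whole histogram is 1.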
def writeOutputFile(self, filePath,varBins=True):
print 'Writing output file %s...' % filePath
outputFile = ROOT.TFile(filePath, 'RECREATE')
for topology in CLASS_FILE_PATH_DICT.keys():
for var in VARIABLE_LIST:
if varBins:
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
self.getVarBinHistSlice(topology,var,i).Write()
else:
self.getPdfHist(topology, var).Write()
outputFile.Close()
print 'Done.'
def writeXmlFile(self,filepath,varBins=True):
print 'Writing output file %s...' % filepath
writer = pXmlWriter('%s'%filepath)
# writer.writeComment('GR-%s used for training'%GR_VERSION)
writer.writeComment('Generated by ClusterClassifier on %s'%\
time.asctime())
writer.writeComment('Precut used in training:')
writer.indent()
for topology in CLASS_FILE_PATH_DICT.keys():
cut = getCut(topology)
writer.writeComment('%s : %s'% (topology,cut))
writer.openTag('VariableBinsInfo')
writer.newLine()
writer.indent()
writer.writeComment('Energy intervals for the histograms log10(MeV).')
writer.openTag('EnergyBins')
writer.indent()
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
writer.writeTag('Energy',{'bin':"%s"%i,'Emin':"%s"%Emin,'Emax':"%s"%Emax})
writer.backup()
writer.closeTag('EnergyBins')
        writer.newLine()
        writer.writeComment('Histogram info for each topology considered (gam, had) and variables in equally populated bins. xmin, xmax are the bin low edge and high edge, and prob is the probability in that bin.')
for topology in CLASS_FILE_PATH_DICT.keys():
writer.openTag('Topology',{'name':"%s"%topology,})
writer.newLine()
writer.indent()
for var in VARIABLE_LIST:
writer.openTag('Variable',{'name':"%s"%var.Label,})
writer.newLine()
writer.indent()
if varBins:
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
writer.openTag('Energy',{'bin':"%s"%i,})
histo = self.getVarBinHistSlice(topology,var,i)
infoList = self.getSliceInfo(histo)
writer.indent()
for (xmin,xmax,prob) in infoList:
writer.writeTag('BinValues',{'xmin':"%s"%xmin,'xmax':"%s"%xmax,'pdv':"%s"%prob})
writer.backup()
writer.closeTag('Energy')
writer.backup()
writer.closeTag('Variable')
writer.newLine()
writer.backup()
writer.closeTag('Topology')
writer.newLine()
writer.backup()
writer.closeTag('VariableBinsInfo')
writer.closeFile()
if __name__ == '__main__':
    c = ClusterClassifier(True)
c.writeOutputFile('cluclassTestBinning1.root')
# c.drawAllPdfHists()
# c.writeOutputFile('cluclassVarBins_dEdx.root')
# c.writeXmlFile('xml_TestCode.xml')
| 40.56427
| 213
| 0.555239
|
import ROOT
import time
import os
from array import array
from math import sqrt
from pXmlWriter import *
from ClusterConfig import *
class ClusterClassifier:
def __init__(self, varBins=True):
print 'Opening files...'
self.RootTreeDict = {}
for (topology, filePathList) in TRAIN_FILE_PATH_DICT.items():
self.RootTreeDict[topology] = ROOT.TChain('MeritTuple')
for file in filePathList:
self.RootTreeDict[topology].Add(file)
print 'Creating histograms for pdfs...'
self.PdfHistDict = {}
self.PdfHistSliceDict = {}
self.PdfVarBinsDict = {}
for topology in TRAIN_FILE_PATH_DICT.keys():
self.PdfHistDict[topology] = {}
self.PdfHistSliceDict[topology] = {}
self.PdfVarBinsDict[topology] = {}
for var in VARIABLE_LIST:
if varBins:
print 'Processing %s for %s with varBins' % (var, topology)
self.__createHistSlice(var, topology)
else:
print 'Processing %s for %s with fixedBins'%(var, topology)
self.__createPdfHist(var, topology)
print 'Done.'
def __setupTrainDict(self):
self.TrainDict = {}
for (topology,filePath) in FILE_PATH_DICT.items():
TRAIN_FILE_LIST = []
fileName = os.path.basename(filePath)
fileName = fileName.split('-')[0]
print fileName
TRAIN_FILE_LIST = getTrainFilePath(fileName)
            self.TrainDict[topology] = TRAIN_FILE_LIST
def __createHistSlice(self, var, topology):
cut = getCut(topology)
self.PdfHistSliceDict[topology][var.Label] = {}
self.PdfVarBinsDict[topology][var.Label] = {}
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
Ecut = "&&(log10(CalEnergyRaw)>=%s&&log10(CalEnergyRaw)<=%s)"\
%(Emin,Emax)
fullCut = cut+Ecut
            if self.RootTreeDict[topology].GetEntries(str(fullCut)) > 0:
                (xmin,xmax) = \
                           self.getBoundValues(topology,var.Expression, fullCut)
            else:
                xmin = var.MinValue
                xmax = var.MaxValue
print "Bounds for var %s are (%s,%s)"%(var.Expression,xmin,xmax)
hName = hname(var.Label, topology,i)
hTitle = '%s P.D.F. (%s) logE (%s,%s)' %\
(var.Expression, topology,Emin,Emax)
h = ROOT.TH1F(hName, hTitle, INI_NUM_BINS, xmin, xmax)
h.SetTitle(var.Expression)
self.RootTreeDict[topology].Project(hName, var.Expression, fullCut)
self.PdfHistDict[topology][var.Label] = h
h.SetTitle('Projection_equalBins')
h.SetLineColor(getColor(topology))
h.GetXaxis().SetLabelSize(0.06)
h.GetYaxis().SetLabelSize(0.06)
h.GetXaxis().SetTitleSize(0.06)
h.GetXaxis().SetTitleOffset(0.80)
print "%s cut:%s xrange: (%s,%s)"%(topology,fullCut,xmin,xmax)
self.PdfHistSliceDict[topology][var.Label][i] = h
self.getVariableBinning(var,topology,i,xmin,xmax)
Varbinning = self.PdfVarBinsDict[topology][var.Label][i]
numBins = len(Varbinning) - 1
print Varbinning,numBins
NewhName = hVarname(var.Label, topology,i)
NewhTitle = '%s varBin P.D.F. (%s) logE (%s,%s)' %\
(var.Expression, topology,Emin,Emax)
PdfSlice = ROOT.TH1F(NewhName, NewhTitle, numBins, Varbinning)
if numBins > 1:
self.RootTreeDict[topology].Project(NewhName,var.Expression, fullCut)
else:
print "Only one bin! Setting histo to zero!"
PdfSlice.SetBinContent(1,0)
PdfSlice.Draw()
ROOT.gPad.Update()
self.PdfVarBinsDict[topology][var.Label][i] = PdfSlice
def __createPdfHist(self, var, topology):
hName = hname(var.Label, topology)
hTitle = '%s P.D.F. (%s)' % (var.Expression, topology)
h = ROOT.TH2F(hName, hTitle, NUM_E_BINS, LOG_E_MIN, LOG_E_MAX,
var.NumBins, var.MinValue, var.MaxValue)
h.SetXTitle('log10(CalEnergyRaw)')
h.SetYTitle(var.Expression)
self.PdfHistDict[topology][var.Label] = h
expr = '%s:log10(CalEnergyRaw)' % var.Expression
cut = getCut(topology)
self.RootTreeDict[topology].Project(hName, expr, cut)
normalizeSlices(h)
self.PdfHistSliceDict[topology][var.Label] = {}
for i in range(NUM_E_BINS):
hSlice = h.ProjectionY('%s_slice%d' % (h.GetName(), i), i+1, i+1)
hSlice.SetTitle('P.D.F.')
hSlice.SetLineColor(getColor(topology))
hSlice.GetXaxis().SetLabelSize(0.06)
hSlice.GetYaxis().SetLabelSize(0.06)
hSlice.GetXaxis().SetTitleSize(0.06)
hSlice.GetXaxis().SetTitleOffset(0.80)
self.PdfHistSliceDict[topology][var.Label][i] = hSlice
def getBoundValues(self, topology, expr, cut = ''):
""" Retrieve the maximum and minimum values for a generic expression.
        This is inelegant in that, in order not to loop over the events,
        which is slow in Python, the chain is projected over a temporary
        histogram which is then deleted. Unfortunately for a generic expression
we cannot handle this with ROOT.TTree.GetMaximum/MinValue().
"""
print 'Retrieving bound values for "%s"...' % expr
self.RootTreeDict[topology].Project('temphist', expr, str(cut))
htemp = ROOT.gDirectory.Get('temphist')
numBins = htemp.GetNbinsX()
minValue = htemp.GetBinCenter(1) - 0.5*htemp.GetBinWidth(1)
maxValue = htemp.GetBinCenter(numBins) + 0.5*htemp.GetBinWidth(numBins)
print "Initial min and max from getBoundValues (%s,%s)"%(minValue,maxValue)
"""
##Here I am cutting out a given amount from the tail of the
##distributions (you can set it via frac). I found that it was
        ##messing up the classification. So for now I am commenting this part
##and taking everything, most likely I will need to think of a more
##clever way to bin my histos.
#htemp.Draw()
#ROOT.gPad.Update()
#raw_input()
Nbins = htemp.GetNbinsX()
totalEntries = htemp.GetEntries()
tot = 0
for i in range(1,Nbins + 1):
tot += htemp.GetBinContent(i)
try:
frac = tot/totalEntries
except ZeroDivisionError:
frac = 0
binLowEdge = htemp.GetBinLowEdge(i)
binWidth = htemp.GetBinWidth(i)
binHighEdge = binLowEdge + binWidth
if frac<0.99:
maxValue = binHighEdge
#ROOT.gPad.Update()
"""
htemp.Delete()
        if expr=="Cal1CoreEneFrac":
            minValue = 0.0
            maxValue = 1.0
return (minValue, maxValue)
def getBinInfo(self,var,topology,i):
        histo = self.PdfHistSliceDict[topology][var.Label][i]
TotalEntries = histo.GetEntries()
if TotalEntries >200.0:
Numbins = min(var.NumBins,sqrt(TotalEntries))
print "Tot:%s, going to use %s bins "%\
(TotalEntries,Numbins)
elif TotalEntries<=200.0:
print "** Total entries less than 200 (%s)! Going to use 1 bin!"%\
TotalEntries
Numbins = 1.0
NeededEntries = TotalEntries/Numbins
PADDING = sqrt(NeededEntries)
print "Needed entries %s" % NeededEntries
return NeededEntries, PADDING,Numbins
def getVariableBinning(self,var,topology,i,xmin,xmax):
binList = []
counter = 0
tot = 0
histo = self.PdfHistSliceDict[topology][var.Label][i]
TotalEntries = histo.GetEntries()
(NeededEntries, PADDING,Numbins) = self.getBinInfo(var,topology,i)
if Numbins>1:
binList.append(xmin)
for bin in range(1,INI_NUM_BINS + 1):
entriesPerBin = histo.GetBinContent(bin)
binLowEdge = histo.GetBinLowEdge(bin)
binWidth = histo.GetBinWidth(bin)
binHighEdge = binLowEdge + binWidth
counter+=entriesPerBin
if counter>=NeededEntries - PADDING:
totFrac = counter/TotalEntries
print "***totFrac = %s\t counter:%s"%(totFrac,counter)
if counter!=(TotalEntries):
print "%s.) %s %s %.4f"%\
(bin,counter,NeededEntries,binHighEdge)
binList.append(binHighEdge)
counter = 0
if binList[-1]<xmax:
print "Last value in the list is ",binList[-1]
binList.append(xmax)
print "Adding max to bin list!"
else:
binList = [xmin,xmax]
print "NumBins is equal to 1, taking %s as bins!"%binList
NewBins = array('f',binList)
self.PdfVarBinsDict[topology][var.Label][i] = NewBins
def drawAllPdfHists(self):
for var in VARIABLE_LIST:
self.drawPdfHists(var)
def drawPdfHists(self, var):
cName = '%s_2d' % var.Label
cTitle = '%s (2d)' % var.Expression
c = ROOT.TCanvas(cName, cTitle, 1000, 800)
toPool(c)
c.Divide(2, 2)
for (i, topology) in enumerate(FILE_PATH_DICT.keys()):
c.cd(i + 1)
ROOT.gPad.SetRightMargin(0.15)
self.getPdfHist(topology, var).Draw('colz,text')
ROOT.gPad.SetLogz(True)
c.cd()
cName = '%s_slices' % var.Label
cTitle = '%s (slices)' % var.Expression
c = ROOT.TCanvas(cName, cTitle, 1000, 600)
toPool(c)
c.Divide(4, 3)
for i in range(NUM_E_BINS):
legend = ROOT.TLegend(0.65, 0.67, 0.90, 0.85)
legend.SetName('%s_legend_slice%d' % (var.Label, i))
legend.SetFillStyle(0)
legend.SetLineStyle(0)
legend.SetLineWidth(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.08)
toPool(legend)
c.cd(i + 1)
ymax = 0
for (j, topology) in enumerate(FILE_PATH_DICT.keys()):
hSlice = self.getPdfSliceHist(topology, var, i)
y = hSlice.GetMaximum()
if y > ymax:
ymax = y
for (j, topology) in enumerate(FILE_PATH_DICT.keys()):
hSlice = self.getPdfSliceHist(topology, var, i)
hSlice.SetMaximum(1.2*ymax)
hSlice.Draw('same'*(j!=0))
legend.AddEntry(hSlice, topology)
legend.Draw()
logemin = LOG_E_MIN +\
i*float(LOG_E_MAX - LOG_E_MIN)/float(NUM_E_BINS)
logemax = LOG_E_MIN +\
(i + 1)*float(LOG_E_MAX - LOG_E_MIN)/float(NUM_E_BINS)
emin = (10**logemin)
emax = (10**logemax)
label = ROOT.TLatex(0.15, 0.8, '%.d--%d MeV' %\
(emin, emax))
label.SetName('%s_label_slice%d' % (var.Label, i))
label.SetTextSize(0.06)
label.SetNDC()
toPool(label)
label.Draw()
c.cd()
def getPdfHist(self, topology, var):
return self.PdfHistDict[topology][var.Label]
def getVarBinHistSlice(self,topology,var,i):
return self.PdfVarBinsDict[topology][var.Label][i]
def getPdfSliceHist(self, topology, var, i):
return self.PdfHistSliceDict[topology][var.Label][i]
def getSliceInfo(self,histo):
infoList = []
numBins = histo.GetNbinsX()
numEntries = histo.GetEntries()
print "Number of bins:",numBins, numEntries
sumProb = 0
for i in xrange(1, numBins + 1):
binWidth = histo.GetBinWidth(i)
binVal = histo.GetBinContent(i)
binLowEdge = histo.GetBinLowEdge(i)
binHighEdge = binLowEdge + binWidth
try:
prob = binVal/(float(numEntries)*binWidth)
sumProb += prob
except ZeroDivisionError:
prob = 0.0
print "Zero Division Error!"
info = tuple(['%.5f'%binLowEdge,'%.5f'%binHighEdge,'%.5f'%prob])
infoList.append(info)
histo.SetBinContent(i, prob)
return infoList
def writeOutputFile(self, filePath,varBins=True):
print 'Writing output file %s...' % filePath
outputFile = ROOT.TFile(filePath, 'RECREATE')
for topology in CLASS_FILE_PATH_DICT.keys():
for var in VARIABLE_LIST:
if varBins:
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
self.getVarBinHistSlice(topology,var,i).Write()
else:
self.getPdfHist(topology, var).Write()
outputFile.Close()
print 'Done.'
def writeXmlFile(self,filepath,varBins=True):
print 'Writing output file %s...' % filepath
writer = pXmlWriter('%s'%filepath)
writer.writeComment('Generated by ClusterClassifier on %s'%\
time.asctime())
writer.writeComment('Precut used in training:')
writer.indent()
for topology in CLASS_FILE_PATH_DICT.keys():
cut = getCut(topology)
writer.writeComment('%s : %s'% (topology,cut))
writer.openTag('VariableBinsInfo')
writer.newLine()
writer.indent()
writer.writeComment('Energy intervals for the histograms log10(MeV).')
writer.openTag('EnergyBins')
writer.indent()
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
writer.writeTag('Energy',{'bin':"%s"%i,'Emin':"%s"%Emin,'Emax':"%s"%Emax})
writer.backup()
writer.closeTag('EnergyBins')
        writer.newLine()
        writer.writeComment('Histogram info for each topology considered (gam, had) and variables in equally populated bins. xmin, xmax are the bin low edge and high edge, and prob is the probability in that bin.')
for topology in CLASS_FILE_PATH_DICT.keys():
writer.openTag('Topology',{'name':"%s"%topology,})
writer.newLine()
writer.indent()
for var in VARIABLE_LIST:
writer.openTag('Variable',{'name':"%s"%var.Label,})
writer.newLine()
writer.indent()
if varBins:
for i,(Emin,Emax) in enumerate(ENERGY_BINS):
writer.openTag('Energy',{'bin':"%s"%i,})
histo = self.getVarBinHistSlice(topology,var,i)
infoList = self.getSliceInfo(histo)
writer.indent()
for (xmin,xmax,prob) in infoList:
writer.writeTag('BinValues',{'xmin':"%s"%xmin,'xmax':"%s"%xmax,'pdv':"%s"%prob})
writer.backup()
writer.closeTag('Energy')
writer.backup()
writer.closeTag('Variable')
writer.newLine()
writer.backup()
writer.closeTag('Topology')
writer.newLine()
writer.backup()
writer.closeTag('VariableBinsInfo')
writer.closeFile()
if __name__ == '__main__':
    c = ClusterClassifier(True)
c.writeOutputFile('cluclassTestBinning1.root')
| false
| true
|
f715a037d80404b6931c2c3dd6c455b1ba329594
| 4,755
|
py
|
Python
|
tools/stats_mcdc_data.py
|
Yc174/tf-faster-rcnn-mcdc
|
02d6008f2d689e6f928d2de24fc660073044d1b8
|
[
"MIT"
] | null | null | null |
tools/stats_mcdc_data.py
|
Yc174/tf-faster-rcnn-mcdc
|
02d6008f2d689e6f928d2de24fc660073044d1b8
|
[
"MIT"
] | null | null | null |
tools/stats_mcdc_data.py
|
Yc174/tf-faster-rcnn-mcdc
|
02d6008f2d689e6f928d2de24fc660073044d1b8
|
[
"MIT"
] | null | null | null |
#coding=utf-8
from __future__ import print_function
import time
import argparse
from glob import glob
import os, cv2
import json
def show(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
# left = int(x - w / 2)
# right = int(x + w / 2)
# top = int(y - h / 2)
# bottom = int(y + h / 2)
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
# draw_bbox_with_center(arr, r)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
def show_with_center(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
left = int(x - w / 2)
right = int(x + w / 2)
top = int(y - h / 2)
bottom = int(y + h / 2)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
# draw_bbox_with_center(arr, r)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
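# Illustrative helper (assumption: not part of the original script and not
# called by it) showing the same box conversion performed in the main block
# below: a COCO-style box [x, y, w, h] (top-left corner plus size, in pixels)
# is turned into the normalized center-based values written to the label files.
def coco_box_to_yolo_example(box, img_w, img_h):
    x, y, w, h = box
    cx = (x + w / 2.) / float(img_w)  # normalized center x
    cy = (y + h / 2.) / float(img_h)  # normalized center y
    return [cx, cy, w / float(img_w), h / float(img_h)]
# e.g. coco_box_to_yolo_example([100, 50, 40, 20], 1920, 1080)
#      -> [0.0625, 0.0555..., 0.0208..., 0.0185...]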
if __name__ == '__main__':
# data_dir = '/home/hzshuai/mcdc/mcdc_data'
data_dir = '/data/mcdc_data'
train_dir = data_dir + '/train/train_images'
label_dir = '/home/m12/mcdc_data/train/train_labels'
ann_file = data_dir + '/train/MCDC_train_100000.coco.json'
with open(ann_file) as fin:
ann = json.loads(fin.read())
# with open(label_dir + '/train_format.json', 'w') as fout:
# json.dump(ann, fout, indent=4, ensure_ascii=False)
ann_map = {}
cls = {}
for im in ann['images']:
ann_map[im['id']] = im
for a in ann['annotations']:
if 'car_rear' in a and 'rear_box' in a['car_rear'] and a['image_id'] in ann_map:
if 'ann' not in ann_map[a['image_id']]:
ann_map[a['image_id']]['ann'] = []
ann_map[a['image_id']]['ann'].append(a)
if a['type'] not in cls:
cls[a['type']] = 0
cls[a['type']] += 1
# {u'xiaoxingche': 189955, u'gongchengche': 305, u'huoche': 12975, u'unknown': 63462, u'sanlunche': 6684, u'others': 228, u'gongjiaokeche': 20610}
# 96104
#{u'xiaoxingche': 18813, u'gongchengche': 26, u'huoche': 1267, u'unknown': 6244, u'sanlunche': 642, u'others': 19, u'gongjiaokeche': 1912}
# if a['type'] == 'unknown' and cls[a['type']] % 23 == 0:
# if a['image_id'] == 0:
# image_path = train_dir + '/' + ann_map[a['image_id']]['file_name']
# show(image_path, a['car_rear']['rear_box'])
print(cls)
im_list = []
cls = ['xiaoxingche', 'gongchengche', 'huoche', 'unknown', 'sanlunche', 'others', 'gongjiaokeche']
for k, image in ann_map.iteritems():
if 'ann' in image:
# print(k)
# print(k, image)
image_path = train_dir + '/' + image['file_name']
im_list.append(image_path)
txt_path = label_dir + '/' + image['file_name'][:-4] + '.txt'
dirname = os.path.dirname(txt_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
dw, dh = 1./image['width'], 1./image['height']
# print(txt_path, 1./dw, 1./dh)
with open(txt_path, 'w') as fout:
for a in image['ann']:
# print(a)
x, y, w, h = a['car_rear']['rear_box']
# show(image_path, (x, y, w, h))
x = x + w / 2.
y = y + h / 2.
# show_with_center(image_path, (x, y, w, h))
x *= dw
y *= dh
w *= dw
h *= dh
bb = [x, y, w, h]
cls_id = cls.index(a['type'])
fout.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
# break
print(txt_path)
print(len(im_list))
with open(label_dir + '/train.txt', 'w') as fout:
for e in im_list:
fout.write(e + '\n')
with open(label_dir + '/valid.txt', 'w') as fout:
for i, e in enumerate(im_list):
if i % 10 == 0:
fout.write(e + '\n')
| 30.677419
| 146
| 0.512303
|
from __future__ import print_function
import time
import argparse
from glob import glob
import os, cv2
import json
def show(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
k = cv2.waitKey(0)
if k == 27:
cv2.destroyAllWindows()
elif k == ord('s'):
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
def show_with_center(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
left = int(x - w / 2)
right = int(x + w / 2)
top = int(y - h / 2)
bottom = int(y + h / 2)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
k = cv2.waitKey(0)
if k == 27:
cv2.destroyAllWindows()
elif k == ord('s'):
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
if __name__ == '__main__':
data_dir = '/data/mcdc_data'
train_dir = data_dir + '/train/train_images'
label_dir = '/home/m12/mcdc_data/train/train_labels'
ann_file = data_dir + '/train/MCDC_train_100000.coco.json'
with open(ann_file) as fin:
ann = json.loads(fin.read())
ann_map = {}
cls = {}
for im in ann['images']:
ann_map[im['id']] = im
for a in ann['annotations']:
if 'car_rear' in a and 'rear_box' in a['car_rear'] and a['image_id'] in ann_map:
if 'ann' not in ann_map[a['image_id']]:
ann_map[a['image_id']]['ann'] = []
ann_map[a['image_id']]['ann'].append(a)
if a['type'] not in cls:
cls[a['type']] = 0
cls[a['type']] += 1
print(cls)
im_list = []
cls = ['xiaoxingche', 'gongchengche', 'huoche', 'unknown', 'sanlunche', 'others', 'gongjiaokeche']
for k, image in ann_map.iteritems():
if 'ann' in image:
image_path = train_dir + '/' + image['file_name']
im_list.append(image_path)
txt_path = label_dir + '/' + image['file_name'][:-4] + '.txt'
dirname = os.path.dirname(txt_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
dw, dh = 1./image['width'], 1./image['height']
with open(txt_path, 'w') as fout:
for a in image['ann']:
x, y, w, h = a['car_rear']['rear_box']
x = x + w / 2.
y = y + h / 2.
x *= dw
y *= dh
w *= dw
h *= dh
bb = [x, y, w, h]
cls_id = cls.index(a['type'])
fout.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
print(txt_path)
print(len(im_list))
with open(label_dir + '/train.txt', 'w') as fout:
for e in im_list:
fout.write(e + '\n')
with open(label_dir + '/valid.txt', 'w') as fout:
for i, e in enumerate(im_list):
if i % 10 == 0:
fout.write(e + '\n')
| true
| true
|
f715a131ad3bfd03cd6d8810488e273d1fd54f64
| 6,015
|
py
|
Python
|
docker/api/daemon.py
|
jbn/docker-py
|
1e38d31c9fc74d07cb8dd3b7b100723bfacd23f7
|
[
"Apache-2.0"
] | 72
|
2018-07-02T07:47:15.000Z
|
2022-03-29T10:02:14.000Z
|
docker/api/daemon.py
|
jbn/docker-py
|
1e38d31c9fc74d07cb8dd3b7b100723bfacd23f7
|
[
"Apache-2.0"
] | 51
|
2019-10-08T01:53:02.000Z
|
2021-06-04T22:02:21.000Z
|
docker/api/daemon.py
|
jbn/docker-py
|
1e38d31c9fc74d07cb8dd3b7b100723bfacd23f7
|
[
"Apache-2.0"
] | 29
|
2018-09-17T06:10:32.000Z
|
2022-03-19T13:15:30.000Z
|
import os
from datetime import datetime
from .. import auth, types, utils
class DaemonApiMixin(object):
@utils.minimum_version('1.25')
def df(self):
"""
Get data usage information.
Returns:
(dict): A dictionary representing different resource categories
and their respective data usage.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for event in client.events(decode=True)
... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
or
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return types.CancellableStream(stream, response)
def info(self):
"""
Display system-wide information. Identical to the ``docker info``
command.
Returns:
(dict): The info as a dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
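    # Illustrative call (sketch; all argument values are placeholders), made
    # on an APIClient instance that mixes in this class:
    #   client.login(username="user", password="s3cret",
    #                registry="https://index.docker.io/v1/")
    # The returned dict is the registry's auth response, and the credentials
    # are cached in self._auth_configs for later use.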
def ping(self):
"""
Checks the server is responsive. An exception will be raised if it
isn't responding.
Returns:
(bool) The response from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
"""
Returns version information from the server. Similar to the ``docker
version`` command.
Returns:
(dict): The server version information
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
| 33.049451
| 79
| 0.571904
|
import os
from datetime import datetime
from .. import auth, types, utils
class DaemonApiMixin(object):
@utils.minimum_version('1.25')
def df(self):
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return types.CancellableStream(stream, response)
def info(self):
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
        # If we don't have any auth data so far, try reloading the config file
        # one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
| true
| true
|
f715a15885e13fd0957f648c1414a90e72a239ca
| 10,690
|
py
|
Python
|
stable_baselines3/dqn/dqn.py
|
haorang/285
|
3b7369b8eb4433952c9cdf27d4feaa015a6c40e4
|
[
"MIT"
] | 26
|
2021-11-05T08:46:06.000Z
|
2022-03-22T05:53:57.000Z
|
stable_baselines3/dqn/dqn.py
|
haorang/285
|
3b7369b8eb4433952c9cdf27d4feaa015a6c40e4
|
[
"MIT"
] | 1
|
2021-11-19T11:13:37.000Z
|
2021-11-30T09:08:04.000Z
|
stable_baselines3/dqn/dqn.py
|
haorang/285
|
3b7369b8eb4433952c9cdf27d4feaa015a6c40e4
|
[
"MIT"
] | 5
|
2021-11-05T08:46:12.000Z
|
2022-03-25T21:56:58.000Z
|
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy
class DQN(OffPolicyAlgorithm):
"""
Deep Q-Network (DQN)
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
Default hyperparameters are taken from the nature paper,
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.
:param gradient_steps: How many gradient steps to do after each rollout
(see ``train_freq`` and ``n_episodes_rollout``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param target_update_interval: update the target network every ``target_update_interval``
environment steps.
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
:param exploration_initial_eps: initial value of random action probability
:param exploration_final_eps: final value of random action probability
:param max_grad_norm: The maximum value for the gradient clipping
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[DQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
buffer_size: int = 1000000,
learning_starts: int = 50000,
batch_size: Optional[int] = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: int = 4,
gradient_steps: int = 1,
n_episodes_rollout: int = -1,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.1,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.05,
max_grad_norm: float = 10,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(DQN, self).__init__(
policy,
env,
DQNPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=None, # No action noise
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
self.max_grad_norm = max_grad_norm
# "epsilon" for the epsilon-greedy exploration
self.exploration_rate = 0.0
# Linear schedule will be defined in `_setup_model()`
self.exploration_schedule = None
self.q_net, self.q_net_target = None, None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(DQN, self)._setup_model()
self._create_aliases()
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
def _create_aliases(self) -> None:
self.q_net = self.policy.q_net
self.q_net_target = self.policy.q_net_target
def _on_step(self) -> None:
"""
Update the exploration rate and target network if needed.
This method is called in ``collect_rollouts()`` after each step in the environment.
"""
if self.num_timesteps % self.target_update_interval == 0:
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
logger.record("rollout/exploration rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Update learning rate according to schedule
self._update_learning_rate(self.policy.optimizer)
losses = []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
# Compute the target Q values
target_q = self.q_net_target(replay_data.next_observations)
# Follow greedy policy: use the one with the highest value
target_q, _ = target_q.max(dim=1)
# Avoid potential broadcast issue
target_q = target_q.reshape(-1, 1)
# 1-step TD target
target_q = replay_data.rewards + (1 - replay_data.dones) * self.gamma * target_q
# Get current Q estimates
current_q = self.q_net(replay_data.observations)
# Retrieve the q-values for the actions from the replay buffer
current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())
# Compute Huber loss (less sensitive to outliers)
loss = F.smooth_l1_loss(current_q, target_q)
losses.append(loss.item())
# Optimize the policy
self.policy.optimizer.zero_grad()
loss.backward()
# Clip gradient norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
# Increase update counter
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Overrides the base_class predict function to include epsilon-greedy exploration.
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
if not deterministic and np.random.rand() < self.exploration_rate:
if is_vectorized_observation(observation, self.observation_space):
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
else:
action, state = self.policy.predict(observation, state, mask, deterministic)
return action, state
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "DQN",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(DQN, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
| 43.279352
| 110
| 0.659682
|
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy
class DQN(OffPolicyAlgorithm):
def __init__(
self,
policy: Union[str, Type[DQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
buffer_size: int = 1000000,
learning_starts: int = 50000,
batch_size: Optional[int] = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: int = 4,
gradient_steps: int = 1,
n_episodes_rollout: int = -1,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.1,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.05,
max_grad_norm: float = 10,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(DQN, self).__init__(
policy,
env,
DQNPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=None,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
self.max_grad_norm = max_grad_norm
self.exploration_rate = 0.0
self.exploration_schedule = None
self.q_net, self.q_net_target = None, None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(DQN, self)._setup_model()
self._create_aliases()
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
def _create_aliases(self) -> None:
self.q_net = self.policy.q_net
self.q_net_target = self.policy.q_net_target
def _on_step(self) -> None:
if self.num_timesteps % self.target_update_interval == 0:
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
logger.record("rollout/exploration rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
self._update_learning_rate(self.policy.optimizer)
losses = []
for gradient_step in range(gradient_steps):
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
target_q = self.q_net_target(replay_data.next_observations)
target_q, _ = target_q.max(dim=1)
target_q = target_q.reshape(-1, 1)
target_q = replay_data.rewards + (1 - replay_data.dones) * self.gamma * target_q
current_q = self.q_net(replay_data.observations)
current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())
loss = F.smooth_l1_loss(current_q, target_q)
losses.append(loss.item())
self.policy.optimizer.zero_grad()
loss.backward()
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
if not deterministic and np.random.rand() < self.exploration_rate:
if is_vectorized_observation(observation, self.observation_space):
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
else:
action, state = self.policy.predict(observation, state, mask, deterministic)
return action, state
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "DQN",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(DQN, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
| true
| true
|
f715a21938d09961aef70bbfb712b4ac4b78ccb3
| 2,266
|
py
|
Python
|
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_sdram_alloc_request.py
|
Roboy/LSM_SpiNNaker_MyoArm
|
04fa1eaf78778edea3ba3afa4c527d20c491718e
|
[
"BSD-3-Clause"
] | 2
|
2020-11-01T13:22:11.000Z
|
2020-11-01T13:22:20.000Z
|
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_sdram_alloc_request.py
|
Roboy/LSM_SpiNNaker_MyoArm
|
04fa1eaf78778edea3ba3afa4c527d20c491718e
|
[
"BSD-3-Clause"
] | null | null | null |
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_sdram_alloc_request.py
|
Roboy/LSM_SpiNNaker_MyoArm
|
04fa1eaf78778edea3ba3afa4c527d20c491718e
|
[
"BSD-3-Clause"
] | null | null | null |
from spinnman.messages.scp.abstract_messages.abstract_scp_request\
import AbstractSCPRequest
from spinnman.messages.scp.impl.scp_sdram_alloc_response import \
SCPSDRAMAllocResponse
from spinnman.messages.sdp.sdp_header import SDPHeader
from spinnman.messages.sdp.sdp_flag import SDPFlag
from spinnman.messages.scp.scp_request_header import SCPRequestHeader
from spinnman.messages.scp.scp_command import SCPCommand
from spinnman.messages.scp.scp_alloc_free_type import SCPAllocFreeType
from spinnman import exceptions
class SCPSDRAMAllocRequest(AbstractSCPRequest):
""" An SCP Request to allocate space in the SDRAM space
"""
def __init__(self, x, y, app_id, size, tag=None):
"""
:param x: The x-coordinate of the chip to allocate on, between 0 and\
255
:type x: int
:param y: The y-coordinate of the chip to allocate on, between 0 and\
255
:type y: int
:param app_id: The id of the application, between 0 and 255
:type app_id: int
:param size: The size in bytes of memory to be allocated
:type size: int
        :param tag: the tag for the SDRAM, an 8-bit (chip-wide) tag that can be\
looked up by a SpiNNaker application to discover the address\
of the allocated block. If `0` then no tag is applied.
:type tag: int
"""
if tag is None:
tag = 0
elif not(0 <= tag < 256):
raise exceptions.SpinnmanInvalidParameterException(
"The tag param needs to be between 0 and 255, or None (in "
"which case 0 will be used by default)")
AbstractSCPRequest.__init__(
self,
SDPHeader(
flags=SDPFlag.REPLY_EXPECTED, destination_port=0,
destination_cpu=0, destination_chip_x=x,
destination_chip_y=y),
SCPRequestHeader(command=SCPCommand.CMD_ALLOC),
argument_1=(
(app_id << 8) |
SCPAllocFreeType.ALLOC_SDRAM.value), # @UndefinedVariable
argument_2=size, argument_3=tag)
self._size = size
def get_scp_response(self):
return SCPSDRAMAllocResponse(self._size)
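# An illustrative sketch of how argument_1 above packs the request: the
# application id sits in the upper byte and the alloc/free operation code in
# the lower byte. The value 0 below is only a stand-in for
# SCPAllocFreeType.ALLOC_SDRAM.value, which is an assumption here.
if __name__ == "__main__":
    demo_app_id = 30        # must be 0-255, as the docstring requires
    ALLOC_SDRAM_CODE = 0    # assumed stand-in for the real enum value
    argument_1 = (demo_app_id << 8) | ALLOC_SDRAM_CODE
    assert argument_1 >> 8 == demo_app_id
    assert argument_1 & 0xFF == ALLOC_SDRAM_CODE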
| 39.068966
| 79
| 0.644307
|
from spinnman.messages.scp.abstract_messages.abstract_scp_request\
import AbstractSCPRequest
from spinnman.messages.scp.impl.scp_sdram_alloc_response import \
SCPSDRAMAllocResponse
from spinnman.messages.sdp.sdp_header import SDPHeader
from spinnman.messages.sdp.sdp_flag import SDPFlag
from spinnman.messages.scp.scp_request_header import SCPRequestHeader
from spinnman.messages.scp.scp_command import SCPCommand
from spinnman.messages.scp.scp_alloc_free_type import SCPAllocFreeType
from spinnman import exceptions
class SCPSDRAMAllocRequest(AbstractSCPRequest):
def __init__(self, x, y, app_id, size, tag=None):
if tag is None:
tag = 0
elif not(0 <= tag < 256):
raise exceptions.SpinnmanInvalidParameterException(
"The tag param needs to be between 0 and 255, or None (in "
"which case 0 will be used by default)")
AbstractSCPRequest.__init__(
self,
SDPHeader(
flags=SDPFlag.REPLY_EXPECTED, destination_port=0,
destination_cpu=0, destination_chip_x=x,
destination_chip_y=y),
SCPRequestHeader(command=SCPCommand.CMD_ALLOC),
argument_1=(
(app_id << 8) |
SCPAllocFreeType.ALLOC_SDRAM.value),
argument_2=size, argument_3=tag)
self._size = size
def get_scp_response(self):
return SCPSDRAMAllocResponse(self._size)
| true
| true
|
f715a27d0c9909bea75ea1edd3eb15e6bba3b9a4
| 5,102
|
py
|
Python
|
gluon/packages/dal/pydal/adapters/mssql.py
|
kyomei/python-locadora
|
c461252387f77bd01465fd851d0b5bfa9ce53493
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/packages/dal/pydal/adapters/mssql.py
|
kyomei/python-locadora
|
c461252387f77bd01465fd851d0b5bfa9ce53493
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/packages/dal/pydal/adapters/mssql.py
|
kyomei/python-locadora
|
c461252387f77bd01465fd851d0b5bfa9ce53493
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from .._compat import PY2, iteritems, integer_types, to_unicode, long
from .._globals import IDENTITY
from .base import SQLAdapter
from . import adapters, with_connection_or_raise
class Slicer(object):
def rowslice(self, rows, minimum=0, maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class MSSQL(SQLAdapter):
dbengine = 'mssql'
drivers = ('pyodbc',)
REGEX_DSN = '^.+$'
REGEX_URI = \
'^(?P<user>[^:@]+)(:(?P<password>[^@]*))?' \
r'@(?P<host>[^:/]+|\[[^\]]+\])(:(?P<port>\d+))?' \
'/(?P<db>[^?]+)' \
r'(\?(?P<urlargs>.*))?$'
REGEX_ARG_VAL = '(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)'
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.srid = srid
super(MSSQL, self).__init__(
db, uri, pool_size, folder, db_codec, credential_decoder,
driver_args, adapter_args, do_connect, after_connection)
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
m = re.match(self.REGEX_DSN, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
self.dsn = m.group()
else:
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
password = self.credential_decoder(m.group('password'))
if password is None:
password = ''
host = m.group('host')
db = m.group('db')
port = m.group('port') or '1433'
# Parse the optional url name-value arg pairs after the '?'
# (in the form of arg1=value1&arg2=value2&...)
# (drivers like FreeTDS insist on uppercase parameter keys)
argsdict = {'DRIVER': '{SQL Server}'}
urlargs = m.group('urlargs') or ''
for argmatch in re.finditer(self.REGEX_ARG_VAL, urlargs):
argsdict[str(argmatch.group('argkey')).upper()] = \
argmatch.group('argvalue')
urlargs = ';'.join([
'%s=%s' % (ak, av) for (ak, av) in iteritems(argsdict)])
self.dsn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
% (host, port, db, user, password, urlargs)
def connector(self):
return self.driver.connect(self.dsn, **self.driver_args)
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
@adapters.register_for('mssql')
class MSSQL1(MSSQL, Slicer):
pass
@adapters.register_for('mssql3')
class MSSQL3(MSSQL):
pass
@adapters.register_for('mssql4')
class MSSQL4(MSSQL):
pass
class MSSQLN(MSSQL):
def represent(self, obj, field_type):
rv = super(MSSQLN, self).represent(obj, field_type)
if field_type in ('string', 'text', 'json') and rv.startswith("'"):
rv = 'N' + rv
return rv
@with_connection_or_raise
def execute(self, *args, **kwargs):
if PY2:
args = list(args)
args[0] = to_unicode(args[0])
return super(MSSQLN, self).execute(*args, **kwargs)
@adapters.register_for('mssqln', 'mssql2')
class MSSQL1N(MSSQLN, Slicer):
pass
@adapters.register_for('mssql3n')
class MSSQL3N(MSSQLN):
pass
@adapters.register_for('mssql4n')
class MSSQL4N(MSSQLN):
pass
@adapters.register_for('vertica')
class Vertica(MSSQL1):
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
@adapters.register_for('sybase')
class Sybase(MSSQL1):
dbengine = 'sybase'
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
m = re.match(self.REGEX_DSN, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
dsn = m.group()
else:
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
password = self.credential_decoder(m.group('password'))
if password is None:
password = ''
host = m.group('host')
db = m.group('db')
port = m.group('port') or '1433'
self.dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)
self.driver_args.update(
user=self.credential_decoder(user),
passwd=self.credential_decoder(password))
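# An illustrative sketch of the URI handling in MSSQL._initialize_ above: the
# class-level regexes split a DAL URI into user/password/host/port/db and fold
# any ?key=value pairs into the DSN. The URI below is a made-up placeholder.
if __name__ == "__main__":
    demo_uri = "sa:secret@dbhost:1433/mydb?DRIVER={ODBC Driver 17 for SQL Server}"
    match = re.match(MSSQL.REGEX_URI, demo_uri)
    url_args = dict(re.findall(MSSQL.REGEX_ARG_VAL, match.group('urlargs') or ''))
    # expected: user 'sa', host 'dbhost', port '1433', db 'mydb', plus the DRIVER pair
    print(match.group('user'), match.group('host'), match.group('port'),
          match.group('db'), url_args)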
| 32.705128
| 75
| 0.561348
|
import re
from .._compat import PY2, iteritems, integer_types, to_unicode, long
from .._globals import IDENTITY
from .base import SQLAdapter
from . import adapters, with_connection_or_raise
class Slicer(object):
def rowslice(self, rows, minimum=0, maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class MSSQL(SQLAdapter):
dbengine = 'mssql'
drivers = ('pyodbc',)
REGEX_DSN = '^.+$'
REGEX_URI = \
'^(?P<user>[^:@]+)(:(?P<password>[^@]*))?' \
r'@(?P<host>[^:/]+|\[[^\]]+\])(:(?P<port>\d+))?' \
'/(?P<db>[^?]+)' \
r'(\?(?P<urlargs>.*))?$'
REGEX_ARG_VAL = '(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)'
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.srid = srid
super(MSSQL, self).__init__(
db, uri, pool_size, folder, db_codec, credential_decoder,
driver_args, adapter_args, do_connect, after_connection)
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
m = re.match(self.REGEX_DSN, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
self.dsn = m.group()
else:
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
password = self.credential_decoder(m.group('password'))
if password is None:
password = ''
host = m.group('host')
db = m.group('db')
port = m.group('port') or '1433'
argsdict = {'DRIVER': '{SQL Server}'}
urlargs = m.group('urlargs') or ''
for argmatch in re.finditer(self.REGEX_ARG_VAL, urlargs):
argsdict[str(argmatch.group('argkey')).upper()] = \
argmatch.group('argvalue')
urlargs = ';'.join([
'%s=%s' % (ak, av) for (ak, av) in iteritems(argsdict)])
self.dsn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
% (host, port, db, user, password, urlargs)
def connector(self):
return self.driver.connect(self.dsn, **self.driver_args)
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
@adapters.register_for('mssql')
class MSSQL1(MSSQL, Slicer):
pass
@adapters.register_for('mssql3')
class MSSQL3(MSSQL):
pass
@adapters.register_for('mssql4')
class MSSQL4(MSSQL):
pass
class MSSQLN(MSSQL):
def represent(self, obj, field_type):
rv = super(MSSQLN, self).represent(obj, field_type)
if field_type in ('string', 'text', 'json') and rv.startswith("'"):
rv = 'N' + rv
return rv
@with_connection_or_raise
def execute(self, *args, **kwargs):
if PY2:
args = list(args)
args[0] = to_unicode(args[0])
return super(MSSQLN, self).execute(*args, **kwargs)
@adapters.register_for('mssqln', 'mssql2')
class MSSQL1N(MSSQLN, Slicer):
pass
@adapters.register_for('mssql3n')
class MSSQL3N(MSSQLN):
pass
@adapters.register_for('mssql4n')
class MSSQL4N(MSSQLN):
pass
@adapters.register_for('vertica')
class Vertica(MSSQL1):
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
@adapters.register_for('sybase')
class Sybase(MSSQL1):
dbengine = 'sybase'
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
m = re.match(self.REGEX_DSN, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
dsn = m.group()
else:
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
password = self.credential_decoder(m.group('password'))
if password is None:
password = ''
host = m.group('host')
db = m.group('db')
port = m.group('port') or '1433'
self.dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)
self.driver_args.update(
user=self.credential_decoder(user),
passwd=self.credential_decoder(password))
| true
| true
|
f715a381967b6c4678430e111919f89608f9e232
| 1,922
|
py
|
Python
|
astropy/stats/lombscargle/implementations/tests/test_mle.py
|
b1quint/astropy
|
a170a74739e4356c169429a42e554f9777b53f4d
|
[
"BSD-3-Clause"
] | 8
|
2019-04-27T01:19:45.000Z
|
2020-09-21T03:31:01.000Z
|
astropy/stats/lombscargle/implementations/tests/test_mle.py
|
b1quint/astropy
|
a170a74739e4356c169429a42e554f9777b53f4d
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/stats/lombscargle/implementations/tests/test_mle.py
|
b1quint/astropy
|
a170a74739e4356c169429a42e554f9777b53f4d
|
[
"BSD-3-Clause"
] | 5
|
2019-04-27T01:19:47.000Z
|
2020-09-20T15:15:19.000Z
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit
@pytest.fixture
def t():
rand = np.random.RandomState(42)
return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1. / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1. / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
rand = np.random.RandomState(42)
t = 10 * rand.rand(30)
theta = -1 + rand.rand(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
center_data=False, fit_mean=fit_mean)
assert_allclose(y, y_fit)
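# An illustrative sketch of the column layout that test_design_matrix above
# checks for the single-term case: an optional bias column of 1/dy followed by
# sine and cosine columns at the trial frequency, all scaled by 1/dy.
if __name__ == "__main__":
    t_demo = np.linspace(0, 10, 5)
    freq_demo, dy_demo = 1.5, 2.0
    X_demo = np.column_stack([
        np.ones_like(t_demo) / dy_demo,                    # bias column
        np.sin(2 * np.pi * freq_demo * t_demo) / dy_demo,  # sine term
        np.cos(2 * np.pi * freq_demo * t_demo) / dy_demo,  # cosine term
    ])
    assert_allclose(X_demo, design_matrix(t_demo, freq_demo, dy_demo, bias=True))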
| 34.945455
| 85
| 0.605619
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit
@pytest.fixture
def t():
rand = np.random.RandomState(42)
return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1. / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1. / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
rand = np.random.RandomState(42)
t = 10 * rand.rand(30)
theta = -1 + rand.rand(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
center_data=False, fit_mean=fit_mean)
assert_allclose(y, y_fit)
| true
| true
|
f715a4a05c0ac41089e088d453bb1aff5563f056
| 29,345
|
py
|
Python
|
braintree/webhook_testing_gateway.py
|
maneeshd/braintree_python
|
4aa3f4b8a376ea81bf16a053d840efe55ae13675
|
[
"MIT"
] | 1
|
2019-05-23T10:08:54.000Z
|
2019-05-23T10:08:54.000Z
|
braintree/webhook_testing_gateway.py
|
maneeshd/braintree_python
|
4aa3f4b8a376ea81bf16a053d840efe55ae13675
|
[
"MIT"
] | null | null | null |
braintree/webhook_testing_gateway.py
|
maneeshd/braintree_python
|
4aa3f4b8a376ea81bf16a053d840efe55ae13675
|
[
"MIT"
] | 2
|
2019-05-06T01:10:41.000Z
|
2019-05-06T01:10:42.000Z
|
from braintree.util.crypto import Crypto
from braintree.webhook_notification import WebhookNotification
import sys
if sys.version_info[0] == 2:
from base64 import encodestring as encodebytes
else:
from base64 import encodebytes
from datetime import datetime
class WebhookTestingGateway(object):
def __init__(self, gateway):
self.gateway = gateway
self.config = gateway.config
def sample_notification(self, kind, id, source_merchant_id=None):
payload = encodebytes(self.__sample_xml(kind, id, source_merchant_id))
hmac_payload = Crypto.sha1_hmac_hash(self.gateway.config.private_key, payload)
signature = "%s|%s" % (self.gateway.config.public_key, hmac_payload)
return {'bt_signature': signature, 'bt_payload': payload}
def __sample_xml(self, kind, id, source_merchant_id):
timestamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
source_merchant_id_xml = ''
if source_merchant_id is not None:
source_merchant_id_xml = '<source-merchant-id>%s</source-merchant-id>' % source_merchant_id
sample_xml = """
<notification>
<timestamp type="datetime">%s</timestamp>
<kind>%s</kind>
%s
<subject>%s</subject>
</notification>
""" % (timestamp, kind, source_merchant_id_xml, self.__subject_sample_xml(kind, id))
return sample_xml.encode('utf-8')
def __subject_sample_xml(self, kind, id):
if kind == WebhookNotification.Kind.Check:
return self.__check_sample_xml()
if kind == WebhookNotification.Kind.ConnectedMerchantStatusTransitioned:
return self.__connected_merchant_status_transitioned_xml(id)
if kind == WebhookNotification.Kind.ConnectedMerchantPayPalStatusChanged:
return self.__connected_merchant_paypal_status_changed_xml(id)
if kind == WebhookNotification.Kind.SubMerchantAccountApproved:
return self.__merchant_account_approved_sample_xml(id)
elif kind == WebhookNotification.Kind.SubMerchantAccountDeclined:
return self.__merchant_account_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionDisbursed:
return self.__transaction_disbursed_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettled:
return self.__transaction_settled_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettlementDeclined:
return self.__transaction_settlement_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.PartnerMerchantConnected:
return self.__partner_merchant_connected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDisconnected:
return self.__partner_merchant_disconnected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDeclined:
return self.__partner_merchant_declined_sample_xml()
elif kind == WebhookNotification.Kind.OAuthAccessRevoked:
return self.__oauth_access_revocation_sample_xml(id)
elif kind == WebhookNotification.Kind.DisbursementException:
return self.__disbursement_exception_sample_xml(id)
elif kind == WebhookNotification.Kind.Disbursement:
return self.__disbursement_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeOpened:
return self.__dispute_opened_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeLost:
return self.__dispute_lost_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeWon:
return self.__dispute_won_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedSuccessfully:
return self.__subscription_charged_successfully_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedUnsuccessfully:
return self.__subscription_charged_unsuccessfully_sample_xml(id)
elif kind == WebhookNotification.Kind.AccountUpdaterDailyReport:
return self.__account_updater_daily_report_sample_xml()
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
elif kind == WebhookNotification.Kind.IdealPaymentComplete:
return self.__ideal_payment_complete_sample_xml(id)
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
elif kind == WebhookNotification.Kind.IdealPaymentFailed:
return self.__ideal_payment_failed_sample_xml(id)
# NEXT_MAJOR_VERSION remove GrantedPaymentInstrumentUpdate
elif kind == WebhookNotification.Kind.GrantedPaymentInstrumentUpdate:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.GrantorUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.RecipientUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.PaymentMethodRevokedByCustomer:
return self.__payment_method_revoked_by_customer(id)
elif kind == WebhookNotification.Kind.LocalPaymentCompleted:
return self.__local_payment_completed()
else:
return self.__subscription_sample_xml(id)
def __check_sample_xml(self):
return """
<check type="boolean">
true
</check>
"""
def __transaction_disbursed_sample_xml(self, id):
return """
<transaction>
<id>%s</id>
<amount>100</amount>
<tax-amount>10</tax-amount>
<disbursement-details>
<settlement-amount>100</settlement-amount>
<settlement-currency-exchange-rate>10</settlement-currency-exchange-rate>
<disbursement-date type="datetime">2013-07-09T18:23:29Z</disbursement-date>
</disbursement-details>
</transaction>
""" % id
def __transaction_settled_sample_xml(self, id):
return """
<transaction>
<id>%s</id>
<status>settled</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
<tax-amount>0</tax-amount>
</transaction>
""" % id
def __transaction_settlement_declined_sample_xml(self, id):
return """
<transaction>
<id>%s</id>
<status>settlement_declined</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
<tax-amount>0</tax-amount>
</transaction>
""" % id
def __disbursement_exception_sample_xml(self, id):
return """
<disbursement>
<id>%s</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">false</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-09</disbursement-date>
<exception-message>bank_rejected</exception-message>
<follow-up-action>update_funding_information</follow-up-action>
</disbursement>
""" % id
def __disbursement_sample_xml(self, id):
return """
<disbursement>
<id>%s</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">true</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-09</disbursement-date>
<exception-message nil="true"/>
<follow-up-action nil="true"/>
</disbursement>
""" % id
def __dispute_opened_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_opened_sample_xml(id)
else:
return self.__new_dispute_opened_sample_xml(id)
def __dispute_lost_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_lost_sample_xml(id)
else:
return self.__new_dispute_lost_sample_xml(id)
def __dispute_won_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_won_sample_xml(id)
else:
return self.__new_dispute_won_sample_xml(id)
def __old_dispute_opened_sample_xml(self, id):
return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>open</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __old_dispute_lost_sample_xml(self, id):
return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>lost</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __old_dispute_won_sample_xml(self, id):
return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>won</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
<date-won type="date">2014-09-01</date-won>
</dispute>
""" % (id, id)
def __new_dispute_opened_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>open</status>
<updated-at type="datetime">2017-06-16T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
</status-history>
<evidence type="array"/>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __new_dispute_lost_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>lost</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>lost</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __new_dispute_won_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>won</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>won</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
<date-won type=\"date\">2014-09-01</date-won>
</dispute>
""" % (id, id)
def __subscription_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array"></transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % id
def __subscription_charged_successfully_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>submitted_for_settlement</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)
def __subscription_charged_unsuccessfully_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>failed</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)
def __merchant_account_approved_sample_xml(self, id):
return """
<merchant-account>
<id>%s</id>
<status>active</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>active</status>
</master-merchant-account>
</merchant-account>
""" % (id, id)
def __merchant_account_declined_sample_xml(self, id):
return """
<api-error-response>
<message>Credit score is too low</message>
<errors>
<errors type="array"/>
<merchant-account>
<errors type="array">
<error>
<code>82621</code>
<message>Credit score is too low</message>
<attribute type="symbol">base</attribute>
</error>
</errors>
</merchant-account>
</errors>
<merchant-account>
<id>%s</id>
<status>suspended</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>suspended</status>
</master-merchant-account>
</merchant-account>
</api-error-response>
""" % (id, id)
def __partner_merchant_connected_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
<public-key>public_key</public-key>
<private-key>private_key</private-key>
<merchant-public-id>public_id</merchant-public-id>
<client-side-encryption-key>cse_key</client-side-encryption-key>
</partner-merchant>
"""
def __partner_merchant_disconnected_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""
def __connected_merchant_status_transitioned_xml(self, id):
return """
<connected-merchant-status-transitioned>
<status>new_status</status>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-status-transitioned>
""" % id
def __connected_merchant_paypal_status_changed_xml(self, id):
return """
<connected-merchant-paypal-status-changed>
<action>link</action>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-paypal-status-changed>
""" % id
def __partner_merchant_declined_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""
def __oauth_access_revocation_sample_xml(self, id):
return """
<oauth-application-revocation>
<merchant-id>%s</merchant-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</oauth-application-revocation>
""" % id
def __account_updater_daily_report_sample_xml(self):
return """
<account-updater-daily-report>
<report-date type="date">2016-01-14</report-date>
<report-url>link-to-csv-report</report-url>
</account-updater-daily-report>
"""
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
def __ideal_payment_complete_sample_xml(self, id):
return """
<ideal-payment>
<id>%s</id>
<status>COMPLETE</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
def __ideal_payment_failed_sample_xml(self, id):
return """
<ideal-payment>
<id>%s</id>
<status>FAILED</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id
def __granted_payment_instrument_update(self):
return """
<granted-payment-instrument-update>
<grant-owner-merchant-id>vczo7jqrpwrsi2px</grant-owner-merchant-id>
<grant-recipient-merchant-id>cf0i8wgarszuy6hc</grant-recipient-merchant-id>
<payment-method-nonce>
<nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</nonce>
<consumed type="boolean">false</consumed>
<locked type="boolean">false</locked>
</payment-method-nonce>
<token>abc123z</token>
<updated-fields type="array">
<item>expiration-month</item>
<item>expiration-year</item>
</updated-fields>
</granted-payment-instrument-update>
"""
def __payment_method_revoked_by_customer(self, id):
return """
<paypal-account>
<billing-agreement-id>a-billing-agreement-id</billing-agreement-id>
<created-at type="datetime">2019-01-01T12:00:00Z</created-at>
<customer-id>a-customer-id</customer-id>
<default type="boolean">true</default>
<email>name@email.com</email>
<global-id>cGF5bWVudG1ldGhvZF9jaDZieXNz</global-id>
<image-url>https://assets.braintreegateway.com/payment_method_logo/paypal.png?environment=test</image-url>
<subscriptions type="array"/>
<token>%s</token>
<updated-at type="datetime">2019-01-02T12:00:00Z</updated-at>
<is-channel-initiated nil="true"/>
<payer-id>a-payer-id</payer-id>
<payer-info nil="true"/>
<limited-use-order-id nil="true"/>
<revoked-at type="datetime">2019-01-02T12:00:00Z</revoked-at>
</paypal-account>
""" % id
def __local_payment_completed(self):
return """
<local-payment>
<payment-id>a-payment-id</payment-id>
<payer-id>a-payer-id</payer-id>
<payment-method-nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</payment-method-nonce>
<transaction>
<id>1</id>
<status>authorizing</status>
<amount>10.00</amount>
<order-id>order1234</order-id>
</transaction>
</local-payment>
"""
| 43.474074
| 129
| 0.566434
|
from braintree.util.crypto import Crypto
from braintree.webhook_notification import WebhookNotification
import sys
if sys.version_info[0] == 2:
from base64 import encodestring as encodebytes
else:
from base64 import encodebytes
from datetime import datetime
class WebhookTestingGateway(object):
def __init__(self, gateway):
self.gateway = gateway
self.config = gateway.config
def sample_notification(self, kind, id, source_merchant_id=None):
payload = encodebytes(self.__sample_xml(kind, id, source_merchant_id))
hmac_payload = Crypto.sha1_hmac_hash(self.gateway.config.private_key, payload)
signature = "%s|%s" % (self.gateway.config.public_key, hmac_payload)
return {'bt_signature': signature, 'bt_payload': payload}
def __sample_xml(self, kind, id, source_merchant_id):
timestamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
source_merchant_id_xml = ''
if source_merchant_id is not None:
source_merchant_id_xml = '<source-merchant-id>%s</source-merchant-id>' % source_merchant_id
sample_xml = """
<notification>
<timestamp type="datetime">%s</timestamp>
<kind>%s</kind>
%s
<subject>%s</subject>
</notification>
""" % (timestamp, kind, source_merchant_id_xml, self.__subject_sample_xml(kind, id))
return sample_xml.encode('utf-8')
def __subject_sample_xml(self, kind, id):
if kind == WebhookNotification.Kind.Check:
return self.__check_sample_xml()
if kind == WebhookNotification.Kind.ConnectedMerchantStatusTransitioned:
return self.__connected_merchant_status_transitioned_xml(id)
if kind == WebhookNotification.Kind.ConnectedMerchantPayPalStatusChanged:
return self.__connected_merchant_paypal_status_changed_xml(id)
if kind == WebhookNotification.Kind.SubMerchantAccountApproved:
return self.__merchant_account_approved_sample_xml(id)
elif kind == WebhookNotification.Kind.SubMerchantAccountDeclined:
return self.__merchant_account_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionDisbursed:
return self.__transaction_disbursed_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettled:
return self.__transaction_settled_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettlementDeclined:
return self.__transaction_settlement_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.PartnerMerchantConnected:
return self.__partner_merchant_connected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDisconnected:
return self.__partner_merchant_disconnected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDeclined:
return self.__partner_merchant_declined_sample_xml()
elif kind == WebhookNotification.Kind.OAuthAccessRevoked:
return self.__oauth_access_revocation_sample_xml(id)
elif kind == WebhookNotification.Kind.DisbursementException:
return self.__disbursement_exception_sample_xml(id)
elif kind == WebhookNotification.Kind.Disbursement:
return self.__disbursement_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeOpened:
return self.__dispute_opened_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeLost:
return self.__dispute_lost_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeWon:
return self.__dispute_won_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedSuccessfully:
return self.__subscription_charged_successfully_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedUnsuccessfully:
return self.__subscription_charged_unsuccessfully_sample_xml(id)
elif kind == WebhookNotification.Kind.AccountUpdaterDailyReport:
return self.__account_updater_daily_report_sample_xml()
elif kind == WebhookNotification.Kind.IdealPaymentComplete:
return self.__ideal_payment_complete_sample_xml(id)
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
elif kind == WebhookNotification.Kind.IdealPaymentFailed:
return self.__ideal_payment_failed_sample_xml(id)
elif kind == WebhookNotification.Kind.GrantedPaymentInstrumentUpdate:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.GrantorUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.RecipientUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.PaymentMethodRevokedByCustomer:
return self.__payment_method_revoked_by_customer(id)
elif kind == WebhookNotification.Kind.LocalPaymentCompleted:
return self.__local_payment_completed()
else:
return self.__subscription_sample_xml(id)
def __check_sample_xml(self):
return """
<check type="boolean">
true
</check>
"""
def __transaction_disbursed_sample_xml(self, id):
return """
<transaction>
<id>%s</id>
<amount>100</amount>
<tax-amount>10</tax-amount>
<disbursement-details>
<settlement-amount>100</settlement-amount>
<settlement-currency-exchange-rate>10</settlement-currency-exchange-rate>
<disbursement-date type="datetime">2013-07-09T18:23:29Z</disbursement-date>
</disbursement-details>
</transaction>
""" % id
def __transaction_settled_sample_xml(self, id):
return """
<transaction>
<id>%s</id>
<status>settled</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
<tax-amount>0</tax-amount>
</transaction>
""" % id
def __transaction_settlement_declined_sample_xml(self, id):
return """
<transaction>
<id>%s</id>
<status>settlement_declined</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
<tax-amount>0</tax-amount>
</transaction>
""" % id
def __disbursement_exception_sample_xml(self, id):
return """
<disbursement>
<id>%s</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">false</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-09</disbursement-date>
<exception-message>bank_rejected</exception-message>
<follow-up-action>update_funding_information</follow-up-action>
</disbursement>
""" % id
def __disbursement_sample_xml(self, id):
return """
<disbursement>
<id>%s</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">true</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-09</disbursement-date>
<exception-message nil="true"/>
<follow-up-action nil="true"/>
</disbursement>
""" % id
def __dispute_opened_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_opened_sample_xml(id)
else:
return self.__new_dispute_opened_sample_xml(id)
def __dispute_lost_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_lost_sample_xml(id)
else:
return self.__new_dispute_lost_sample_xml(id)
def __dispute_won_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_won_sample_xml(id)
else:
return self.__new_dispute_won_sample_xml(id)
def __old_dispute_opened_sample_xml(self, id):
return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>open</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __old_dispute_lost_sample_xml(self, id):
return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>lost</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __old_dispute_won_sample_xml(self, id):
return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>won</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
<date-won type="date">2014-09-01</date-won>
</dispute>
""" % (id, id)
def __new_dispute_opened_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>open</status>
<updated-at type="datetime">2017-06-16T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
</status-history>
<evidence type="array"/>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __new_dispute_lost_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>lost</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>lost</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __new_dispute_won_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>won</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>won</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
        <date-opened type="date">2014-03-28</date-opened>
        <date-won type="date">2014-09-01</date-won>
</dispute>
""" % (id, id)
def __subscription_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array"></transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % id
def __subscription_charged_successfully_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>submitted_for_settlement</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)
def __subscription_charged_unsuccessfully_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>failed</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)
def __merchant_account_approved_sample_xml(self, id):
return """
<merchant-account>
<id>%s</id>
<status>active</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>active</status>
</master-merchant-account>
</merchant-account>
""" % (id, id)
def __merchant_account_declined_sample_xml(self, id):
return """
<api-error-response>
<message>Credit score is too low</message>
<errors>
<errors type="array"/>
<merchant-account>
<errors type="array">
<error>
<code>82621</code>
<message>Credit score is too low</message>
<attribute type="symbol">base</attribute>
</error>
</errors>
</merchant-account>
</errors>
<merchant-account>
<id>%s</id>
<status>suspended</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>suspended</status>
</master-merchant-account>
</merchant-account>
</api-error-response>
""" % (id, id)
def __partner_merchant_connected_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
<public-key>public_key</public-key>
<private-key>private_key</private-key>
<merchant-public-id>public_id</merchant-public-id>
<client-side-encryption-key>cse_key</client-side-encryption-key>
</partner-merchant>
"""
def __partner_merchant_disconnected_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""
def __connected_merchant_status_transitioned_xml(self, id):
return """
<connected-merchant-status-transitioned>
<status>new_status</status>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-status-transitioned>
""" % id
def __connected_merchant_paypal_status_changed_xml(self, id):
return """
<connected-merchant-paypal-status-changed>
<action>link</action>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-paypal-status-changed>
""" % id
def __partner_merchant_declined_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""
def __oauth_access_revocation_sample_xml(self, id):
return """
<oauth-application-revocation>
<merchant-id>%s</merchant-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</oauth-application-revocation>
""" % id
def __account_updater_daily_report_sample_xml(self):
return """
<account-updater-daily-report>
<report-date type="date">2016-01-14</report-date>
<report-url>link-to-csv-report</report-url>
</account-updater-daily-report>
"""
def __ideal_payment_complete_sample_xml(self, id):
return """
<ideal-payment>
<id>%s</id>
<status>COMPLETE</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
def __ideal_payment_failed_sample_xml(self, id):
return """
<ideal-payment>
<id>%s</id>
<status>FAILED</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id
def __granted_payment_instrument_update(self):
return """
<granted-payment-instrument-update>
<grant-owner-merchant-id>vczo7jqrpwrsi2px</grant-owner-merchant-id>
<grant-recipient-merchant-id>cf0i8wgarszuy6hc</grant-recipient-merchant-id>
<payment-method-nonce>
<nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</nonce>
<consumed type="boolean">false</consumed>
<locked type="boolean">false</locked>
</payment-method-nonce>
<token>abc123z</token>
<updated-fields type="array">
<item>expiration-month</item>
<item>expiration-year</item>
</updated-fields>
</granted-payment-instrument-update>
"""
def __payment_method_revoked_by_customer(self, id):
return """
<paypal-account>
<billing-agreement-id>a-billing-agreement-id</billing-agreement-id>
<created-at type="datetime">2019-01-01T12:00:00Z</created-at>
<customer-id>a-customer-id</customer-id>
<default type="boolean">true</default>
<email>name@email.com</email>
<global-id>cGF5bWVudG1ldGhvZF9jaDZieXNz</global-id>
<image-url>https://assets.braintreegateway.com/payment_method_logo/paypal.png?environment=test</image-url>
<subscriptions type="array"/>
<token>%s</token>
<updated-at type="datetime">2019-01-02T12:00:00Z</updated-at>
<is-channel-initiated nil="true"/>
<payer-id>a-payer-id</payer-id>
<payer-info nil="true"/>
<limited-use-order-id nil="true"/>
<revoked-at type="datetime">2019-01-02T12:00:00Z</revoked-at>
</paypal-account>
""" % id
def __local_payment_completed(self):
return """
<local-payment>
<payment-id>a-payment-id</payment-id>
<payer-id>a-payer-id</payer-id>
<payment-method-nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</payment-method-nonce>
<transaction>
<id>1</id>
<status>authorizing</status>
<amount>10.00</amount>
<order-id>order1234</order-id>
</transaction>
</local-payment>
"""
| true
| true
|
f715a55d3a4d0e4ed9e635af1fb7092bd4dc3fdc
| 2,188
|
py
|
Python
|
project-1-command-line/main.py
|
jadry92/Course-data-ing-with-python
|
57d4eb1564a2379497546ff28e02377fb07ba0b9
|
[
"MIT"
] | null | null | null |
project-1-command-line/main.py
|
jadry92/Course-data-ing-with-python
|
57d4eb1564a2379497546ff28e02377fb07ba0b9
|
[
"MIT"
] | null | null | null |
project-1-command-line/main.py
|
jadry92/Course-data-ing-with-python
|
57d4eb1564a2379497546ff28e02377fb07ba0b9
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import datetime
import csv
from requests.exceptions import HTTPError
from urllib3.exceptions import MaxRetryError
# local imports
from common import config
import news_page_objects as news
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _news_scraper(news_sites_uid):
host = config()['news_sites'][news_sites_uid]['url']
    logger.info('Beginning scraper for {}'.format(host))
homepage = news.HomePage(news_sites_uid, host)
articles = []
for link in homepage.articles_links:
print(link)
article = _fetch_article(news_sites_uid, host, link)
if article:
articles.append(article)
_save_articles(news_sites_uid, articles)
print(len(articles))
def _save_articles(news_sites_uid, articles):
now = datetime.datetime.now().strftime('%Y-%m-%d')
out_file_name = '{news_sites_uid}_{datatime}_articles.csv'.format(
news_sites_uid=news_sites_uid,
datatime=now
)
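    # The CSV header row is derived from the article object's public attributes:
    # every name reported by dir() that does not start with an underscore.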
csv_headers = list(filter(lambda property: not property.startswith('_'), dir(articles[0])))
with open(out_file_name, mode='w+') as f:
writer = csv.writer(f)
writer.writerow(csv_headers)
for article in articles:
row = [str(getattr(article, prop)) for prop in csv_headers]
writer.writerow(row)
def _fetch_article(news_sites_uid, host, link):
logger.info('Start fetching article at {}'.format(link))
article = None
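    # Fetch errors are logged and swallowed so that a single broken link does not
    # abort the whole scraping run.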
try:
article = news.ArticlePage(news_sites_uid, link)
except (HTTPError, MaxRetryError) as e:
        logger.error('The article couldn\'t be fetched')
    if article and not article.body:
        logger.warning('There isn\'t a body on this page.')
return None
return article
if __name__ == '__main__':
parser = argparse.ArgumentParser()
news_sites_choices = list(config()['news_sites'].keys())
parser.add_argument('news_sites',
help='The news site that you want to scrape',
type=str,
choices=news_sites_choices)
args = parser.parse_args()
_news_scraper(args.news_sites)
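# Example invocation (a sketch only; 'some_site_uid' stands for one of the keys
# defined under 'news_sites' in the config file, which is not part of this script):
#
#     python main.py some_site_uid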
| 29.567568
| 95
| 0.673675
|
import argparse
import logging
import datetime
import csv
from requests.exceptions import HTTPError
from urllib3.exceptions import MaxRetryError
from common import config
import news_page_objects as news
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _news_scraper(news_sites_uid):
host = config()['news_sites'][news_sites_uid]['url']
    logger.info('Beginning scraper for {}'.format(host))
homepage = news.HomePage(news_sites_uid, host)
articles = []
for link in homepage.articles_links:
print(link)
article = _fetch_article(news_sites_uid, host, link)
if article:
articles.append(article)
_save_articles(news_sites_uid, articles)
print(len(articles))
def _save_articles(news_sites_uid, articles):
now = datetime.datetime.now().strftime('%Y-%m-%d')
out_file_name = '{news_sites_uid}_{datatime}_articles.csv'.format(
news_sites_uid=news_sites_uid,
datatime=now
)
csv_headers = list(filter(lambda property: not property.startswith('_'), dir(articles[0])))
with open(out_file_name, mode='w+') as f:
writer = csv.writer(f)
writer.writerow(csv_headers)
for article in articles:
row = [str(getattr(article, prop)) for prop in csv_headers]
writer.writerow(row)
def _fetch_article(news_sites_uid, host, link):
logger.info('Start fetching article at {}'.format(link))
article = None
try:
article = news.ArticlePage(news_sites_uid, link)
except (HTTPError, MaxRetryError) as e:
        logger.error('The article couldn\'t be fetched')
    if article and not article.body:
        logger.warning('There isn\'t a body on this page.')
return None
return article
if __name__ == '__main__':
parser = argparse.ArgumentParser()
news_sites_choices = list(config()['news_sites'].keys())
parser.add_argument('news_sites',
help='The news site that you want to scrape',
type=str,
choices=news_sites_choices)
args = parser.parse_args()
_news_scraper(args.news_sites)
| true
| true
|
f715a596a287133251f9a3c65e63acf439e485b9
| 18,541
|
py
|
Python
|
powerbot/models/order_entry.py
|
rogerarmstrong/python-samples
|
df73b5dab70090f820fc47096b0ae5490c7779b6
|
[
"Apache-2.0"
] | null | null | null |
powerbot/models/order_entry.py
|
rogerarmstrong/python-samples
|
df73b5dab70090f820fc47096b0ae5490c7779b6
|
[
"Apache-2.0"
] | null | null | null |
powerbot/models/order_entry.py
|
rogerarmstrong/python-samples
|
df73b5dab70090f820fc47096b0ae5490c7779b6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Powerbot Server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OrderEntry(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'side': 'str',
'prod': 'str',
'quantity': 'float',
'price': 'float',
'display_qty': 'int',
'contract_id': 'int',
'contract_name': 'str',
'cl_ordr_id': 'str',
'clearing_acct_type': 'str',
'ordr_exe_restriction': 'str',
'pre_arranged': 'bool',
'pre_arranged_acct': 'str',
'type': 'str',
'validity_res': 'str',
'state': 'str',
'validity_date': 'datetime',
'txt': 'str',
'ppd': 'int',
'dlvry_start': 'datetime',
'dlvry_end': 'datetime'
}
attribute_map = {
'side': 'side',
'prod': 'prod',
'quantity': 'quantity',
'price': 'price',
'display_qty': 'displayQty',
'contract_id': 'contractId',
'contract_name': 'contractName',
'cl_ordr_id': 'clOrdrId',
'clearing_acct_type': 'clearingAcctType',
'ordr_exe_restriction': 'ordrExeRestriction',
'pre_arranged': 'preArranged',
'pre_arranged_acct': 'preArrangedAcct',
'type': 'type',
'validity_res': 'validityRes',
'state': 'state',
'validity_date': 'validityDate',
'txt': 'txt',
'ppd': 'ppd',
'dlvry_start': 'dlvryStart',
'dlvry_end': 'dlvryEnd'
}
def __init__(self, side=None, prod=None, quantity=None, price=None, display_qty=None, contract_id=None, contract_name=None, cl_ordr_id=None, clearing_acct_type=None, ordr_exe_restriction='NON', pre_arranged=False, pre_arranged_acct=None, type='O', validity_res='GFS', state=None, validity_date=None, txt=None, ppd=None, dlvry_start=None, dlvry_end=None): # noqa: E501
"""OrderEntry - a model defined in Swagger""" # noqa: E501
self._side = None
self._prod = None
self._quantity = None
self._price = None
self._display_qty = None
self._contract_id = None
self._contract_name = None
self._cl_ordr_id = None
self._clearing_acct_type = None
self._ordr_exe_restriction = None
self._pre_arranged = None
self._pre_arranged_acct = None
self._type = None
self._validity_res = None
self._state = None
self._validity_date = None
self._txt = None
self._ppd = None
self._dlvry_start = None
self._dlvry_end = None
self.discriminator = None
if side is not None:
self.side = side
self.prod = prod
self.quantity = quantity
self.price = price
if display_qty is not None:
self.display_qty = display_qty
if contract_id is not None:
self.contract_id = contract_id
if contract_name is not None:
self.contract_name = contract_name
if cl_ordr_id is not None:
self.cl_ordr_id = cl_ordr_id
self.clearing_acct_type = clearing_acct_type
if ordr_exe_restriction is not None:
self.ordr_exe_restriction = ordr_exe_restriction
if pre_arranged is not None:
self.pre_arranged = pre_arranged
if pre_arranged_acct is not None:
self.pre_arranged_acct = pre_arranged_acct
if type is not None:
self.type = type
if validity_res is not None:
self.validity_res = validity_res
if state is not None:
self.state = state
if validity_date is not None:
self.validity_date = validity_date
if txt is not None:
self.txt = txt
if ppd is not None:
self.ppd = ppd
if dlvry_start is not None:
self.dlvry_start = dlvry_start
if dlvry_end is not None:
self.dlvry_end = dlvry_end
@property
def side(self):
"""Gets the side of this OrderEntry. # noqa: E501
:return: The side of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._side
@side.setter
def side(self, side):
"""Sets the side of this OrderEntry.
:param side: The side of this OrderEntry. # noqa: E501
:type: str
"""
allowed_values = ["SELL", "BUY"] # noqa: E501
if side not in allowed_values:
raise ValueError(
"Invalid value for `side` ({0}), must be one of {1}" # noqa: E501
.format(side, allowed_values)
)
self._side = side
@property
def prod(self):
"""Gets the prod of this OrderEntry. # noqa: E501
:return: The prod of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._prod
@prod.setter
def prod(self, prod):
"""Sets the prod of this OrderEntry.
:param prod: The prod of this OrderEntry. # noqa: E501
:type: str
"""
if prod is None:
raise ValueError("Invalid value for `prod`, must not be `None`") # noqa: E501
self._prod = prod
@property
def quantity(self):
"""Gets the quantity of this OrderEntry. # noqa: E501
:return: The quantity of this OrderEntry. # noqa: E501
:rtype: float
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""Sets the quantity of this OrderEntry.
:param quantity: The quantity of this OrderEntry. # noqa: E501
:type: float
"""
if quantity is None:
raise ValueError("Invalid value for `quantity`, must not be `None`") # noqa: E501
self._quantity = quantity
@property
def price(self):
"""Gets the price of this OrderEntry. # noqa: E501
:return: The price of this OrderEntry. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this OrderEntry.
:param price: The price of this OrderEntry. # noqa: E501
:type: float
"""
if price is None:
raise ValueError("Invalid value for `price`, must not be `None`") # noqa: E501
self._price = price
@property
def display_qty(self):
"""Gets the display_qty of this OrderEntry. # noqa: E501
:return: The display_qty of this OrderEntry. # noqa: E501
:rtype: int
"""
return self._display_qty
@display_qty.setter
def display_qty(self, display_qty):
"""Sets the display_qty of this OrderEntry.
:param display_qty: The display_qty of this OrderEntry. # noqa: E501
:type: int
"""
self._display_qty = display_qty
@property
def contract_id(self):
"""Gets the contract_id of this OrderEntry. # noqa: E501
:return: The contract_id of this OrderEntry. # noqa: E501
:rtype: int
"""
return self._contract_id
@contract_id.setter
def contract_id(self, contract_id):
"""Sets the contract_id of this OrderEntry.
:param contract_id: The contract_id of this OrderEntry. # noqa: E501
:type: int
"""
self._contract_id = contract_id
@property
def contract_name(self):
"""Gets the contract_name of this OrderEntry. # noqa: E501
        Set a contract name instead of the contractId, and the attempt is made to look up the contract via its name. If contractId is set, the contractName field is ignored.  # noqa: E501
:return: The contract_name of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._contract_name
@contract_name.setter
def contract_name(self, contract_name):
"""Sets the contract_name of this OrderEntry.
        Set a contract name instead of the contractId, and the attempt is made to look up the contract via its name. If contractId is set, the contractName field is ignored.  # noqa: E501
:param contract_name: The contract_name of this OrderEntry. # noqa: E501
:type: str
"""
self._contract_name = contract_name
@property
def cl_ordr_id(self):
"""Gets the cl_ordr_id of this OrderEntry. # noqa: E501
:return: The cl_ordr_id of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._cl_ordr_id
@cl_ordr_id.setter
def cl_ordr_id(self, cl_ordr_id):
"""Sets the cl_ordr_id of this OrderEntry.
:param cl_ordr_id: The cl_ordr_id of this OrderEntry. # noqa: E501
:type: str
"""
if cl_ordr_id is not None and len(cl_ordr_id) > 40:
raise ValueError("Invalid value for `cl_ordr_id`, length must be less than or equal to `40`") # noqa: E501
self._cl_ordr_id = cl_ordr_id
@property
def clearing_acct_type(self):
"""Gets the clearing_acct_type of this OrderEntry. # noqa: E501
:return: The clearing_acct_type of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._clearing_acct_type
@clearing_acct_type.setter
def clearing_acct_type(self, clearing_acct_type):
"""Sets the clearing_acct_type of this OrderEntry.
:param clearing_acct_type: The clearing_acct_type of this OrderEntry. # noqa: E501
:type: str
"""
if clearing_acct_type is None:
raise ValueError("Invalid value for `clearing_acct_type`, must not be `None`") # noqa: E501
self._clearing_acct_type = clearing_acct_type
@property
def ordr_exe_restriction(self):
"""Gets the ordr_exe_restriction of this OrderEntry. # noqa: E501
:return: The ordr_exe_restriction of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._ordr_exe_restriction
@ordr_exe_restriction.setter
def ordr_exe_restriction(self, ordr_exe_restriction):
"""Sets the ordr_exe_restriction of this OrderEntry.
:param ordr_exe_restriction: The ordr_exe_restriction of this OrderEntry. # noqa: E501
:type: str
"""
allowed_values = ["FOK", "IOC", "NON", "AON", "AU"] # noqa: E501
if ordr_exe_restriction not in allowed_values:
raise ValueError(
"Invalid value for `ordr_exe_restriction` ({0}), must be one of {1}" # noqa: E501
.format(ordr_exe_restriction, allowed_values)
)
self._ordr_exe_restriction = ordr_exe_restriction
@property
def pre_arranged(self):
"""Gets the pre_arranged of this OrderEntry. # noqa: E501
:return: The pre_arranged of this OrderEntry. # noqa: E501
:rtype: bool
"""
return self._pre_arranged
@pre_arranged.setter
def pre_arranged(self, pre_arranged):
"""Sets the pre_arranged of this OrderEntry.
:param pre_arranged: The pre_arranged of this OrderEntry. # noqa: E501
:type: bool
"""
self._pre_arranged = pre_arranged
@property
def pre_arranged_acct(self):
"""Gets the pre_arranged_acct of this OrderEntry. # noqa: E501
:return: The pre_arranged_acct of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._pre_arranged_acct
@pre_arranged_acct.setter
def pre_arranged_acct(self, pre_arranged_acct):
"""Sets the pre_arranged_acct of this OrderEntry.
:param pre_arranged_acct: The pre_arranged_acct of this OrderEntry. # noqa: E501
:type: str
"""
self._pre_arranged_acct = pre_arranged_acct
@property
def type(self):
"""Gets the type of this OrderEntry. # noqa: E501
:return: The type of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this OrderEntry.
:param type: The type of this OrderEntry. # noqa: E501
:type: str
"""
allowed_values = ["B", "O", "I", "L", "S", "H", "C", "N", "E"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def validity_res(self):
"""Gets the validity_res of this OrderEntry. # noqa: E501
:return: The validity_res of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._validity_res
@validity_res.setter
def validity_res(self, validity_res):
"""Sets the validity_res of this OrderEntry.
:param validity_res: The validity_res of this OrderEntry. # noqa: E501
:type: str
"""
allowed_values = ["GFS", "GTD", "NON"] # noqa: E501
if validity_res not in allowed_values:
raise ValueError(
"Invalid value for `validity_res` ({0}), must be one of {1}" # noqa: E501
.format(validity_res, allowed_values)
)
self._validity_res = validity_res
@property
def state(self):
"""Gets the state of this OrderEntry. # noqa: E501
:return: The state of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this OrderEntry.
:param state: The state of this OrderEntry. # noqa: E501
:type: str
"""
allowed_values = ["ACTI", "HIBE"] # noqa: E501
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}" # noqa: E501
.format(state, allowed_values)
)
self._state = state
@property
def validity_date(self):
"""Gets the validity_date of this OrderEntry. # noqa: E501
:return: The validity_date of this OrderEntry. # noqa: E501
:rtype: datetime
"""
return self._validity_date
@validity_date.setter
def validity_date(self, validity_date):
"""Sets the validity_date of this OrderEntry.
:param validity_date: The validity_date of this OrderEntry. # noqa: E501
:type: datetime
"""
self._validity_date = validity_date
@property
def txt(self):
"""Gets the txt of this OrderEntry. # noqa: E501
:return: The txt of this OrderEntry. # noqa: E501
:rtype: str
"""
return self._txt
@txt.setter
def txt(self, txt):
"""Sets the txt of this OrderEntry.
:param txt: The txt of this OrderEntry. # noqa: E501
:type: str
"""
if txt is not None and len(txt) > 250:
raise ValueError("Invalid value for `txt`, length must be less than or equal to `250`") # noqa: E501
self._txt = txt
@property
def ppd(self):
"""Gets the ppd of this OrderEntry. # noqa: E501
:return: The ppd of this OrderEntry. # noqa: E501
:rtype: int
"""
return self._ppd
@ppd.setter
def ppd(self, ppd):
"""Sets the ppd of this OrderEntry.
:param ppd: The ppd of this OrderEntry. # noqa: E501
:type: int
"""
self._ppd = ppd
@property
def dlvry_start(self):
"""Gets the dlvry_start of this OrderEntry. # noqa: E501
:return: The dlvry_start of this OrderEntry. # noqa: E501
:rtype: datetime
"""
return self._dlvry_start
@dlvry_start.setter
def dlvry_start(self, dlvry_start):
"""Sets the dlvry_start of this OrderEntry.
:param dlvry_start: The dlvry_start of this OrderEntry. # noqa: E501
:type: datetime
"""
self._dlvry_start = dlvry_start
@property
def dlvry_end(self):
"""Gets the dlvry_end of this OrderEntry. # noqa: E501
:return: The dlvry_end of this OrderEntry. # noqa: E501
:rtype: datetime
"""
return self._dlvry_end
@dlvry_end.setter
def dlvry_end(self, dlvry_end):
"""Sets the dlvry_end of this OrderEntry.
:param dlvry_end: The dlvry_end of this OrderEntry. # noqa: E501
:type: datetime
"""
self._dlvry_end = dlvry_end
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrderEntry):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
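# A minimal usage sketch (all field values below are illustrative only):
#
#     entry = OrderEntry(side="BUY", prod="power", quantity=10.0, price=42.5,
#                        clearing_acct_type="P")
#     entry.to_dict()["side"]   # -> "BUY"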
| 28.656878
| 372
| 0.584596
|
import pprint
import re
import six
class OrderEntry(object):
swagger_types = {
'side': 'str',
'prod': 'str',
'quantity': 'float',
'price': 'float',
'display_qty': 'int',
'contract_id': 'int',
'contract_name': 'str',
'cl_ordr_id': 'str',
'clearing_acct_type': 'str',
'ordr_exe_restriction': 'str',
'pre_arranged': 'bool',
'pre_arranged_acct': 'str',
'type': 'str',
'validity_res': 'str',
'state': 'str',
'validity_date': 'datetime',
'txt': 'str',
'ppd': 'int',
'dlvry_start': 'datetime',
'dlvry_end': 'datetime'
}
attribute_map = {
'side': 'side',
'prod': 'prod',
'quantity': 'quantity',
'price': 'price',
'display_qty': 'displayQty',
'contract_id': 'contractId',
'contract_name': 'contractName',
'cl_ordr_id': 'clOrdrId',
'clearing_acct_type': 'clearingAcctType',
'ordr_exe_restriction': 'ordrExeRestriction',
'pre_arranged': 'preArranged',
'pre_arranged_acct': 'preArrangedAcct',
'type': 'type',
'validity_res': 'validityRes',
'state': 'state',
'validity_date': 'validityDate',
'txt': 'txt',
'ppd': 'ppd',
'dlvry_start': 'dlvryStart',
'dlvry_end': 'dlvryEnd'
}
def __init__(self, side=None, prod=None, quantity=None, price=None, display_qty=None, contract_id=None, contract_name=None, cl_ordr_id=None, clearing_acct_type=None, ordr_exe_restriction='NON', pre_arranged=False, pre_arranged_acct=None, type='O', validity_res='GFS', state=None, validity_date=None, txt=None, ppd=None, dlvry_start=None, dlvry_end=None):
self._side = None
self._prod = None
self._quantity = None
self._price = None
self._display_qty = None
self._contract_id = None
self._contract_name = None
self._cl_ordr_id = None
self._clearing_acct_type = None
self._ordr_exe_restriction = None
self._pre_arranged = None
self._pre_arranged_acct = None
self._type = None
self._validity_res = None
self._state = None
self._validity_date = None
self._txt = None
self._ppd = None
self._dlvry_start = None
self._dlvry_end = None
self.discriminator = None
if side is not None:
self.side = side
self.prod = prod
self.quantity = quantity
self.price = price
if display_qty is not None:
self.display_qty = display_qty
if contract_id is not None:
self.contract_id = contract_id
if contract_name is not None:
self.contract_name = contract_name
if cl_ordr_id is not None:
self.cl_ordr_id = cl_ordr_id
self.clearing_acct_type = clearing_acct_type
if ordr_exe_restriction is not None:
self.ordr_exe_restriction = ordr_exe_restriction
if pre_arranged is not None:
self.pre_arranged = pre_arranged
if pre_arranged_acct is not None:
self.pre_arranged_acct = pre_arranged_acct
if type is not None:
self.type = type
if validity_res is not None:
self.validity_res = validity_res
if state is not None:
self.state = state
if validity_date is not None:
self.validity_date = validity_date
if txt is not None:
self.txt = txt
if ppd is not None:
self.ppd = ppd
if dlvry_start is not None:
self.dlvry_start = dlvry_start
if dlvry_end is not None:
self.dlvry_end = dlvry_end
@property
def side(self):
return self._side
@side.setter
def side(self, side):
allowed_values = ["SELL", "BUY"]
if side not in allowed_values:
raise ValueError(
"Invalid value for `side` ({0}), must be one of {1}"
.format(side, allowed_values)
)
self._side = side
@property
def prod(self):
return self._prod
@prod.setter
def prod(self, prod):
if prod is None:
raise ValueError("Invalid value for `prod`, must not be `None`")
self._prod = prod
@property
def quantity(self):
return self._quantity
@quantity.setter
def quantity(self, quantity):
if quantity is None:
raise ValueError("Invalid value for `quantity`, must not be `None`")
self._quantity = quantity
@property
def price(self):
return self._price
@price.setter
def price(self, price):
if price is None:
raise ValueError("Invalid value for `price`, must not be `None`")
self._price = price
@property
def display_qty(self):
return self._display_qty
@display_qty.setter
def display_qty(self, display_qty):
self._display_qty = display_qty
@property
def contract_id(self):
return self._contract_id
@contract_id.setter
def contract_id(self, contract_id):
self._contract_id = contract_id
@property
def contract_name(self):
return self._contract_name
@contract_name.setter
def contract_name(self, contract_name):
self._contract_name = contract_name
@property
def cl_ordr_id(self):
return self._cl_ordr_id
@cl_ordr_id.setter
def cl_ordr_id(self, cl_ordr_id):
if cl_ordr_id is not None and len(cl_ordr_id) > 40:
raise ValueError("Invalid value for `cl_ordr_id`, length must be less than or equal to `40`")
self._cl_ordr_id = cl_ordr_id
@property
def clearing_acct_type(self):
return self._clearing_acct_type
@clearing_acct_type.setter
def clearing_acct_type(self, clearing_acct_type):
if clearing_acct_type is None:
raise ValueError("Invalid value for `clearing_acct_type`, must not be `None`")
self._clearing_acct_type = clearing_acct_type
@property
def ordr_exe_restriction(self):
return self._ordr_exe_restriction
@ordr_exe_restriction.setter
def ordr_exe_restriction(self, ordr_exe_restriction):
allowed_values = ["FOK", "IOC", "NON", "AON", "AU"]
if ordr_exe_restriction not in allowed_values:
raise ValueError(
"Invalid value for `ordr_exe_restriction` ({0}), must be one of {1}"
.format(ordr_exe_restriction, allowed_values)
)
self._ordr_exe_restriction = ordr_exe_restriction
@property
def pre_arranged(self):
return self._pre_arranged
@pre_arranged.setter
def pre_arranged(self, pre_arranged):
self._pre_arranged = pre_arranged
@property
def pre_arranged_acct(self):
return self._pre_arranged_acct
@pre_arranged_acct.setter
def pre_arranged_acct(self, pre_arranged_acct):
self._pre_arranged_acct = pre_arranged_acct
@property
def type(self):
return self._type
@type.setter
def type(self, type):
allowed_values = ["B", "O", "I", "L", "S", "H", "C", "N", "E"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def validity_res(self):
return self._validity_res
@validity_res.setter
def validity_res(self, validity_res):
allowed_values = ["GFS", "GTD", "NON"]
if validity_res not in allowed_values:
raise ValueError(
"Invalid value for `validity_res` ({0}), must be one of {1}"
.format(validity_res, allowed_values)
)
self._validity_res = validity_res
@property
def state(self):
return self._state
@state.setter
def state(self, state):
allowed_values = ["ACTI", "HIBE"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def validity_date(self):
return self._validity_date
@validity_date.setter
def validity_date(self, validity_date):
self._validity_date = validity_date
@property
def txt(self):
return self._txt
@txt.setter
def txt(self, txt):
if txt is not None and len(txt) > 250:
raise ValueError("Invalid value for `txt`, length must be less than or equal to `250`")
self._txt = txt
@property
def ppd(self):
return self._ppd
@ppd.setter
def ppd(self, ppd):
self._ppd = ppd
@property
def dlvry_start(self):
return self._dlvry_start
@dlvry_start.setter
def dlvry_start(self, dlvry_start):
self._dlvry_start = dlvry_start
@property
def dlvry_end(self):
return self._dlvry_end
@dlvry_end.setter
def dlvry_end(self, dlvry_end):
self._dlvry_end = dlvry_end
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, OrderEntry):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f715a6870b84172a6bce55c32434e579a2ef0c2a
| 6,133
|
py
|
Python
|
output/models/ms_data/element/elem_z018_xsd/elem_z018.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/element/elem_z018_xsd/elem_z018.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/element/elem_z018_xsd/elem_z018.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Dict, List, Optional
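# The classes below model a deeply nested XSD test case: each element wraps exactly
# one required child (root -> aaa -> bbb -> ... -> zzz -> signatures), and the
# innermost element accepts arbitrary XML Digital Signature content.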
@dataclass
class Signatures:
class Meta:
name = "signatures"
w3_org_2000_09_xmldsig_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "http://www.w3.org/2000/09/xmldsig#",
}
)
@dataclass
class Zzz:
class Meta:
name = "zzz"
signatures: Optional[Signatures] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Yyy:
class Meta:
name = "yyy"
zzz: Optional[Zzz] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Xxx:
class Meta:
name = "xxx"
yyy: Optional[Yyy] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Www:
class Meta:
name = "www"
xxx: Optional[Xxx] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Uuu:
class Meta:
name = "uuu"
www: Optional[Www] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ttt:
class Meta:
name = "ttt"
uuu: Optional[Uuu] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Sss:
class Meta:
name = "sss"
ttt: Optional[Ttt] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Rrr:
class Meta:
name = "rrr"
sss: Optional[Sss] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Qqq:
class Meta:
name = "qqq"
rrr: Optional[Rrr] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ppp:
class Meta:
name = "ppp"
qqq: Optional[Qqq] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ooo:
class Meta:
name = "ooo"
ppp: Optional[Ppp] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Nnn:
class Meta:
name = "nnn"
ooo: Optional[Ooo] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Mmm:
class Meta:
name = "mmm"
nnn: Optional[Nnn] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Lll:
class Meta:
name = "lll"
mmm: Optional[Mmm] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Kkk:
class Meta:
name = "kkk"
lll: Optional[Lll] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Jjj:
class Meta:
name = "jjj"
kkk: Optional[Kkk] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Iii:
class Meta:
name = "iii"
jjj: Optional[Jjj] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Hhh:
class Meta:
name = "hhh"
iii: Optional[Iii] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ggg:
class Meta:
name = "ggg"
hhh: Optional[Hhh] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Fff:
class Meta:
name = "fff"
ggg: Optional[Ggg] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Eee:
class Meta:
name = "eee"
fff: Optional[Fff] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ddd:
class Meta:
name = "ddd"
eee: Optional[Eee] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ccc:
class Meta:
name = "ccc"
ddd: Optional[Ddd] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Bbb:
class Meta:
name = "bbb"
ccc: Optional[Ccc] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Aaa:
class Meta:
name = "aaa"
bbb: Optional[Bbb] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Root:
class Meta:
name = "root"
aaa: Optional[Aaa] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
w3_org_xml_1998_namespace_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "http://www.w3.org/XML/1998/namespace",
}
)
| 15.806701
| 65
| 0.459971
|
from dataclasses import dataclass, field
from typing import Dict, List, Optional
@dataclass
class Signatures:
class Meta:
name = "signatures"
w3_org_2000_09_xmldsig_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "http://www.w3.org/2000/09/xmldsig#",
}
)
@dataclass
class Zzz:
class Meta:
name = "zzz"
signatures: Optional[Signatures] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Yyy:
class Meta:
name = "yyy"
zzz: Optional[Zzz] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Xxx:
class Meta:
name = "xxx"
yyy: Optional[Yyy] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Www:
class Meta:
name = "www"
xxx: Optional[Xxx] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Uuu:
class Meta:
name = "uuu"
www: Optional[Www] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ttt:
class Meta:
name = "ttt"
uuu: Optional[Uuu] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Sss:
class Meta:
name = "sss"
ttt: Optional[Ttt] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Rrr:
class Meta:
name = "rrr"
sss: Optional[Sss] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Qqq:
class Meta:
name = "qqq"
rrr: Optional[Rrr] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ppp:
class Meta:
name = "ppp"
qqq: Optional[Qqq] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ooo:
class Meta:
name = "ooo"
ppp: Optional[Ppp] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Nnn:
class Meta:
name = "nnn"
ooo: Optional[Ooo] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Mmm:
class Meta:
name = "mmm"
nnn: Optional[Nnn] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Lll:
class Meta:
name = "lll"
mmm: Optional[Mmm] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Kkk:
class Meta:
name = "kkk"
lll: Optional[Lll] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Jjj:
class Meta:
name = "jjj"
kkk: Optional[Kkk] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Iii:
class Meta:
name = "iii"
jjj: Optional[Jjj] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Hhh:
class Meta:
name = "hhh"
iii: Optional[Iii] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ggg:
class Meta:
name = "ggg"
hhh: Optional[Hhh] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Fff:
class Meta:
name = "fff"
ggg: Optional[Ggg] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Eee:
class Meta:
name = "eee"
fff: Optional[Fff] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ddd:
class Meta:
name = "ddd"
eee: Optional[Eee] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ccc:
class Meta:
name = "ccc"
ddd: Optional[Ddd] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Bbb:
class Meta:
name = "bbb"
ccc: Optional[Ccc] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Aaa:
class Meta:
name = "aaa"
bbb: Optional[Bbb] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Root:
class Meta:
name = "root"
aaa: Optional[Aaa] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
w3_org_xml_1998_namespace_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "http://www.w3.org/XML/1998/namespace",
}
)
| true
| true
|
f715a6b708707b2792f67edc44f6ef7fd6f14e2c
| 58,348
|
py
|
Python
|
torch/nn/parallel/distributed.py
|
chaekit/pytorch
|
132f5c1f36698361149ea99ca3504bd2acfdc19f
|
[
"Intel"
] | null | null | null |
torch/nn/parallel/distributed.py
|
chaekit/pytorch
|
132f5c1f36698361149ea99ca3504bd2acfdc19f
|
[
"Intel"
] | null | null | null |
torch/nn/parallel/distributed.py
|
chaekit/pytorch
|
132f5c1f36698361149ea99ca3504bd2acfdc19f
|
[
"Intel"
] | null | null | null |
import copy
import inspect
import itertools
import logging
import os
import warnings
from contextlib import contextmanager
from typing import NamedTuple
import torch
import torch.distributed as dist
RPC_AVAILABLE = False
if dist.is_available():
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.distributed_c10d import _get_default_group
if torch.distributed.rpc.is_available():
RPC_AVAILABLE = True
from torch.distributed.rpc import RRef
from torch._utils import _get_device_index
from ..modules import Module
from ._functions import _get_stream
from .scatter_gather import scatter_kwargs, gather, is_namedtuple
def _find_tensors(obj):
r"""
Recursively find all tensors contained in the specified object.
"""
if RPC_AVAILABLE and isinstance(obj, RRef):
# If the current node is the owner of the RRef, unwrap it and try to
# find Tensors.
# TODO: Expand to remote RRefs.
if obj.is_owner():
return _find_tensors(obj.local_value())
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
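# Debug helper: prints the distributed-training environment variables (RANK,
# MASTER_ADDR, NCCL_*, GLOO_*, ...) seen by this process, or "N/A" when unset.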
def _dump_DDP_relevant_env_vars():
relevant_env_vars = [
"RANK",
"LOCAL_RANK",
"WORLD_SIZE",
"MASTER_PORT",
"MASTER_ADDR",
"CUDA_VISIBLE_DEVICES",
"GLOO_SOCKET_IFNAME",
"GLOO_DEVICE_TRANSPORT",
"NCCL_SOCKET_IFNAME",
"NCCL_BLOCKING_WAIT",
"NCCL_DEBUG",
"NCCL_DEBUG_SUBSYS",
"NCCL_IB_DISABLE",
# More NCCL env vars:
"NCCL_P2P_DISABLE",
"NCCL_P2P_LEVEL",
"NCCL_SHM_DISABLE",
"NCCL_SOCKET_NTHREADS",
"NCCL_NSOCKS_PERTHREAD",
"NCCL_BUFFSIZE",
"NCCL_NTHREADS",
"NCCL_RINGS",
"NCCL_MAX_NCHANNELS",
"NCCL_MIN_NCHANNELS",
"NCCL_CHECKS_DISABLE",
"NCCL_CHECK_POINTERS",
"NCCL_LAUNCH_MODE",
"NCCL_IB_HCA",
"NCCL_IB_TIMEOUT",
"NCCL_IB_RETRY_CNT",
"NCCL_IB_GID_INDEX",
"NCCL_IB_SL",
"NCCL_IB_TC",
"NCCL_IB_AR_THRESHOLD",
"NCCL_IB_CUDA_SUPPORT",
"NCCL_NET_GDR_LEVEL",
"NCCL_NET_GDR_READ",
"NCCL_SINGLE_RING_THRESHOLD",
"NCCL_LL_THRESHOLD",
"NCCL_TREE_THRESHOLD",
"NCCL_ALGO",
"NCCL_PROTO",
"NCCL_IGNORE_CPU_AFFINITY",
"NCCL_DEBUG_FILE",
"NCCL_COLLNET_ENABLE",
"NCCL_TOPO_FILE",
"NCCL_TOPO_DUMP_FILE",
]
formatted_output = ""
for var in relevant_env_vars:
value = os.environ[var] if var in os.environ else "N/A"
formatted_output += "env:%s=%s\n" % (var, value)
print(formatted_output)
class _DDPUnevenInputsConfig(NamedTuple):
ddp_join_enabled: bool
ddp_join_divide_by_initial_world_size: bool
class DistributedDataParallel(Module):
r"""Implements distributed data parallelism that is based on
``torch.distributed`` package at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine and each device, and
each such replica handles a portion of the input. During the backwards
pass, gradients from each node are averaged.
The batch size should be larger than the number of GPUs used locally.
See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.
The same constraints on input as in :class:`torch.nn.DataParallel` apply.
    Creation of this class requires that ``torch.distributed`` be already
    initialized, by calling :func:`torch.distributed.init_process_group`.
``DistributedDataParallel`` is proven to be significantly faster than
:class:`torch.nn.DataParallel` for single-node multi-GPU data
parallel training.
To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn
up ``N`` processes, ensuring that each process exclusively works on a single
GPU from 0 to N-1. This can be done by either setting
``CUDA_VISIBLE_DEVICES`` for every process or by calling:
>>> torch.cuda.set_device(i)
where i is from 0 to N-1. In each process, you should refer the following
to construct this module:
>>> torch.distributed.init_process_group(
>>> backend='nccl', world_size=N, init_method='...'
>>> )
>>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)
In order to spawn up multiple processes per node, you can use either
``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.
.. note::
Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__
for a brief introduction to all features related to distributed training.
.. note::
``DistributedDataParallel`` can be used in conjunction with
:class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce
per-rank optimizer states memory footprint. Please refer to
`ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__
for more details.
.. note:: ``nccl`` backend is currently the fastest and highly recommended
backend when using GPUs. This applies to both single-node and
multi-node distributed training.
.. note:: This module also supports mixed-precision distributed training.
This means that your model can have different types of parameters such
as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these
mixed types of parameters will just work fine.
.. note:: If you use ``torch.save`` on one process to checkpoint the module,
and ``torch.load`` on some other processes to recover it, make sure that
``map_location`` is configured properly for every process. Without
``map_location``, ``torch.load`` would recover the module to devices
where the module was saved from.
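        A minimal illustration (``local_rank``, ``ddp_model`` and ``"checkpoint.pt"``
        below are placeholders, not names defined by this module):
        >>> map_location = {"cuda:0": "cuda:%d" % local_rank}
        >>> state_dict = torch.load("checkpoint.pt", map_location=map_location)
        >>> ddp_model.load_state_dict(state_dict)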
.. note:: When a model is trained on ``M`` nodes with ``batch=N``, the
gradient will be ``M`` times smaller when compared to the same model
trained on a single node with ``batch=M*N`` if the loss is summed (NOT
averaged as usual) across instances in a batch (because the gradients
between different nodes are averaged). You should take this into
consideration when you want to obtain a mathematically equivalent
training process compared to the local training counterpart. But in most
cases, you can just treat a DistributedDataParallel wrapped model, a
DataParallel wrapped model and an ordinary model on a single GPU as the
same (E.g. using the same learning rate for equivalent batch size).
.. note::
Parameters are never broadcast between processes. The module performs
an all-reduce step on gradients and assumes that they will be modified
by the optimizer in all processes in the same way. Buffers
(e.g. BatchNorm stats) are broadcast from the module in process of rank
0, to all other replicas in the system in every iteration.
.. note::
If you are using DistributedDataParallel in conjunction with the
:ref:`distributed-rpc-framework`, you should always use
:meth:`torch.distributed.autograd.backward` to compute gradients and
:class:`torch.distributed.optim.DistributedOptimizer` for optimizing
parameters.
Example::
>>> import torch.distributed.autograd as dist_autograd
>>> from torch.nn.parallel import DistributedDataParallel as DDP
>>> from torch import optim
>>> from torch.distributed.optim import DistributedOptimizer
>>> from torch.distributed.rpc import RRef
>>>
>>> t1 = torch.rand((3, 3), requires_grad=True)
>>> t2 = torch.rand((3, 3), requires_grad=True)
>>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
>>> ddp_model = DDP(my_model)
>>>
>>> # Setup optimizer
>>> optimizer_params = [rref]
>>> for param in ddp_model.parameters():
>>> optimizer_params.append(RRef(param))
>>>
>>> dist_optim = DistributedOptimizer(
>>> optim.SGD,
>>> optimizer_params,
>>> lr=0.05,
>>> )
>>>
>>> with dist_autograd.context() as context_id:
>>> pred = ddp_model(rref.to_here())
>>> loss = loss_func(pred, loss)
>>> dist_autograd.backward(context_id, loss)
>>> dist_optim.step()
.. note::
To let a non-DDP model load a state dict from a DDP model,
:meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`
needs to be applied to strip the prefix "module." in the DDP state dict before loading.
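        For example (``ddp_state_dict`` and ``plain_model`` are placeholder names for
        an already-loaded DDP state dict and the unwrapped model):
        >>> from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
        >>> consume_prefix_in_state_dict_if_present(ddp_state_dict, "module.")
        >>> plain_model.load_state_dict(ddp_state_dict)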
.. warning::
Constructor, forward method, and differentiation of the output (or a
function of the output of this module) are distributed synchronization
points. Take that into account in case different processes might be
executing different code.
.. warning::
This module assumes all parameters are registered in the model by the
time it is created. No parameters should be added nor removed later.
Same applies to buffers.
.. warning::
        This module assumes that the parameters registered in the model of each
        distributed process are in the same order. The module itself will
conduct gradient ``allreduce`` following the reverse order of the
registered parameters of the model. In other words, it is users'
responsibility to ensure that each distributed process has the exact
same model and thus the exact same parameter registration order.
.. warning::
This module allows parameters with non-rowmajor-contiguous strides.
For example, your model may contain some parameters whose
:class:`torch.memory_format` is ``torch.contiguous_format``
and others whose format is ``torch.channels_last``. However,
corresponding parameters in different processes must have the
same strides.
.. warning::
This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
only work if gradients are to be accumulated in ``.grad`` attributes of
parameters).
.. warning::
If you plan on using this module with a ``nccl`` backend or a ``gloo``
backend (that uses Infiniband), together with a DataLoader that uses
multiple workers, please change the multiprocessing start method to
``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
likely experience deadlocks if you don't change this setting.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
won't be invoked anymore, unless the hooks are initialized in the
:meth:`forward` method.
.. warning::
You should never try to change your model's parameters after wrapping
up your model with ``DistributedDataParallel``. Because, when
wrapping up your model with ``DistributedDataParallel``, the constructor
of ``DistributedDataParallel`` will register the additional gradient
reduction functions on all the parameters of the model itself at the
time of construction. If you change the model's parameters afterwards,
        gradient reduction functions no longer match the correct set of
parameters.
.. warning::
Using ``DistributedDataParallel`` in conjunction with the
:ref:`distributed-rpc-framework` is experimental and subject to change.
.. warning::
The ``gradient_as_bucket_view`` mode does not yet work with Automatic
Mixed Precision (AMP). AMP maintains stashed gradients that are used for
unscaling gradients. With ``gradient_as_bucket_view=True``, these
stashed gradients will point to communication buckets in the first
iteration. In the next iteration, the communication buckets are mutated
and thus these stashed gradients will be unexpectedly mutated as well,
which might lead to wrong results.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices.
1) For single-device modules, ``device_ids`` can
contain exactly one device id, which represents the only
CUDA device where the input module corresponding to this process resides.
Alternatively, ``device_ids`` can also be ``None``.
2) For multi-device modules and CPU modules,
``device_ids`` must be ``None``.
When ``device_ids`` is ``None`` for both cases,
both the input data for the forward pass and the actual module
must be placed on the correct device.
(default: ``None``)
output_device (int or torch.device): Device location of output for
single-device CUDA modules. For multi-device modules and
CPU modules, it must be ``None``, and the module itself
dictates the output location. (default: ``device_ids[0]``
for single-device modules)
broadcast_buffers (bool): Flag that enables syncing (broadcasting)
buffers of the module at beginning of the ``forward``
function. (default: ``True``)
process_group: The process group to be used for distributed data
all-reduction. If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
multiple buckets so that gradient reduction of each
bucket can potentially overlap with backward computation.
:attr:`bucket_cap_mb` controls the bucket size in
MegaBytes (MB). (default: 25)
find_unused_parameters (bool): Traverse the autograd graph from all
tensors contained in the return value of the
wrapped module's ``forward`` function. Parameters
that don't receive gradients as part of this
graph are preemptively marked as being ready to
be reduced. Note that all ``forward`` outputs
that are derived from module parameters must
participate in calculating loss and later the
gradient computation. If they don't, this wrapper
will hang waiting for autograd to produce
gradients for those parameters. Any outputs
derived from module parameters that are otherwise
unused can be detached from the autograd graph
using ``torch.Tensor.detach``. (default: ``False``)
check_reduction: This argument is deprecated.
gradient_as_bucket_view (bool): This is a prototype feature and subject
to changes. When set to ``True``, gradients will be views
pointing to different offsets of ``allreduce`` communication
buckets. This can reduce peak memory usage, where the
saved memory size will be equal to the total gradients
size. Moreover, it avoids the overhead of copying between
gradients and ``allreduce`` communication buckets. When
gradients are views, ``detach_()`` cannot be called on the
                      gradients. If you hit such errors, refer to the
                      :meth:`~torch.optim.Optimizer.zero_grad` function in
                      ``torch/optim/optimizer.py`` for a workaround.
Attributes:
module (Module): the module to be parallelized.
Example::
>>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
>>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
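
    A slightly fuller per-process sketch (hedged: ``rank`` below stands for the
    local CUDA device index of this process and is an illustrative assumption,
    not something the constructor requires)::

        >>> torch.distributed.init_process_group(
        >>>     backend='nccl', world_size=4, init_method='...')
        >>> model = model.to(rank)
        >>> ddp_model = torch.nn.parallel.DistributedDataParallel(
        >>>     model, device_ids=[rank], output_device=rank)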
"""
def __init__(
self,
module,
device_ids=None,
output_device=None,
dim=0,
broadcast_buffers=True,
process_group=None,
bucket_cap_mb=25,
find_unused_parameters=False,
check_reduction=False,
gradient_as_bucket_view=False,
):
super(DistributedDataParallel, self).__init__()
assert any((p.requires_grad for p in module.parameters())), (
"DistributedDataParallel is not needed when a module "
"doesn't have any parameter that requires a gradient."
)
if device_ids is not None and len(device_ids) > 1:
raise ValueError("device_ids can only be None or contain a single element.")
self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1
distinct_device_types = {p.device.type for p in module.parameters()}
if len(distinct_device_types) != 1:
raise ValueError(
"DistributedDataParallel's input module must be on "
"the same type of devices, but input module parameters locate in {}.".format(
distinct_device_types
)
)
self.device_type = list(distinct_device_types)[0]
if (
device_ids is None
or len(device_ids) == 0 # For backward compatibility.
or self.device_type == "cpu"
or self.is_multi_device_module
):
if device_ids or output_device:
raise ValueError(
"DistributedDataParallel device_ids and output_device arguments "
"only work with single-device/multiple-device GPU modules or CPU modules, "
"but got device_ids {}, output_device {}, and module parameters {}.".format(
device_ids,
output_device,
{p.device for p in module.parameters()},
)
)
self.device_ids = None
self.output_device = None
else:
self.device_ids = [_get_device_index(x, True) for x in device_ids]
if output_device is None:
output_device = device_ids[0]
self.output_device = _get_device_index(output_device, True)
if process_group is None:
self.process_group = _get_default_group()
else:
self.process_group = process_group
self.dim = dim
self.module = module
self.device = list(self.module.parameters())[0].device
self.broadcast_buffers = broadcast_buffers
self.find_unused_parameters = find_unused_parameters
self.require_backward_grad_sync = True
self.require_forward_param_sync = True
self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False
)
self.gradient_as_bucket_view = gradient_as_bucket_view
if hasattr(module, "_ddp_params_and_buffers_to_ignore"):
self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore
else:
self.parameters_to_ignore = []
if check_reduction:
# This argument is no longer used since the reducer
# will ensure reduction completes even if some parameters
# do not receive gradients.
warnings.warn(
"The `check_reduction` argument in `DistributedDataParallel` "
"module is deprecated. Please avoid using it."
)
# Check that a module does not have Uninitialized parameters
for param in module.parameters():
if isinstance(param, torch.nn.parameter.UninitializedParameter):
raise RuntimeError(
"Modules with uninitialized parameters can't be used with `DistributedDataParallel`. "
"Run a dummy forward pass to correctly initialize the modules"
)
        # used for intra-node param sync and inter-node sync as well
self.broadcast_bucket_size = int(250 * 1024 * 1024)
# reduction bucket size
self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)
# Whether to perform input tensor CPU to GPU copies on a side-stream
self.use_side_stream_for_tensor_copies = (
os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1"
)
# TODO(wayi@): Remove this field since SPMD is no longer supported,
# and also remove all the relevant unnecessary loops.
# Module replication within process (single-process multi device)
self._module_copies = [self.module]
# Build parameters for reducer.
parameters, expect_sparse_gradient = self._build_params_for_reducer()
# Verify model equivalence.
dist._verify_model_across_ranks(self.process_group, parameters)
# Sync params and buffers. Ensures all DDP models start off at the same value.
self._sync_params_and_buffers(authoritative_rank=0)
# Builds reducer.
self._ddp_init_helper(parameters, expect_sparse_gradient)
def _sync_params_and_buffers(self, authoritative_rank=0):
module_states = []
for name, param in self.module.state_dict().items():
if name not in self.parameters_to_ignore:
module_states.append(param)
if len(module_states) > 0:
self._distributed_broadcast_coalesced(
module_states, self.broadcast_bucket_size, authoritative_rank
)
def _ddp_init_helper(self, parameters, expect_sparse_gradient):
"""
Initialization helper function that does the following:
(1) bucketing the parameters for reductions
(2) resetting the bucketing states
(3) registering the grad hooks
        (4) logging construction-time DDP logging data
(5) passing a handle of DDP to SyncBatchNorm Layer
"""
# The bucket size limit is specified in the constructor.
# Additionally, we allow for a single small bucket for parameters
# that are defined first, such that their gradients don't spill into
# a much larger bucket, adding unnecessary latency after gradient
# computation finishes. Experiments showed 1MB is a reasonable value.
bucket_indices = dist._compute_bucket_assignment_by_size(
parameters[0],
[dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],
expect_sparse_gradient[0],
)
# Note: reverse list of buckets because we want to approximate the
# order in which their gradients are produced, and assume they
# are used in the forward pass in the order they are defined.
self.reducer = dist.Reducer(
parameters,
list(reversed(bucket_indices)),
self.process_group,
expect_sparse_gradient,
self.bucket_bytes_cap,
self.find_unused_parameters,
self.gradient_as_bucket_view,
)
self.logger = dist.Logger(self.reducer)
# Set logging data that can be got during construction time.
self.logger.set_construction_data_and_log(
self.module.__class__.__name__,
[] if self.device_ids is None else self.device_ids,
-1 if self.output_device is None else self.output_device,
self.broadcast_buffers,
)
# passing a handle to torch.nn.SyncBatchNorm layer
self._passing_sync_batchnorm_handle(self._module_copies)
def __getstate__(self):
self._check_default_group()
attrs = copy.copy(self.__dict__)
del attrs["process_group"]
del attrs["reducer"]
del attrs["logger"]
return attrs
def __setstate__(self, state):
# If serializable, then the process group should be the default one
self.process_group = _get_default_group()
super(DistributedDataParallel, self).__setstate__(state)
self.__dict__.setdefault("require_forward_param_sync", True)
self.__dict__.setdefault("require_backward_grad_sync", True)
parameters, expect_sparse_gradient = self._build_params_for_reducer()
self._ddp_init_helper(parameters, expect_sparse_gradient)
def _build_params_for_reducer(self):
# Build tuple of (module, parameter) for all parameters that require grads.
modules_and_parameters = [
[
(module, parameter)
for module_name, module in replica.named_modules()
for parameter in [
param
# Note that we access module.named_parameters instead of
# parameters(module). parameters(module) is only needed in the
# single-process multi device case, where it accesses replicated
# parameters through _former_parameters.
for param_name, param in module.named_parameters(recurse=False)
if param.requires_grad
and f"{module_name}.{param_name}"
not in self.parameters_to_ignore
]
]
for replica in self._module_copies
]
# Deduplicate any parameters that might be shared across child modules.
memo = set()
modules_and_parameters = [
# "p not in memo" is the deduplication check.
# "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed.
[(m, p) for m, p in replica_mps if p not in memo and not memo.add(p)]
for replica_mps in modules_and_parameters
]
# Build list of parameters.
parameters = [
list(parameter for _, parameter in replica)
for replica in modules_and_parameters
]
# Checks if a module will produce a sparse gradient.
def produces_sparse_gradient(module):
if isinstance(module, torch.nn.Embedding) or isinstance(
module, torch.nn.EmbeddingBag
):
return module.sparse
return False
# Build list of booleans indicating whether or not to expect sparse
# gradients for the corresponding parameters.
expect_sparse_gradient = [
list(produces_sparse_gradient(module) for module, _ in replica)
for replica in modules_and_parameters
]
# The following modules_params and modules_buffers are used for
# param/buffer sync in _sync_params.
self.modules_params = [
list(self._get_parameters(m)) for m in self._module_copies
]
# Collect buffers for modules, filtering out buffers that should be ignored.
named_module_buffers = [
[(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]
for m in self._module_copies
]
self.modules_buffers = [
[
buffer
for (buffer, buffer_name) in module_buffers
if buffer_name not in self.parameters_to_ignore
]
for module_buffers in named_module_buffers
]
return parameters, expect_sparse_gradient
def _get_parameters(self, m, recurse=True):
"""
Returns a generator of module parameters
"""
def model_parameters(m):
ps = (
m._former_parameters.values()
if hasattr(m, "_former_parameters")
else m.parameters(recurse=False)
)
for p in ps:
yield p
for m in m.modules() if recurse else [m]:
for p in model_parameters(m):
yield p
def _check_default_group(self):
pickle_not_supported = False
try:
if self.process_group != _get_default_group():
pickle_not_supported = True
except RuntimeError:
pickle_not_supported = True
if pickle_not_supported:
raise RuntimeError(
"DDP Pickling/Unpickling are only supported "
"when using DDP with the default process "
"group. That is, when you have called "
"init_process_group and have not passed "
"process_group argument to DDP constructor"
)
@contextmanager
def no_sync(self):
r"""
A context manager to disable gradient synchronizations across DDP
processes. Within this context, gradients will be accumulated on module
variables, which will later be synchronized in the first
forward-backward pass exiting the context.
Example::
>>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
>>> with ddp.no_sync():
            >>>     for input in inputs:
            >>>         ddp(input).backward() # no synchronization, accumulate grads
            >>> ddp(another_input).backward() # synchronize grads
"""
old_require_backward_grad_sync = self.require_backward_grad_sync
self.require_backward_grad_sync = False
try:
yield
finally:
self.require_backward_grad_sync = old_require_backward_grad_sync
def forward(self, *inputs, **kwargs):
self.reducer.save_thread_local_state()
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.logger.set_runtime_stats_and_log()
self.reducer.prepare_for_forward()
if self.ddp_uneven_inputs_config.ddp_join_enabled:
ones = torch.ones(1, device=self.device)
work = dist.all_reduce(ones, group=self.process_group, async_op=True)
self.reducer._set_forward_pass_work_handle(
work,
self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size,
)
        # Calling _rebuild_buckets before forward computation may allocate
        # new buckets before deallocating old buckets inside
        # _rebuild_buckets. To save peak memory usage, call
        # _rebuild_buckets before the peak memory usage increases during
        # forward computation.
        # This should only be called once during the whole training period.
if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
logging.info("Reducer buckets have been rebuilt in this iteration.")
if self.require_forward_param_sync:
self._sync_params()
if self.ddp_uneven_inputs_config.ddp_join_enabled:
# Notify joined ranks whether they should sync in backwards pass or not.
self._check_global_requires_backward_grad_sync(is_joined_rank=False)
if self.device_ids:
inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])
output = self.module(*inputs[0], **kwargs[0])
else:
output = self.module(*inputs, **kwargs)
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.require_forward_param_sync = True
# We'll return the output object verbatim since it is a freeform
# object. We need to find any tensors in this object, though,
# because we need to figure out which parameters were used during
# this forward pass, to ensure we short circuit reduction for any
# unused parameters. Only if `find_unused_parameters` is set.
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
self.require_forward_param_sync = False
return output
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def _recursive_to(self, inputs, target_gpu):
r"""
Recursively moves input to the target_gpu.
"""
def to_map(obj):
if isinstance(obj, torch.Tensor):
if not self.use_side_stream_for_tensor_copies:
return (obj.to(target_gpu),)
else:
# Perform CPU -> GPU copies in a background stream. This code is
# motivated from similar logic in torch/nn/parallel/_functions.py
stream = _get_stream(target_gpu)
with torch.cuda.stream(stream):
output = obj.to(target_gpu)
# synchronize with the copy stream
with torch.cuda.device(target_gpu):
current_stream = torch.cuda.current_stream()
# Sync the current stream with the copy stream
current_stream.wait_stream(stream)
# Ensure tensor memory is not reused until work on
# main stream is complete
output.record_stream(current_stream)
return (output,)
if is_namedtuple(obj):
return [type(obj)(*args) for args in zip(*map(to_map, obj))]
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(to_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return [list(i) for i in zip(*map(to_map, obj))]
if isinstance(obj, dict) and len(obj) > 0:
return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]
return [obj]
# Avoid reference cycle
try:
res = to_map(inputs)
finally:
to_map = None
return res
def to_kwargs(self, inputs, kwargs, device_id):
inputs = self._recursive_to(inputs, device_id) if inputs else []
kwargs = self._recursive_to(kwargs, device_id) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def train(self, mode=True):
super(DistributedDataParallel, self).train(mode)
for module in self._module_copies[1:]:
module.train(mode)
return self
# When running in join mode, schedules an allreduce to match the one in the
    # forward pass to determine the number of currently active processes and whether
# all processes have joined.
def _schedule_shadow_all_reduce_for_fwd_pass(self):
all_active_procs = torch.zeros(1, device=self.device)
dist.all_reduce(all_active_procs, group=self.process_group)
return all_active_procs.item()
# When running in join mode, schedules an allreduce to notify joined ranks
    # of whether backwards pass synchronization will run this iteration or not.
def _check_global_requires_backward_grad_sync(self, is_joined_rank):
if not is_joined_rank and self.require_backward_grad_sync:
requires_sync_tensor = torch.ones(1, device=self.device)
else:
requires_sync_tensor = torch.zeros(1, device=self.device)
work = dist.all_reduce(
requires_sync_tensor, group=self.process_group, async_op=True
)
return work, requires_sync_tensor
# When running in join mode, checks and performs sync of module buffers if
# the models have buffers that should be synchronized in the forward pass.
def _check_and_sync_module_buffers(self):
if self.will_sync_module_buffers():
authoritative_rank = self._find_common_rank(self._distributed_rank, False)
self._distributed_broadcast_coalesced(
self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank
)
    # When running in join mode, agrees upon a common rank and broadcasts model
# parameters to all other ranks.
def _sync_final_model(self, is_last_joiner):
# Agree upon the process that will be the authoritative model copy.
# The current rank is a candidate for being the authoritative copy if
# is_last_joiner=True. We break ties via picking the larger rank.
self._authoritative_rank = self._find_common_rank(
self._distributed_rank, is_last_joiner
)
self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)
# Schedule allreduce ops to match those scheduled in the reducer's backward
# pass.
def _match_all_reduce_for_bwd_pass(self):
allreduce_work = []
# Schedule allreduce in the same order as Reducer schedules them, i.e.
# the order of the buckets. Retrieving the bucket order from the reducer
# ensures that we keep the same order in join mode, such as when bucket
# order is rebuilt dynamically.
all_bucket_tensors = self.reducer.get_bucket_tensors()
for bucket_tensors in all_bucket_tensors:
# Joined processes contribute zero gradient. In the case that
# divide_by_initial_world_size=True, we divide grads by the static
# world size, if not, the dividing factor is reduced by the number
# of joined processes.
zero_tensors = [torch.zeros_like(t) for t in bucket_tensors]
work = self.process_group.allreduce(zero_tensors)
allreduce_work.append(work)
for work in allreduce_work:
work.wait()
# Allreduces the used parameter mapping across ranks.
def _match_unused_params_allreduce(self):
locally_used_param_maps = self.reducer._get_local_used_maps()
self.process_group.allreduce(locally_used_param_maps)
@contextmanager
def join(self, divide_by_initial_world_size=True, enable=True):
r"""
A context manager to be used in conjunction with an instance of
:class:`torch.nn.parallel.DistributedDataParallel` to be
able to train with uneven inputs across participating processes.
This context manager will keep track of already-joined DDP processes,
and "shadow" the forward and backward passes by inserting collective
communication operations to match with the ones created by non-joined
DDP processes. This will ensure each collective call has a corresponding
call by already-joined DDP processes, preventing hangs or errors that
would otherwise happen when training with uneven inputs across
processes.
Once all DDP processes have joined, the context manager will broadcast
the model corresponding to the last joined process to all processes to
ensure the model is the same across all processes
(which is guaranteed by DDP).
To use this to enable training with uneven inputs across processes,
simply wrap this context manager around your training loop. No further
        modifications to the model or data loading are required.
.. warning::
This module currently does not support custom distributed collective
operations in the forward pass, such as ``SyncBatchNorm`` or other
custom defined collectives in the model's forward pass.
Args:
divide_by_initial_world_size (bool): If ``True``, will divide
gradients by the initial ``world_size`` DDP training was launched
with. If ``False``, will compute the effective world size
(number of ranks that have not depleted their inputs yet) and
divide gradients by that during allreduce. Set
``divide_by_initial_world_size=True`` to ensure every input
sample including the uneven inputs have equal weight in terms of
how much they contribute to the global gradient. This is
achieved by always dividing the gradient by the initial
``world_size`` even when we encounter uneven inputs. If you set
this to ``False``, we divide the gradient by the remaining
number of nodes. This ensures parity with training on a smaller
``world_size`` although it also means the uneven inputs would
contribute more towards the global gradient. Typically, you
would want to set this to ``True`` for cases where the last few
inputs of your training job are uneven. In extreme cases, where
there is a large discrepancy in the number of inputs, setting
this to ``False`` might provide better results.
enable (bool): Whether to enable uneven input detection or not. Pass
in ``enable=False`` to disable in cases where you know that
inputs are even across participating processes. Default is
``True``.
Example::
>>> import torch
>>> import torch.distributed as dist
>>> import os
>>> import torch.multiprocessing as mp
>>> import torch.nn as nn
>>> # On each spawned worker
>>> def worker(rank):
            >>>     dist.init_process_group("nccl", rank=rank, world_size=2)
            >>>     torch.cuda.set_device(rank)
            >>>     model = nn.Linear(1, 1, bias=False).to(rank)
            >>>     model = torch.nn.parallel.DistributedDataParallel(
            >>>         model, device_ids=[rank], output_device=rank
            >>>     )
            >>>     # Rank 1 gets one more input than rank 0.
            >>>     inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]
            >>>     with model.join():
            >>>         for _ in range(5):
            >>>             for inp in inputs:
            >>>                 loss = model(inp).sum()
            >>>                 loss.backward()
            >>>     # Without the join() API, the below synchronization will hang
            >>>     # blocking for rank 1's allreduce to complete.
            >>>     torch.cuda.synchronize(device=rank)
"""
# Log uneven input API usage.
self.logger._set_uneven_input_join()
try:
has_error = False
self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
ddp_join_enabled=enable,
ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,
)
yield
except Exception as e:
# Set to skip any processing in the finally block.
has_error = True
raise e
finally:
# Skip any processing to let the exception immediately be raised if
# there was one.
if enable and not has_error:
all_procs_joined = False
is_last_joiner = True
i = 0
WARN_THRESHOLD = 1000
warnings.simplefilter("once")
while not all_procs_joined:
if i > WARN_THRESHOLD:
my_rank = self._distributed_rank
warnings.warn(
"Detected uneven input skew of greater "
f"than {WARN_THRESHOLD}. This means that rank {my_rank} "
f"has at least {WARN_THRESHOLD} fewer inputs than "
"other currently active ranks. This level of skew could "
"lead to performance degradation during training."
)
# Schedules allreduce to match fwd pass allreduce in non-joined procs
num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()
if num_active_procs == 0:
all_procs_joined = True
else:
# Some DDP process still needs to be joined.
if is_last_joiner:
is_last_joiner = False
# It will rebuild buckets only once during training period
self.reducer._rebuild_buckets()
# Schedule a corresponding broadcast if we are syncing module
# buffers in the forward pass.
self._check_and_sync_module_buffers()
(
work,
should_sync_backwards_tensor,
) = self._check_global_requires_backward_grad_sync(
is_joined_rank=True
)
work.wait()
# If nonzero, then we should sync in the bwd pass.
should_sync_backwards = should_sync_backwards_tensor.item() != 0
# Forward param sync is disabled in the next iteration
# if we are skipping grad sync this iteration. Hence, we
# set require_forward_param_sync appropriately here.
self.require_forward_param_sync = should_sync_backwards
if not should_sync_backwards:
continue
# Schedules one allreduce per gradient bucket to match
# the backwards pass allreduce.
self._match_all_reduce_for_bwd_pass()
# Check if we need to allreduce locally unused params.
if self.find_unused_parameters:
self._match_unused_params_allreduce()
# It will push rebuilt params only once during training period
self.reducer._push_all_rebuilt_params()
i += 1
# All procs joined. Agree on authoritative rank and broadcast the model.
self._sync_final_model(is_last_joiner)
def register_comm_hook(self, state: object, hook: callable):
r"""
        Registers a communication hook, which gives users a flexible way to
        specify how DDP aggregates gradients across multiple workers.
This hook would be very useful for researchers to try out new ideas. For
example, this hook can be used to implement several algorithms like GossipGrad
and gradient compression which involve different communication strategies for
        parameter syncs while running DistributedDataParallel training.
Args:
state (object): Passed to the hook to maintain any state information during the training process.
Examples include error feedback in gradient compression,
peers to communicate with next in GossipGrad, etc.
It is locally stored by each worker
and shared by all the gradient tensors on the worker.
hook (callable): Averages gradient tensors across workers and defined as:
``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future``:
This function is called once the bucket is ready. The
hook can perform whatever processing is needed and return
a Future indicating completion of any async work (ex: allreduce).
If the hook doesn't perform any communication, it can also
just return a completed Future. The Future should hold the
new value of grad bucket's tensors. Once a bucket is ready,
c10d reducer would call this hook and use the tensors returned
by the Future and copy grads to individual parameters.
We also provide an API called ``get_future`` to retrieve a
Future associated with the completion of ``c10d.ProcessGroup.work``.
.. warning ::
            Grad bucket's tensors will not be predivided by world_size. The user
            is responsible for dividing by the world_size in the case of
            operations like allreduce.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
.. warning ::
The Future object that hook returns should contain a result that has the same
shape with the tensors inside grad bucket.
.. warning ::
DDP communication hook does not support single-process multiple-device mode.
Gradbucket tensors should consist of only a single tensor.
.. warning ::
``get_future`` API supports only NCCL backend and will return a ``torch._C.Future``
which is an internal type and should be used with caution. It can still be used by
``register_comm_hook`` API, but it is subject to some subtle differences compared
to ``torch.futures.Future``.
.. warning ::
DDP communication hook is experimental and subject to change.
Example::
Below is an example of a noop hook that returns the same tensors.
            >>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future:
            >>>     fut = torch.futures.Future()
            >>>     fut.set_result(bucket.get_tensors())
            >>>     return fut
            >>> ddp.register_comm_hook(state = None, hook = noop)
Example::
Below is an example of a Parallel SGD algorithm where gradients are encoded before
allreduce, and then decoded after allreduce.
            >>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future:
            >>>     tensors = [t / process_group.world_size for t in bucket.get_tensors()]
            >>>     encoded_tensors = encode(tensors) # encode gradients
            >>>     fut = process_group.allreduce(encoded_tensors).get_future()
            >>>     # Define the then callback to decode.
            >>>     def decode_callback(fut):
            >>>         decoded_tensors = decode(fut.value()) # decode gradients
            >>>         return decoded_tensors
            >>>     return fut.then(decode_callback)
            >>> ddp.register_comm_hook(state = None, hook = encode_and_decode)
"""
self._check_comm_hook(hook)
self.logger._set_comm_hook_name(hook.__qualname__)
dist._register_comm_hook(self.reducer, state, hook)
def _register_builtin_comm_hook(self, comm_hook_type):
r"""
Registers a built-in communication hook that specifies how DDP
aggregates gradients across multiple workers.
The built-in hooks aim to provide efficient C++ implementations for certain hooks,
which might not be as efficient if implemented in Python using a Python communication hook.
Args:
comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as
ALLREDUCE, FP16_COMPRESS, etc.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
.. warning ::
DDP communication hook does not support single-process multiple-device mode.
Gradbucket tensors should consist of only a single tensor.
.. warning ::
DDP communication hook is experimental and subject to change.
Example::
Below is an example of a FP16 compression where gradients are
compressed into 16-bit floating-point numbers before allreduce, and
then decompressed after allreduce.
>>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)
"""
self.logger._set_comm_hook_name(str(comm_hook_type))
dist._register_builtin_comm_hook(self.reducer, comm_hook_type)
def _distributed_broadcast_coalesced(
self, tensors, buffer_size, authoritative_rank=0
):
dist._broadcast_coalesced(
self.process_group, tensors, buffer_size, authoritative_rank
)
def will_sync_module_buffers(self):
return (
self.require_forward_param_sync
and self.broadcast_buffers
and len(self.modules_buffers[0]) > 0
)
def _find_common_rank(self, input_rank, rank_cond):
# -1 indicates that this rank is not under consideration to be the
# common_rank
rank_to_use = torch.tensor(
[input_rank if rank_cond else -1],
device=self.device,
)
dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)
if rank_to_use.item() == -1:
raise ValueError(
"BUG! Expected rank_cond to be true for at least one process."
)
return rank_to_use.item()
def _sync_params(self):
with torch.no_grad():
# module buffer sync
if self.will_sync_module_buffers():
# Synchronize buffers across processes.
# If we are running DDP with the join manager, we have to agree
# upon a rank to sync module buffers from, since rank 0 may
# already have been joined and have stale module buffers.
if self.ddp_uneven_inputs_config.ddp_join_enabled:
authoritative_rank = self._find_common_rank(
self._distributed_rank, True
)
else:
# The process with rank 0 is considered the authoritative copy.
authoritative_rank = 0
self._distributed_broadcast_coalesced(
self.modules_buffers[0],
self.broadcast_bucket_size,
authoritative_rank,
)
def _passing_sync_batchnorm_handle(self, module_copies):
for dev_idx, module in enumerate(module_copies):
for layer in module.modules():
if isinstance(layer, torch.nn.modules.SyncBatchNorm):
assert (
self.device_type != "cpu"
), "SyncBatchNorm layers only work with GPU modules"
layer._specify_ddp_gpu_num(1)
def _check_comm_hook(self, hook):
if not callable(hook):
raise TypeError("Communication hook must be callable.")
sig = inspect.signature(hook)
if (
sig.parameters["bucket"].annotation != inspect._empty
and sig.parameters["bucket"].annotation != dist.GradBucket
):
raise ValueError(
"Communication hook: bucket annotation should be dist.GradBucket."
)
if sig.return_annotation != inspect._empty and (
sig.return_annotation != torch.futures.Future
and sig.return_annotation != torch._C.Future
):
raise ValueError(
"Communication hook: return annotation should be torch.futures.Future or torch._C.Future."
)
@property
def _distributed_rank(self):
return dist.get_rank(self.process_group)
@staticmethod
def _set_params_and_buffers_to_ignore_for_model(
module, params_and_buffers_to_ignore
):
# This is a workaround to set parameters and buffers DDP should ignore
# during synchronization. It will be removed when the API is finalized
# as part of addressing https://github.com/pytorch/pytorch/issues/43690.
module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore
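        # A hedged usage sketch (the parameter/buffer names below are purely
        # illustrative assumptions):
        #
        #   DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
        #       model, ["submodule.weight", "submodule.running_mean"])
        #   ddp_model = DistributedDataParallel(model, device_ids=[rank])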
def get_ddp_logging_data(self):
r"""
This interface can be called after DistributedDataParallel() is
constructed. It returns DDPLoggingData for debugging and analysis.
        A more detailed explanation of the fields in DDPLoggingData can be found in
``torch/c10/util/Logging.h``.
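
        A minimal usage sketch (assuming ``ddp`` is an already constructed
        ``DistributedDataParallel`` instance)::

            >>> ddp_logging_data = ddp.get_ddp_logging_data()
            >>> print(ddp_logging_data)  # fields are documented in Logging.h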
"""
return self.logger._get_ddp_logging_data()
def set_ddp_runtime_logging_sample_rate(self, sample_rate):
r"""
        This interface allows users to set the sample rate for collecting
        runtime stats. Runtime stats are always recorded for the first 10
        iterations; after that, they are recorded once every "sample_rate"
        training iterations. By default (if this method is not called),
        runtime stats are recorded once every
        "kDDPRuntimeLoggingSampleRate=100" training iterations after the
        first 10 iterations.
"""
if sample_rate < 1:
raise ValueError(
"DDP runtime logging sample rate should be equal or greater than 1"
)
self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)


# ---- file: test.py | repo: fahmirevo/sign-language-recognition | license: MIT ----

from keras.models import load_model
import numpy as np

# Load the held-out test split stored as numpy arrays.
X = np.load("dataset/X_test.npy")
Y = np.load("dataset/Y_test.npy")

# Restore the trained model from disk and evaluate it on the test set.
model = load_model("model")
score = model.evaluate(X, Y)
# score[0] is the loss; score[1] is the first metric compiled into the model
# (typically accuracy).
print(score[0], score[1])
# print(np.argmax(model.predict(X[:200]), axis=1))
# print(np.argmax(model.predict(X), axis=1) == np.argmax(Y, axis=1))
# print(model.predict(X[:50]))
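
# A hedged extension of the commented-out checks above: accuracy computed
# directly from argmax predictions (this assumes Y holds one-hot labels).
predictions = np.argmax(model.predict(X), axis=1)
labels = np.argmax(Y, axis=1)
print("argmax accuracy:", np.mean(predictions == labels))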


# ---- file: deepchem/molnet/load_function/factors_datasets.py | repo: deloragaskins/deepchem | license: MIT ----

"""
FACTORS dataset loader
"""
import os
import logging
import time
import numpy as np
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors
logger = logging.getLogger(__name__)
TRAIN_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_training_disguised_combined_full.csv.gz"
VALID_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test1_disguised_combined_full.csv.gz"
TEST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test2_disguised_combined_full.csv.gz"
TRAIN_FILENAME = "FACTORS_training_disguised_combined_full.csv.gz"
VALID_FILENAME = "FACTORS_test1_disguised_combined_full.csv.gz"
TEST_FILENAME = "FACTORS_test2_disguised_combined_full.csv.gz"
def remove_missing_entries(dataset):
"""Remove missing entries.
Some of the datasets have missing entries that sneak in as zero'd out
feature vectors. Get rid of them.
"""
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
available_rows = X.any(axis=1)
logger.info("Shard %d has %d missing entries." %
(i, np.count_nonzero(~available_rows)))
X = X[available_rows]
y = y[available_rows]
w = w[available_rows]
ids = ids[available_rows]
dataset.set_shard(i, X, y, w, ids)
def get_transformers(train_dataset):
"""Gets transformers applied to the dataset"""
transformers = list()
# TODO: Check if anything needs to be added
return transformers
def gen_factors(FACTORS_tasks,
data_dir,
train_dir,
valid_dir,
test_dir,
shard_size=2000):
"""Loads the FACTORS dataset; does not do train/test split"""
time1 = time.time()
train_files = os.path.join(data_dir, TRAIN_FILENAME)
valid_files = os.path.join(data_dir, VALID_FILENAME)
test_files = os.path.join(data_dir, TEST_FILENAME)
if not os.path.exists(train_files):
logger.info("Downloading train file...")
deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)
logger.info("Training file download complete.")
logger.info("Downloading validation file...")
deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)
logger.info("Validation file download complete.")
logger.info("Downloading test file...")
deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)
logger.info("Test file download complete")
# Featurize the FACTORS dataset
logger.info("About to featurize the FACTORS dataset")
featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
loader = deepchem.data.UserCSVLoader(
tasks=FACTORS_tasks, id_field="Molecule", featurizer=featurizer)
logger.info("Featurizing the train dataset...")
train_dataset = loader.featurize(train_files, shard_size=shard_size)
logger.info("Featurizing the validation dataset...")
valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
logger.info("Featurizing the test dataset...")
test_dataset = loader.featurize(test_files, shard_size=shard_size)
logger.info("Remove missing entries from dataset")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
# Shuffle the training data
logger.info("Shuffling the training dataset")
train_dataset.sparse_shuffle()
# Apply transformations
logger.info("Transforming datasets with transformers")
transformers = get_transformers(train_dataset)
for transformer in transformers:
logger.info("Performing transformations with {}".format(
transformer.__class__.__name__))
logger.info("Transforming the training dataset...")
train_dataset = transformer.transform(train_dataset)
logger.info("Transforming the validation dataset...")
valid_dataset = transformer.transform(valid_dataset)
logger.info("Transforming the test dataset...")
test_dataset = transformer.transform(test_dataset)
logger.info("Transformations complete.")
logger.info("Moving datasets to corresponding directories")
train_dataset.move(train_dir)
logger.info("Train dataset moved.")
valid_dataset.move(valid_dir)
logger.info("Validation dataset moved.")
test_dataset.move(test_dir)
logger.info("Test dataset moved.")
time2 = time.time()
# TIMING
logger.info("TIMING: FACTORS fitting took %0.3f s" % (time2 - time1))
return train_dataset, valid_dataset, test_dataset
def load_factors(shard_size=2000, featurizer=None, split=None, reload=True):
"""Loads FACTOR dataset; does not do train/test split
The Factors dataset is an in-house dataset from Merck that was first introduced in the following paper:
Ramsundar, Bharath, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.
It contains 1500 Merck in-house compounds that were measured
for IC50 of inhibition on 12 serine proteases. Unlike most of
the other datasets featured in MoleculeNet, the Factors
collection does not have structures for the compounds tested
since they were proprietary Merck compounds. However, the
collection does feature pre-computed descriptors for these
compounds.
Note that the original train/valid/test split from the source
data was preserved here, so this function doesn't allow for
alternate modes of splitting. Similarly, since the source data
came pre-featurized, it is not possible to apply alternative
featurizations.
Parameters
----------
shard_size: int, optional
Size of the DiskDataset shards to write on disk
featurizer: optional
Ignored since featurization pre-computed
split: optional
Ignored since split pre-computed
reload: bool, optional
Whether to automatically re-load from disk
"""
FACTORS_tasks = [
'T_00001', 'T_00002', 'T_00003', 'T_00004', 'T_00005', 'T_00006',
'T_00007', 'T_00008', 'T_00009', 'T_00010', 'T_00011', 'T_00012'
]
data_dir = deepchem.utils.data_utils.get_data_dir()
data_dir = os.path.join(data_dir, "factors")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
train_dir = os.path.join(data_dir, "train_dir")
valid_dir = os.path.join(data_dir, "valid_dir")
test_dir = os.path.join(data_dir, "test_dir")
if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
os.path.exists(test_dir)):
logger.info("Reloading existing datasets")
train_dataset = deepchem.data.DiskDataset(train_dir)
valid_dataset = deepchem.data.DiskDataset(valid_dir)
test_dataset = deepchem.data.DiskDataset(test_dir)
else:
logger.info("Featurizing datasets")
train_dataset, valid_dataset, test_dataset = gen_factors(
FACTORS_tasks=FACTORS_tasks,
data_dir=data_dir,
train_dir=train_dir,
valid_dir=valid_dir,
test_dir=test_dir,
shard_size=shard_size)
transformers = get_transformers(train_dataset)
return FACTORS_tasks, (train_dataset, valid_dataset,
test_dataset), transformers
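# A minimal usage sketch for the loader above (assumes deepchem and its data
# dependencies are installed, and that the descriptor CSVs can be downloaded
# on first use; the helper name is illustrative):
def _example_load_factors():
    tasks, (train, valid, test), transformers = load_factors(shard_size=2000)
    logger.info("Loaded %d FACTORS tasks", len(tasks))
    return tasks, (train, valid, test), transformers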
# === tests/threadpool/test_concurrency.py (iiSeymour/aiofiles, Apache-2.0) ===
"""Test concurrency properties of the implementation."""
from os.path import dirname
from os.path import join
import time
import asyncio
import pytest
import aiofiles.threadpool
@pytest.mark.asyncio
def test_slow_file(monkeypatch, unused_tcp_port):
"""Monkey patch open and file.read(), and assert the loop still works."""
filename = join(dirname(__file__), '..', 'resources', 'multiline_file.txt')
with open(filename, mode='rb') as f:
contents = f.read()
def new_open(*args, **kwargs):
time.sleep(1)
return open(*args, **kwargs)
monkeypatch.setattr(aiofiles.threadpool, 'sync_open', value=new_open)
@asyncio.coroutine
def serve_file(_, writer):
file = yield from aiofiles.threadpool.open(filename, mode='rb')
try:
while True:
data = yield from file.read(1)
if not data:
break
writer.write(data)
yield from writer.drain()
yield from writer.drain()
finally:
writer.close()
yield from file.close()
@asyncio.coroutine
def return_one(_, writer):
writer.write(b'1')
yield from writer.drain()
writer.close()
counter = 0
@asyncio.coroutine
def spam_client():
nonlocal counter
while True:
r, w = yield from asyncio.open_connection('127.0.0.1', port=30001)
assert (yield from r.read()) == b'1'
counter += 1
w.close()
yield from asyncio.sleep(0.01)
file_server = yield from asyncio.start_server(serve_file,
port=unused_tcp_port)
spam_server = yield from asyncio.start_server(return_one, port=30001)
    spam_task = asyncio.ensure_future(spam_client())  # asyncio.async is a syntax error on Python 3.7+
reader, writer = yield from asyncio.open_connection('127.0.0.1',
port=unused_tcp_port)
actual_contents = yield from reader.read()
writer.close()
yield from asyncio.sleep(0)
file_server.close()
spam_server.close()
yield from file_server.wait_closed()
yield from spam_server.wait_closed()
spam_task.cancel()
assert actual_contents == contents
assert counter > 40
# === ch16-deployment/.venv/lib/python3.10/site-packages/psycopg/sql.py (wsvincent/djangoforbeginners_32, MIT) ===
"""
SQL composition utility module
"""
# Copyright (C) 2020-2021 The Psycopg Team
import codecs
import string
from abc import ABC, abstractmethod
from typing import Any, Iterator, List, Optional, Sequence, Union
from .pq import Escaping
from .abc import AdaptContext
from .adapt import Transformer, PyFormat
from ._encodings import pgconn_encoding
def quote(obj: Any, context: Optional[AdaptContext] = None) -> str:
"""
Adapt a Python object to a quoted SQL string.
Use this function only if you absolutely want to convert a Python string to
an SQL quoted literal to use e.g. to generate batch SQL and you won't have
    a connection available when you will need to use it.
This function is relatively inefficient, because it doesn't cache the
adaptation rules. If you pass a *context* you can adapt the adaptation
rules used, otherwise only global rules are used.
"""
return Literal(obj).as_string(context)
class Composable(ABC):
"""
Abstract base class for objects that can be used to compose an SQL string.
`!Composable` objects can be passed directly to
`~psycopg.Cursor.execute()`, `~psycopg.Cursor.executemany()`,
`~psycopg.Cursor.copy()` in place of the query string.
`!Composable` objects can be joined using the ``+`` operator: the result
will be a `Composed` instance containing the objects joined. The operator
``*`` is also supported with an integer argument: the result is a
`!Composed` instance containing the left argument repeated as many times as
requested.
"""
def __init__(self, obj: Any):
self._obj = obj
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._obj!r})"
@abstractmethod
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
"""
Return the value of the object as bytes.
:param context: the context to evaluate the object into.
:type context: `connection` or `cursor`
The method is automatically invoked by `~psycopg.Cursor.execute()`,
`~psycopg.Cursor.executemany()`, `~psycopg.Cursor.copy()` if a
`!Composable` is passed instead of the query string.
"""
raise NotImplementedError
def as_string(self, context: Optional[AdaptContext]) -> str:
"""
Return the value of the object as string.
:param context: the context to evaluate the string into.
:type context: `connection` or `cursor`
"""
conn = context.connection if context else None
enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
b = self.as_bytes(context)
if isinstance(b, bytes):
return b.decode(enc)
else:
# buffer object
return codecs.lookup(enc).decode(b)[0]
def __add__(self, other: "Composable") -> "Composed":
if isinstance(other, Composed):
return Composed([self]) + other
if isinstance(other, Composable):
return Composed([self]) + Composed([other])
else:
return NotImplemented
def __mul__(self, n: int) -> "Composed":
return Composed([self] * n)
def __eq__(self, other: Any) -> bool:
return type(self) is type(other) and self._obj == other._obj
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
class Composed(Composable):
"""
A `Composable` object made of a sequence of `!Composable`.
The object is usually created using `!Composable` operators and methods.
However it is possible to create a `!Composed` directly specifying a
sequence of objects as arguments: if they are not `!Composable` they will
be wrapped in a `Literal`.
Example::
>>> comp = sql.Composed(
... [sql.SQL("INSERT INTO "), sql.Identifier("table")])
>>> print(comp.as_string(conn))
INSERT INTO "table"
`!Composed` objects are iterable (so they can be used in `SQL.join` for
instance).
"""
_obj: List[Composable]
def __init__(self, seq: Sequence[Any]):
seq = [
obj if isinstance(obj, Composable) else Literal(obj) for obj in seq
]
super().__init__(seq)
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
return b"".join(obj.as_bytes(context) for obj in self._obj)
def __iter__(self) -> Iterator[Composable]:
return iter(self._obj)
def __add__(self, other: Composable) -> "Composed":
if isinstance(other, Composed):
return Composed(self._obj + other._obj)
if isinstance(other, Composable):
return Composed(self._obj + [other])
else:
return NotImplemented
def join(self, joiner: Union["SQL", str]) -> "Composed":
"""
Return a new `!Composed` interposing the *joiner* with the `!Composed` items.
The *joiner* must be a `SQL` or a string which will be interpreted as
an `SQL`.
Example::
>>> fields = sql.Identifier('foo') + sql.Identifier('bar') # a Composed
>>> print(fields.join(', ').as_string(conn))
"foo", "bar"
"""
if isinstance(joiner, str):
joiner = SQL(joiner)
elif not isinstance(joiner, SQL):
raise TypeError(
f"Composed.join() argument must be strings or SQL,"
f" got {joiner!r} instead"
)
return joiner.join(self._obj)
class SQL(Composable):
"""
A `Composable` representing a snippet of SQL statement.
`!SQL` exposes `join()` and `format()` methods useful to create a template
where to merge variable parts of a query (for instance field or table
names).
The *string* doesn't undergo any form of escaping, so it is not suitable to
represent variable identifiers or values: you should only use it to pass
constant strings representing templates or snippets of SQL statements; use
other objects such as `Identifier` or `Literal` to represent variable
parts.
Example::
>>> query = sql.SQL("SELECT {0} FROM {1}").format(
... sql.SQL(', ').join([sql.Identifier('foo'), sql.Identifier('bar')]),
... sql.Identifier('table'))
>>> print(query.as_string(conn))
SELECT "foo", "bar" FROM "table"
"""
_obj: str
_formatter = string.Formatter()
def __init__(self, obj: str):
super().__init__(obj)
if not isinstance(obj, str):
raise TypeError(f"SQL values must be strings, got {obj!r} instead")
def as_string(self, context: Optional[AdaptContext]) -> str:
return self._obj
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
enc = "utf-8"
if context:
conn = context.connection
if conn:
enc = pgconn_encoding(conn.pgconn)
return self._obj.encode(enc)
def format(self, *args: Any, **kwargs: Any) -> Composed:
"""
Merge `Composable` objects into a template.
:param args: parameters to replace to numbered (``{0}``, ``{1}``) or
auto-numbered (``{}``) placeholders
:param kwargs: parameters to replace to named (``{name}``) placeholders
:return: the union of the `!SQL` string with placeholders replaced
:rtype: `Composed`
The method is similar to the Python `str.format()` method: the string
template supports auto-numbered (``{}``), numbered (``{0}``,
``{1}``...), and named placeholders (``{name}``), with positional
arguments replacing the numbered placeholders and keywords replacing
the named ones. However placeholder modifiers (``{0!r}``, ``{0:<10}``)
are not supported.
If a `!Composable` objects is passed to the template it will be merged
according to its `as_string()` method. If any other Python object is
passed, it will be wrapped in a `Literal` object and so escaped
according to SQL rules.
Example::
>>> print(sql.SQL("SELECT * FROM {} WHERE {} = %s")
... .format(sql.Identifier('people'), sql.Identifier('id'))
... .as_string(conn))
SELECT * FROM "people" WHERE "id" = %s
>>> print(sql.SQL("SELECT * FROM {tbl} WHERE name = {name}")
            ...     .format(tbl=sql.Identifier('people'), name="O'Rourke")
... .as_string(conn))
SELECT * FROM "people" WHERE name = 'O''Rourke'
"""
rv: List[Composable] = []
autonum: Optional[int] = 0
for pre, name, spec, conv in self._formatter.parse(self._obj):
if spec:
raise ValueError("no format specification supported by SQL")
if conv:
raise ValueError("no format conversion supported by SQL")
if pre:
rv.append(SQL(pre))
if name is None:
continue
if name.isdigit():
if autonum:
raise ValueError(
"cannot switch from automatic field numbering to manual"
)
rv.append(args[int(name)])
autonum = None
elif not name:
if autonum is None:
raise ValueError(
"cannot switch from manual field numbering to automatic"
)
rv.append(args[autonum])
autonum += 1
else:
rv.append(kwargs[name])
return Composed(rv)
def join(self, seq: Sequence[Composable]) -> Composed:
"""
Join a sequence of `Composable`.
:param seq: the elements to join.
:type seq: iterable of `!Composable`
Use the `!SQL` object's *string* to separate the elements in *seq*.
Note that `Composed` objects are iterable too, so they can be used as
argument for this method.
Example::
>>> snip = sql.SQL(', ').join(
... sql.Identifier(n) for n in ['foo', 'bar', 'baz'])
>>> print(snip.as_string(conn))
"foo", "bar", "baz"
"""
rv = []
it = iter(seq)
try:
rv.append(next(it))
except StopIteration:
pass
else:
for i in it:
rv.append(self)
rv.append(i)
return Composed(rv)
class Identifier(Composable):
"""
A `Composable` representing an SQL identifier or a dot-separated sequence.
Identifiers usually represent names of database objects, such as tables or
fields. PostgreSQL identifiers follow `different rules`__ than SQL string
literals for escaping (e.g. they use double quotes instead of single).
.. __: https://www.postgresql.org/docs/current/sql-syntax-lexical.html# \
SQL-SYNTAX-IDENTIFIERS
Example::
>>> t1 = sql.Identifier("foo")
>>> t2 = sql.Identifier("ba'r")
>>> t3 = sql.Identifier('ba"z')
>>> print(sql.SQL(', ').join([t1, t2, t3]).as_string(conn))
"foo", "ba'r", "ba""z"
Multiple strings can be passed to the object to represent a qualified name,
i.e. a dot-separated sequence of identifiers.
Example::
>>> query = sql.SQL("SELECT {} FROM {}").format(
... sql.Identifier("table", "field"),
... sql.Identifier("schema", "table"))
>>> print(query.as_string(conn))
SELECT "table"."field" FROM "schema"."table"
"""
_obj: Sequence[str]
def __init__(self, *strings: str):
# init super() now to make the __repr__ not explode in case of error
super().__init__(strings)
if not strings:
raise TypeError("Identifier cannot be empty")
for s in strings:
if not isinstance(s, str):
raise TypeError(
f"SQL identifier parts must be strings, got {s!r} instead"
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({', '.join(map(repr, self._obj))})"
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
conn = context.connection if context else None
if not conn:
raise ValueError("a connection is necessary for Identifier")
esc = Escaping(conn.pgconn)
enc = pgconn_encoding(conn.pgconn)
escs = [esc.escape_identifier(s.encode(enc)) for s in self._obj]
return b".".join(escs)
class Literal(Composable):
"""
A `Composable` representing an SQL value to include in a query.
Usually you will want to include placeholders in the query and pass values
as `~cursor.execute()` arguments. If however you really really need to
include a literal value in the query you can use this object.
The string returned by `!as_string()` follows the normal :ref:`adaptation
rules <types-adaptation>` for Python objects.
Example::
>>> s1 = sql.Literal("foo")
>>> s2 = sql.Literal("ba'r")
>>> s3 = sql.Literal(42)
>>> print(sql.SQL(', ').join([s1, s2, s3]).as_string(conn))
'foo', 'ba''r', 42
"""
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
tx = Transformer(context)
dumper = tx.get_dumper(self._obj, PyFormat.TEXT)
return dumper.quote(self._obj)
class Placeholder(Composable):
"""A `Composable` representing a placeholder for query parameters.
If the name is specified, generate a named placeholder (e.g. ``%(name)s``,
``%(name)b``), otherwise generate a positional placeholder (e.g. ``%s``,
``%b``).
The object is useful to generate SQL queries with a variable number of
arguments.
Examples::
>>> names = ['foo', 'bar', 'baz']
>>> q1 = sql.SQL("INSERT INTO my_table ({}) VALUES ({})").format(
... sql.SQL(', ').join(map(sql.Identifier, names)),
... sql.SQL(', ').join(sql.Placeholder() * len(names)))
>>> print(q1.as_string(conn))
INSERT INTO my_table ("foo", "bar", "baz") VALUES (%s, %s, %s)
>>> q2 = sql.SQL("INSERT INTO my_table ({}) VALUES ({})").format(
... sql.SQL(', ').join(map(sql.Identifier, names)),
... sql.SQL(', ').join(map(sql.Placeholder, names)))
>>> print(q2.as_string(conn))
INSERT INTO my_table ("foo", "bar", "baz") VALUES (%(foo)s, %(bar)s, %(baz)s)
"""
def __init__(self, name: str = "", format: PyFormat = PyFormat.AUTO):
super().__init__(name)
if not isinstance(name, str):
raise TypeError(f"expected string as name, got {name!r}")
if ")" in name:
raise ValueError(f"invalid name: {name!r}")
self._format = format
def __repr__(self) -> str:
parts = []
if self._obj:
parts.append(repr(self._obj))
if self._format != PyFormat.AUTO:
parts.append(f"format={PyFormat(self._format).name}")
return f"{self.__class__.__name__}({', '.join(parts)})"
def as_string(self, context: Optional[AdaptContext]) -> str:
code = self._format
return f"%({self._obj}){code}" if self._obj else f"%{code}"
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
conn = context.connection if context else None
enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
return self.as_string(context).encode(enc)
# Literals
NULL = SQL("NULL")
DEFAULT = SQL("DEFAULT")
# === stubs.min/System/Windows/Forms/__init___parts/ToolStripItemAlignment.py (ricardyn/ironpython-stubs, MIT) ===
class ToolStripItemAlignment(Enum,IComparable,IFormattable,IConvertible):
"""
Determines the alignment of a System.Windows.Forms.ToolStripItem in a System.Windows.Forms.ToolStrip.
enum ToolStripItemAlignment,values: Left (0),Right (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Left=None
Right=None
value__=None
# === gitinfo/utils.py (Secozzi/gitinfo, MIT) ===
from __future__ import annotations
from anytree import NodeMixin
from datetime import datetime, timezone
from dotenv import load_dotenv
from os import environ
from os.path import join, dirname
from typing import Tuple, List, Any, Dict, Optional
import re
import requests
from rich.box import Box
__all__ = [
"get_data", "get_token", "get_url_info", "human_size", "humanize_time",
"populate_tree", "ROUNDED_BORDER", "run_query", "set_token", "sort_entries"
]
ROUNDED_BORDER: Box = Box(
"""\
╭──╮
│ │
│ │
│ │
│ │
│ │
│ │
╰──╯
"""
)
def get_token() -> str:
"""
Retrieves the Github Personal Access Token from .env file
"""
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
return environ.get("GITSORT_TOKEN")
def set_token(token: str) -> None:
"""
Set your Github personal access token in order to access
private repositories and extend the usage of the GraphQL API.
"""
import os
from dotenv import load_dotenv
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
gitsort_token = os.environ.get("GITSORT_TOKEN")
if not gitsort_token:
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token set!")
else:
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
while inp not in ["y", "n"]:
print("Invalid answer")
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
if inp == "y":
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token updated!")
def run_query(
query: str,
token: str,
variables: dict | None = None,
headers: dict | None = None
) -> Tuple[dict, str]:
"""
Runs a Github GraphQL query and returns the result
:param query: str
GraphQL query
:param token: str
The users Github Personal Access Token
:param variables: dict
GraphQL Variables
:param headers: dict
Request headers
:return: tuple
The response and rate limit
"""
if not headers:
headers = {"Authorization": f"Bearer {token}"}
request = requests.post(
'https://api.github.com/graphql',
json={'query': query, 'variables': variables},
headers=headers
)
if request.status_code == 200:
return request.json(), request.headers["X-RateLimit-Remaining"]
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def get_data(
query: str,
token: str,
query_variables: Dict[str, str]
) -> Tuple[bool, Any, str]:
"""
Get data from query
:param query: str
Graphql Query
:param token: str
Github Personal Access Token
:param query_variables: dict
Variables used in query
:return: tuple
        returns a tuple of three items:
            0. bool: True if the query failed (item 1 then holds the error message), else False
            1. Any: Data returned from query
            2. str: Rate limit
"""
data, rate_limit = run_query(query, token, query_variables)
if list(data.keys())[0] == "errors":
return True, data["errors"][0]["message"], rate_limit
try:
return False, data["data"]["repository"], rate_limit
except TypeError:
return True, "Query failed. Make sure path and branch is valid.", rate_limit
def get_url_info(url: str) -> Tuple[str, str] | List[str]:
"""
Retrieves owner and repository from a string
:param url: str
Either some form of Github Url or path such as `user/repo/whatever`
:return: tuple | list
Tuple containing owner and repo
"""
is_link = re.compile(r"^(git(hub)?|https?)")
is_git_path = re.compile(r"^[a-zA-Z0-9\-_.]+/[a-zA-Z0-9\-_.]+")
git_url_regex = re.compile(r"^(https|git)?(://|@)?([^/:]+)[/:](?P<owner>[^/:]+)/(?P<name>.+)(.git)?$")
is_git_repo = re.compile(r"((.git)|/)$")
if is_link.match(url):
if is_git_path.match(url):
return url.split("/")[:2]
match = git_url_regex.match(url)
if not match:
raise Exception("Invalid path")
name = match.group("name").split("/")[0]
name = is_git_repo.sub("", name)
owner = match.group("owner")
return owner, name
else:
if url.count("/") > 0:
return url.split("/")[:2]
raise Exception("Link/path must contain both user and repo")
def humanize_time(time_str: str) -> str:
"""
Convert datetime into a more human-friendly format
:param time_str: str
Time string in the ISO 8601 format
:return: str
Human friendly format: <number> <time_period> ago
"""
if not time_str:
return "null"
now = datetime.now()
date = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
date = date.replace(tzinfo=timezone.utc)
diff = int(now.timestamp() - date.timestamp())
times = [
1, 60, 3600, 86400, 604800, 2629746, 31556925
]
times_str = [
"Second", "Minute", "Hour", "Day", "Week", "Month", "Year"
]
temp = [diff // t for t in times][::-1]
for i, t in enumerate(temp):
if t != 0:
return f"{t} {times_str[6-i]}{'' if t == 1 else 's'} ago"
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
"""
Convert bytes into a more human-friendly format
:param bytes: int
Number of bytes
:param units: Optional[List[str]]
units used
:return: str
Return size in human friendly format: <number> <size_unit>
"""
if units is None:
units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
class FileEntry(NodeMixin):
def __init__(
self,
name: str,
size: str | int = None,
parent=None,
children=None
) -> None:
super(FileEntry, self).__init__()
        if size is not None:
self.name = f"{name} ([green]{human_size(size)}[/])"
else:
self.name = f"[blue]{name}/[/]"
self.parent = parent
if children:
self.children = children
class FileEntryRoot(NodeMixin):
def __init__(self, name: str, parent=None, children=None):
super(FileEntryRoot, self).__init__()
self.name = name
self.parent = parent
if children:
self.children = children
def populate_tree(
root_name: str,
data: list,
collapse_blobs: bool = False
) -> "anytree.Node":
"""
Populate the tree
:param root_name: str
Name of root node
:param data: dict
Data
:param collapse_blobs: bool
Collapse files or not
:return: anytree.node
"""
root = FileEntryRoot(root_name)
def edges(tree: FileEntry | FileEntryRoot, parent=None):
collapsed_count = 0
collapsed_size = 0
for entry in tree:
if entry["type"] == "blob":
if collapse_blobs:
collapsed_size += entry["object"]["byteSize"]
collapsed_count += 1
else:
_ = FileEntry(entry["name"], entry["object"]["byteSize"], parent=parent)
else:
node = FileEntry(entry["name"], parent=parent)
if entry["object"]:
edges(entry["object"]["entries"], parent=node)
if collapse_blobs:
_ = FileEntry(f"[orange1]{collapsed_count}[/] Files", collapsed_size, parent=parent)
edges(data, root)
return root
class Reversor:
def __init__(self, obj: Any) -> None:
self.obj = obj
def __eq__(self, other: Any) -> bool:
return other.obj == self.obj
def __lt__(self, other: Any) -> bool:
return other.obj < self.obj
def sort_entries(entries: List[Any]) -> List[Any]:
"""
Recursively sort the data first based on type
then alphabetically
:param entries: list
Entries
:return: list
Entries but sorted
"""
entries = sorted(
entries, key=lambda x: (
Reversor(x["type"]), # First sort by type (reversed)
x["name"].lower() # Then sort by alphabetical
)
)
for entry in entries:
if entry["type"] == "tree" and entry["object"]:
entry["object"]["entries"] = sort_entries(entry["object"]["entries"])
return entries
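# A minimal usage sketch for the helpers above (the URL and timestamp are
# illustrative; none of these calls needs network access):
def _example_usage():
    owner, name = get_url_info("https://github.com/Secozzi/gitinfo")
    size_label = human_size(1536)                       # "1.5 KB"
    age_label = humanize_time("2021-01-01T00:00:00Z")   # e.g. "1 Year ago"
    return owner, name, size_label, age_label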
# === casinotools/fileformat/casino3/IntensityImage.py (drix00/pycasinotools, Apache-2.0) ===
#!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 Hendrix Demers"
__license__ = ""
# Standard library modules.
import logging
import os.path
# Third party modules.
from PIL import Image
# Local modules.
import casinotools.fileformat.casino3.File as File
import casinotools.fileformat.casino3.ScanPointResults as ScanPointResults
# Globals and constants variables.
INTENSITY_TRANSMITTED = "TransmittedIntensity"
INTENSITY_TRANSMITTED_DETECTED = "TransmittedDetectedIntensity"
class IntensityImage(object):
def __init__(self, filepath, imageName="IntensityImage", intensityType=INTENSITY_TRANSMITTED_DETECTED):
self._filepath = filepath
self._imageName = imageName
self._intensityType = intensityType
self._imageSize = (800, 600)
self._createGetIntensityMethod()
def _createGetIntensityMethod(self):
if self._intensityType == INTENSITY_TRANSMITTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedCoefficient
elif self._intensityType == INTENSITY_TRANSMITTED_DETECTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedDetectedCoefficient
def _createImage(self):
self._extractData()
self._analyzePositions()
self._createRawImage2()
def _extractData(self):
casinoFile = File.File(self._filepath)
casinoFile.open()
assert 1 == casinoFile.getNumberSimulations()
scanPointsResults = casinoFile.getScanPointResults()
self._numberScanPoints = len(scanPointsResults)
self._positions = []
self._intensities = {}
for scanPointResults in scanPointsResults:
position = scanPointResults.getPosition()
self._positions.append(position)
self._intensities[position] = self._getIntensity(scanPointResults)
def _analyzePositions(self):
self._xSet = set()
self._ySet = set()
self._zSet = set()
for position in self._positions:
x, y, z = position
self._xSet.add(x)
self._ySet.add(y)
self._zSet.add(z)
numberUniqueX = len(self._xSet)
numberUniqueY = len(self._ySet)
numberUniqueZ = len(self._zSet)
imageType = None
if numberUniqueX > 1:
if numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "3D"
else:
imageType = "XY"
elif numberUniqueZ > 1:
imageType = "XZ"
else:
imageType = "X"
elif numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "YZ"
else:
imageType = "Y"
elif numberUniqueZ > 1:
imageType = "Z"
else:
imageType = "P"
self._imageType = imageType
logging.info("Number unique X: %i", len(self._xSet))
logging.info("Number unique Y: %i", len(self._ySet))
logging.info("Number unique Z: %i", len(self._zSet))
logging.info("Image type: %s", imageType)
def _createRawImage(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
data = []
            # row-major fill: outer loop over y-values, inner loop over x-values
            for y in sorted(self._ySet):
                for x in sorted(self._xSet):
position = x, y, z
intensity = self._intensities[position]
data.append(intensity)
self._imageRaw.putdata(data)
def _createRawImage2(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
pix = self._imageRaw.load()
for indexH, x in enumerate(sorted(self._xSet)):
for indexV, y in enumerate(sorted(self._ySet)):
position = (x, y, z)
#index = positions.index(position)
value = self._intensities[position]
pix[indexH, indexV] = value
def save(self, path):
self._saveRawImage(path)
#self._saveImage(path)
def _saveRawImage(self, path):
imageFilepath = os.path.join(path, self._imageName + "_raw.tiff")
self._imageRaw.save(imageFilepath)
def _saveImage(self, path):
size = self._imageRaw.size
zoomFactor = self._computeZoomFactor(size)
newSize = size[0] * zoomFactor, size[1] * zoomFactor
filters = {"near": Image.NEAREST, "bilin": Image.BILINEAR,
"bicub": Image.BICUBIC, "anti": Image.ANTIALIAS}
for name, filter in filters.items():
imageFilepath = os.path.join(path, self._imageName + "_" + name + ".tiff")
image = self._imageRaw.resize(newSize, filter)
image.save(imageFilepath)
imageFilepath = os.path.join(path, self._imageName + ".tiff")
tmpImage = self._imageRaw.resize(newSize, Image.BICUBIC)
#tmpImage = tmpImage.convert('L')
image = Image.new(tmpImage.mode, self._imageSize)
topCorner = (self._imageSize[0] - tmpImage.size[0]) / 2, (self._imageSize[1] - tmpImage.size[1]) / 2
box = topCorner[0], topCorner[1], topCorner[0] + tmpImage.size[0], topCorner[1] + tmpImage.size[1]
image.paste(tmpImage, box)
image.save(imageFilepath)
#tmpImage.save(imageFilepath)
def _computeZoomFactor(self, size):
xZoom = int(self._imageSize[0] / size[0])
yZoom = int(self._imageSize[1] / size[1])
zoom = min(xZoom, yZoom)
return zoom
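# A minimal sketch using the non-detected intensity type (the function name and
# file paths are illustrative; the .cas path must point to a CASINO 3 result file):
def runTransmitted(casFilepath, outputPath):
    image = IntensityImage(casFilepath, imageName="TransmittedIntensityImage",
                           intensityType=INTENSITY_TRANSMITTED)
    image._createImage()
    image.save(outputPath)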
def run():
from pkg_resources import resource_filename #@UnresolvedImport
resultsPath = resource_filename(__name__, "../../test_data/casino3.x/createImage")
casBinnedFilepath = os.path.join(resultsPath, "Au_C_thin_1nm_Inside_100ke_binned.cas")
imageBinned = IntensityImage(casBinnedFilepath)
imageBinned._createImage()
imageBinned.save(resultsPath)
if __name__ == '__main__':
run()
| 35.617486
| 109
| 0.596502
|
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 Hendrix Demers"
__license__ = ""
import logging
import os.path
from PIL import Image
import casinotools.fileformat.casino3.File as File
import casinotools.fileformat.casino3.ScanPointResults as ScanPointResults
INTENSITY_TRANSMITTED = "TransmittedIntensity"
INTENSITY_TRANSMITTED_DETECTED = "TransmittedDetectedIntensity"
class IntensityImage(object):
def __init__(self, filepath, imageName="IntensityImage", intensityType=INTENSITY_TRANSMITTED_DETECTED):
self._filepath = filepath
self._imageName = imageName
self._intensityType = intensityType
self._imageSize = (800, 600)
self._createGetIntensityMethod()
def _createGetIntensityMethod(self):
if self._intensityType == INTENSITY_TRANSMITTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedCoefficient
elif self._intensityType == INTENSITY_TRANSMITTED_DETECTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedDetectedCoefficient
def _createImage(self):
self._extractData()
self._analyzePositions()
self._createRawImage2()
def _extractData(self):
casinoFile = File.File(self._filepath)
casinoFile.open()
assert 1 == casinoFile.getNumberSimulations()
scanPointsResults = casinoFile.getScanPointResults()
self._numberScanPoints = len(scanPointsResults)
self._positions = []
self._intensities = {}
for scanPointResults in scanPointsResults:
position = scanPointResults.getPosition()
self._positions.append(position)
self._intensities[position] = self._getIntensity(scanPointResults)
def _analyzePositions(self):
self._xSet = set()
self._ySet = set()
self._zSet = set()
for position in self._positions:
x, y, z = position
self._xSet.add(x)
self._ySet.add(y)
self._zSet.add(z)
numberUniqueX = len(self._xSet)
numberUniqueY = len(self._ySet)
numberUniqueZ = len(self._zSet)
imageType = None
if numberUniqueX > 1:
if numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "3D"
else:
imageType = "XY"
elif numberUniqueZ > 1:
imageType = "XZ"
else:
imageType = "X"
elif numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "YZ"
else:
imageType = "Y"
elif numberUniqueZ > 1:
imageType = "Z"
else:
imageType = "P"
self._imageType = imageType
logging.info("Number unique X: %i", len(self._xSet))
logging.info("Number unique Y: %i", len(self._ySet))
logging.info("Number unique Z: %i", len(self._zSet))
logging.info("Image type: %s", imageType)
def _createRawImage(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
data = []
            for y in sorted(self._ySet):
                for x in sorted(self._xSet):
position = x, y, z
intensity = self._intensities[position]
data.append(intensity)
self._imageRaw.putdata(data)
def _createRawImage2(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
pix = self._imageRaw.load()
for indexH, x in enumerate(sorted(self._xSet)):
for indexV, y in enumerate(sorted(self._ySet)):
position = (x, y, z)
value = self._intensities[position]
pix[indexH, indexV] = value
def save(self, path):
self._saveRawImage(path)
def _saveRawImage(self, path):
imageFilepath = os.path.join(path, self._imageName + "_raw.tiff")
self._imageRaw.save(imageFilepath)
def _saveImage(self, path):
size = self._imageRaw.size
zoomFactor = self._computeZoomFactor(size)
newSize = size[0] * zoomFactor, size[1] * zoomFactor
filters = {"near": Image.NEAREST, "bilin": Image.BILINEAR,
"bicub": Image.BICUBIC, "anti": Image.ANTIALIAS}
for name, filter in filters.items():
imageFilepath = os.path.join(path, self._imageName + "_" + name + ".tiff")
image = self._imageRaw.resize(newSize, filter)
image.save(imageFilepath)
imageFilepath = os.path.join(path, self._imageName + ".tiff")
tmpImage = self._imageRaw.resize(newSize, Image.BICUBIC)
image = Image.new(tmpImage.mode, self._imageSize)
        topCorner = (self._imageSize[0] - tmpImage.size[0]) // 2, (self._imageSize[1] - tmpImage.size[1]) // 2
box = topCorner[0], topCorner[1], topCorner[0] + tmpImage.size[0], topCorner[1] + tmpImage.size[1]
image.paste(tmpImage, box)
image.save(imageFilepath)
def _computeZoomFactor(self, size):
xZoom = int(self._imageSize[0] / size[0])
yZoom = int(self._imageSize[1] / size[1])
zoom = min(xZoom, yZoom)
return zoom
def run():
from pkg_resources import resource_filename
resultsPath = resource_filename(__name__, "../../test_data/casino3.x/createImage")
casBinnedFilepath = os.path.join(resultsPath, "Au_C_thin_1nm_Inside_100ke_binned.cas")
imageBinned = IntensityImage(casBinnedFilepath)
imageBinned._createImage()
imageBinned.save(resultsPath)
if __name__ == '__main__':
run()
| true
| true
|
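The casinotools record above builds a 32-bit float PIL image by writing one intensity per (x, y) scan point. A minimal standalone sketch of that raster-fill pattern, assuming a positions-to-intensity dict; the grid and values below are made up for illustration and are not part of the dataset record:

from PIL import Image

# Illustrative 3 x 2 grid of scan points at a single z plane.
intensities = {(x, y, 0.0): float(x + 10 * y) for x in range(3) for y in range(2)}
xs = sorted({p[0] for p in intensities})
ys = sorted({p[1] for p in intensities})
z = 0.0

# Mode "F" gives one 32-bit float per pixel; width follows x, height follows y.
image = Image.new("F", (len(xs), len(ys)))
pix = image.load()
for ix, x in enumerate(xs):
    for iy, y in enumerate(ys):
        pix[ix, iy] = intensities[(x, y, z)]

image.save("intensity_raw.tiff")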
f715af0a24dd23852f403a9a2f9f37a1c461984d
| 66
|
py
|
Python
|
programaker_twitter_service/__init__.py
|
plaza-project/twitter-bridge
|
0b1807fef5817b2535eecc3b795e58685ff08ff5
|
[
"Apache-2.0"
] | 1
|
2020-12-19T05:04:19.000Z
|
2020-12-19T05:04:19.000Z
|
programaker_twitter_service/__init__.py
|
plaza-project/twitter-bridge
|
0b1807fef5817b2535eecc3b795e58685ff08ff5
|
[
"Apache-2.0"
] | null | null | null |
programaker_twitter_service/__init__.py
|
plaza-project/twitter-bridge
|
0b1807fef5817b2535eecc3b795e58685ff08ff5
|
[
"Apache-2.0"
] | null | null | null |
from . import config, storage
from .listener import TweetListener
| 22
| 35
| 0.818182
|
from . import config, storage
from .listener import TweetListener
| true
| true
|
f715af426554e6845a9d59b633445b811a99ff66
| 1,074
|
py
|
Python
|
core/api/base.py
|
care2donate/care2donate
|
5f99e7169653a96b6e6db44f90afee17758a4480
|
[
"MIT"
] | 1
|
2021-05-14T15:21:42.000Z
|
2021-05-14T15:21:42.000Z
|
core/api/base.py
|
care2donate/care2donate
|
5f99e7169653a96b6e6db44f90afee17758a4480
|
[
"MIT"
] | 2
|
2021-05-13T10:26:36.000Z
|
2021-05-13T19:30:25.000Z
|
core/api/base.py
|
care2donate/care2donate
|
5f99e7169653a96b6e6db44f90afee17758a4480
|
[
"MIT"
] | null | null | null |
from django.db import transaction
from rest_framework import generics, mixins
class BaseAPIView(generics.GenericAPIView,
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
@transaction.atomic
def get(self, request, *args, **kwargs):
if kwargs.get('pk'):
return self.retrieve(request, *args, **kwargs)
else:
return self.list(request, *args, **kwargs)
@transaction.atomic
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
@transaction.atomic
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
@transaction.atomic
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
@transaction.atomic
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
| 31.588235
| 60
| 0.620112
|
from django.db import transaction
from rest_framework import generics, mixins
class BaseAPIView(generics.GenericAPIView,
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
@transaction.atomic
def get(self, request, *args, **kwargs):
if kwargs.get('pk'):
return self.retrieve(request, *args, **kwargs)
else:
return self.list(request, *args, **kwargs)
@transaction.atomic
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
@transaction.atomic
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
@transaction.atomic
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
@transaction.atomic
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
| true
| true
|
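The BaseAPIView record above bundles DRF's generic mixins behind plain HTTP verb methods wrapped in transactions. A hedged usage sketch for wiring it to a model endpoint; the Donation model, serializer, and URL patterns are hypothetical and not taken from the repository:

# Hypothetical subclass; the mixins only need queryset and serializer_class.
from rest_framework import serializers
from core.api.base import BaseAPIView   # import path assumed from the record's file path
from donations.models import Donation   # hypothetical app and model

class DonationSerializer(serializers.ModelSerializer):
    class Meta:
        model = Donation
        fields = "__all__"

class DonationAPIView(BaseAPIView):
    queryset = Donation.objects.all()
    serializer_class = DonationSerializer

# urls.py (hypothetical): the same view serves list/create without pk and
# retrieve/update/delete when pk is present, matching get()'s pk dispatch.
# path("donations/", DonationAPIView.as_view()),
# path("donations/<int:pk>/", DonationAPIView.as_view()),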
f715b0166e705758a0701b19f80e34986238aa34
| 4,522
|
py
|
Python
|
tests/contrib/sensors/test_wasb_sensor.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 4
|
2015-11-12T10:58:54.000Z
|
2017-08-05T06:41:36.000Z
|
tests/contrib/sensors/test_wasb_sensor.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 13
|
2018-07-11T10:45:30.000Z
|
2018-08-18T00:43:30.000Z
|
tests/contrib/sensors/test_wasb_sensor.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 5
|
2020-05-12T13:38:14.000Z
|
2022-03-17T17:17:50.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import datetime
from airflow import DAG, configuration
from airflow.contrib.sensors.wasb_sensor import WasbBlobSensor
from airflow.contrib.sensors.wasb_sensor import WasbPrefixSensor
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestWasbBlobSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.blob_name, self._config['blob_name'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_blob.assert_called_once_with(
'container', 'blob', timeout=2
)
class TestWasbPrefixSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'prefix': 'prefix',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.prefix, self._config['prefix'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_prefix.assert_called_once_with(
'container', 'prefix', timeout=2
)
if __name__ == '__main__':
unittest.main()
| 31.402778
| 79
| 0.627377
|
import unittest
import datetime
from airflow import DAG, configuration
from airflow.contrib.sensors.wasb_sensor import WasbBlobSensor
from airflow.contrib.sensors.wasb_sensor import WasbPrefixSensor
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestWasbBlobSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.blob_name, self._config['blob_name'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_blob.assert_called_once_with(
'container', 'blob', timeout=2
)
class TestWasbPrefixSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'prefix': 'prefix',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.prefix, self._config['prefix'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_prefix.assert_called_once_with(
'container', 'prefix', timeout=2
)
if __name__ == '__main__':
unittest.main()
| true
| true
|
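The test record above fixes the constructor surface of the WASB sensors: container_name, blob or prefix, wasb_conn_id, check_options, and timeout. A minimal DAG sketch exercising the same arguments; the connection id, container, and blob names are placeholders:

import datetime

from airflow import DAG
from airflow.contrib.sensors.wasb_sensor import WasbBlobSensor

dag = DAG("wasb_example",
          start_date=datetime.datetime(2017, 1, 1),
          schedule_interval=None)

wait_for_blob = WasbBlobSensor(
    task_id="wait_for_blob",
    container_name="container",     # placeholder container
    blob_name="blob",               # placeholder blob
    wasb_conn_id="wasb_default",    # assumed connection id
    check_options={"timeout": 2},   # forwarded to WasbHook.check_for_blob
    timeout=100,                    # sensor-level timeout in seconds
    dag=dag,
)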