text stringlengths 957 885k |
|---|
<gh_stars>0
import bpy
from mathutils import Vector
from ...utils import copy_bone, flip_bone, put_bone, org
from ...utils import strip_org, make_deformer_name, connected_children_names
from ...utils import create_circle_widget, create_sphere_widget, create_widget
from ...utils import MetarigError, make_mechanism_name, create_cube_widget
from rna_prop_ui import rna_idprop_ui_prop_get
# UI snippet injected into the generated rig_ui.py: shows the torso's
# custom-property sliders whenever one of the rig's controls is selected.
# BUGFIX: the two layout.prop lines must be indented — this string is
# executed as Python by the rig UI, and without the indentation the
# 'if' block raises an IndentationError at runtime.
script = """
controls = [%s]
torso    = '%s'

if is_selected( controls ):
    layout.prop( pose_bones[ torso ], '["%s"]', slider = True )
    layout.prop( pose_bones[ torso ], '["%s"]', slider = True )
"""
class Rig:
    """Rigify 'super torso' rig.

    Builds neck/head, chest, hips and pivot (torso) controls, plus MCH,
    tweak and DEF bone chains, from a connected chain of ORG spine bones.
    """

    def __init__(self, obj, bone_name, params):
        """ Initialize torso rig and key rig properties """
        eb = obj.data.edit_bones

        self.obj = obj
        self.org_bones = [bone_name] + connected_children_names(obj, bone_name)
        self.params = params
        # Total rest length of the spine chain; used to size control bones.
        self.spine_length = sum(eb[b].length for b in self.org_bones)

        # Check if user provided the positions of the neck and pivot
        if params.neck_pos and params.pivot_pos:
            self.neck_pos = params.neck_pos
            self.pivot_pos = params.pivot_pos
        else:
            raise MetarigError(
                "RIGIFY ERROR: please specify neck and pivot bone positions"
            )

        # Check if neck is lower than pivot
        if params.neck_pos <= params.pivot_pos:
            raise MetarigError(
                "RIGIFY ERROR: Neck cannot be below or the same as pivot"
            )

        # TODO:
        # Limit neck_pos prop  to 1 --> num of bones - 1 (last is head)
        # Limit pivot_pos prop to 2 --> num of bones (must leave place for lower torso)

        # tail_pos is optional; only set the attribute when the user enabled it.
        if params.tail_pos:
            self.tail_pos = params.tail_pos

        # Assign values to tweak layers props if opted by user
        if params.tweak_extra_layers:
            self.tweak_layers = list(params.tweak_layers)
        else:
            self.tweak_layers = None

        # Report error if the chain has 4 or fewer bones (too few for this rig)
        if len(self.org_bones) <= 4:
            raise MetarigError(
                # BUGFIX: the original format string had no %s placeholder,
                # which made this raise fail with a TypeError instead.
                "RIGIFY ERROR: invalid rig structure on bone: %s"
                % (strip_org(bone_name))
            )

    def build_bone_structure(self):
        """ Divide meta-rig into lists of bones according to torso rig anatomy:
            Neck --> Upper torso --> Lower torso --> Tail (optional) """
        if self.pivot_pos and self.neck_pos:
            # Positions are 1-based in the UI; convert to list indices.
            neck_index = self.neck_pos - 1
            pivot_index = self.pivot_pos - 1

            tail_index = 0
            if hasattr(self, 'tail_pos'):  # consistent with create_pivot()
                tail_index = self.tail_pos - 1

            neck_bones = self.org_bones[neck_index::]
            upper_torso_bones = self.org_bones[pivot_index:neck_index]
            lower_torso_bones = self.org_bones[tail_index:pivot_index]

            tail_bones = []
            if tail_index:
                # BUGFIX: the original used a stride slice
                # (org_bones[::tail_index+1]) which picked every
                # (tail_index+1)-th bone; the tail is the contiguous run
                # of bones below the lower torso.
                tail_bones = self.org_bones[:tail_index]

            return {
                'neck': neck_bones,
                'upper': upper_torso_bones,
                'lower': lower_torso_bones,
                'tail': tail_bones
            }

        else:
            return 'ERROR'

    def orient_bone(self, eb, axis, scale, reverse=False):
        """ Point edit bone *eb* along *axis* ('x', 'y' or 'z') with length
            *scale* (in object space). With reverse=True the bone is flipped
            to point the opposite way from its former tail. """
        v = Vector((0, 0, 0))
        setattr(v, axis, scale)

        if reverse:
            tail_vec = v * self.obj.matrix_world
            eb.head[:] = eb.tail
            eb.tail[:] = eb.head + tail_vec
        else:
            tail_vec = v * self.obj.matrix_world
            eb.tail[:] = eb.head + tail_vec

    def create_pivot(self, pivot):
        """ Create the pivot control and mechanism bones """
        org_bones = self.org_bones
        pivot_name = org_bones[pivot - 1]

        bpy.ops.object.mode_set(mode='EDIT')
        eb = self.obj.data.edit_bones

        # Create torso control bone
        torso_name = 'torso'
        ctrl_name = copy_bone(self.obj, pivot_name, torso_name)
        ctrl_eb = eb[ctrl_name]
        self.orient_bone(ctrl_eb, 'y', self.spine_length / 2.5)

        # Create mch_pivot
        mch_name = make_mechanism_name('pivot')
        mch_name = copy_bone(self.obj, ctrl_name, mch_name)
        mch_eb = eb[mch_name]
        mch_eb.length /= 4

        # Positioning pivot in a more usable location for animators
        if hasattr(self, 'tail_pos') and self.tail_pos > 0:
            pivot_loc = eb[org_bones[pivot - 1]].head
        else:
            pivot_loc = (eb[org_bones[0]].head + eb[org_bones[0]].tail) / 2

        put_bone(self.obj, ctrl_name, pivot_loc)

        return {
            'ctrl': ctrl_name,
            'mch': mch_name
        }

    def create_deform(self):
        """ Create one DEF- bone per ORG bone and return their names. """
        bpy.ops.object.mode_set(mode='EDIT')

        def_bones = []
        # Loop variable renamed from 'org' so it no longer shadows the
        # imported org() helper used by the other create_* methods.
        for org_bone in self.org_bones:
            def_name = make_deformer_name(strip_org(org_bone))
            def_name = copy_bone(self.obj, org_bone, def_name)
            def_bones.append(def_name)

        return def_bones

    def create_neck(self, neck_bones):
        """ Create the neck/head controls plus their MCH and tweak bones. """
        bpy.ops.object.mode_set(mode='EDIT')
        eb = self.obj.data.edit_bones

        # Create neck control
        neck = copy_bone(self.obj, org(neck_bones[0]), 'neck')
        neck_eb = eb[neck]

        # Neck spans all neck bones (except head)
        neck_eb.tail[:] = eb[org(neck_bones[-1])].head

        # Create head control
        head = copy_bone(self.obj, org(neck_bones[-1]), 'head')

        # MCH bones
        # Neck MCH stretch
        mch_str = copy_bone(self.obj, neck, make_mechanism_name('STR-neck'))

        # Neck MCH rotation
        mch_neck = copy_bone(
            self.obj, neck, make_mechanism_name('ROT-neck')
        )
        self.orient_bone(eb[mch_neck], 'y', self.spine_length / 10)

        # Head MCH rotation
        mch_head = copy_bone(
            self.obj, head, make_mechanism_name('ROT-head')
        )
        self.orient_bone(eb[mch_head], 'y', self.spine_length / 10)

        twk, mch = [], []

        # Intermediary bones
        for b in neck_bones[1:-1]:  # All except 1st neck and (last) head
            mch_name = copy_bone(self.obj, org(b), make_mechanism_name(b))
            eb[mch_name].length /= 4
            mch += [mch_name]

        # Tweak bones
        for b in neck_bones[:-1]:  # All except last bone
            twk_name = "tweak_" + b
            twk_name = copy_bone(self.obj, org(b), twk_name)
            eb[twk_name].length /= 2
            twk += [twk_name]

        return {
            'ctrl_neck': neck,
            'ctrl': head,
            'mch_str': mch_str,
            'mch_neck': mch_neck,
            'mch_head': mch_head,
            'mch': mch,
            'tweak': twk
        }

    def create_chest(self, chest_bones):
        """ Create the chest control plus MCH/tweak bones of the upper torso. """
        bpy.ops.object.mode_set(mode='EDIT')
        eb = self.obj.data.edit_bones

        # Create chest control bone
        chest = copy_bone(self.obj, org(chest_bones[0]), 'chest')
        self.orient_bone(eb[chest], 'y', self.spine_length / 3)

        # create chest mch_wgt
        mch_wgt = copy_bone(
            self.obj, org(chest_bones[-1]),
            make_mechanism_name('WGT-chest')
        )

        # Create mch and twk bones
        twk, mch = [], []

        for b in chest_bones:
            mch_name = copy_bone(self.obj, org(b), make_mechanism_name(b))
            self.orient_bone(eb[mch_name], 'y', self.spine_length / 10)

            twk_name = "tweak_" + b
            twk_name = copy_bone(self.obj, org(b), twk_name)
            eb[twk_name].length /= 2

            mch += [mch_name]
            twk += [twk_name]

        return {
            'ctrl': chest,
            'mch': mch,
            'tweak': twk,
            'mch_wgt': mch_wgt
        }

    def create_hips(self, hip_bones):
        """ Create the hips control plus MCH/tweak bones of the lower torso. """
        bpy.ops.object.mode_set(mode='EDIT')
        eb = self.obj.data.edit_bones

        # Create hips control bone (points downward, hence reverse=True)
        hips = copy_bone(self.obj, org(hip_bones[-1]), 'hips')
        self.orient_bone(
            eb[hips],
            'y',
            self.spine_length / 4,
            reverse=True
        )

        # create hips mch_wgt
        mch_wgt = copy_bone(
            self.obj, org(hip_bones[0]),
            make_mechanism_name('WGT-hips')
        )

        # Create mch and tweak bones
        twk, mch = [], []

        for b in hip_bones:
            mch_name = copy_bone(self.obj, org(b), make_mechanism_name(b))
            self.orient_bone(
                eb[mch_name], 'y', self.spine_length / 10, reverse=True
            )

            twk_name = "tweak_" + b
            twk_name = copy_bone(self.obj, org(b), twk_name)
            eb[twk_name].length /= 2

            mch += [mch_name]
            twk += [twk_name]

        return {
            'ctrl': hips,
            'mch': mch,
            'tweak': twk,
            'mch_wgt': mch_wgt
        }

    def create_tail(self, tail_bones):
        """ TODO: tail rig is not implemented yet (currently returns None). """
        pass

    def parent_bones(self, bones):
        """ Parent all generated bones into the final hierarchy. """
        org_bones = self.org_bones
        bpy.ops.object.mode_set(mode='EDIT')
        eb = self.obj.data.edit_bones

        # Parent deform bones
        for i, b in enumerate(bones['def']):
            if i > 0:  # For all bones but the first (which has no parent)
                eb[b].parent = eb[bones['def'][i - 1]]  # to previous
                eb[b].use_connect = True

        # Parent control bones
        # Head control => MCH-rotation_head
        eb[bones['neck']['ctrl']].parent = eb[bones['neck']['mch_head']]

        # MCH stretch => neck ctrl
        eb[bones['neck']['mch_str']].parent = eb[bones['neck']['ctrl_neck']]

        # Neck control => MCH-rotation_neck
        eb[bones['neck']['ctrl_neck']].parent = eb[bones['neck']['mch_neck']]

        # Parent hips and chest controls to torso
        eb[bones['chest']['ctrl']].parent = eb[bones['pivot']['ctrl']]
        eb[bones['hips']['ctrl']].parent = eb[bones['pivot']['ctrl']]

        # Parent mch bones
        # Neck mch
        eb[bones['neck']['mch_head']].parent = eb[bones['neck']['ctrl_neck']]

        parent = eb[bones['neck']['mch_str']]
        for b in [eb[n] for n in bones['neck']['mch']]:
            b.parent = parent

        # Chest mch bones and neck mch (chained bottom-up from the torso)
        chest_mch = bones['chest']['mch'] + [bones['neck']['mch_neck']]
        for i, b in enumerate(chest_mch):
            if i == 0:
                eb[b].parent = eb[bones['pivot']['ctrl']]
            else:
                eb[b].parent = eb[chest_mch[i - 1]]

        # Hips mch bones (chained top-down: each parents to the next one up)
        for i, b in enumerate(bones['hips']['mch']):
            if i == len(bones['hips']['mch']) - 1:
                eb[b].parent = eb[bones['pivot']['ctrl']]
            else:
                eb[b].parent = eb[bones['hips']['mch'][i + 1]]

        # mch pivot
        eb[bones['pivot']['mch']].parent = eb[bones['chest']['mch'][0]]

        # MCH widgets
        eb[bones['chest']['mch_wgt']].parent = eb[bones['chest']['mch'][-1]]
        eb[bones['hips']['mch_wgt']].parent = eb[bones['hips']['mch'][0]]

        # Tweaks

        # Neck tweaks
        for i, twk in enumerate(bones['neck']['tweak']):
            if i == 0:
                eb[twk].parent = eb[bones['neck']['ctrl_neck']]
            else:
                eb[twk].parent = eb[bones['neck']['mch'][i - 1]]

        # Chest tweaks
        for twk, mch in zip(bones['chest']['tweak'], bones['chest']['mch']):
            if bones['chest']['tweak'].index(twk) == 0:
                eb[twk].parent = eb[bones['pivot']['mch']]
            else:
                eb[twk].parent = eb[mch]

        # Hips tweaks
        for i, twk in enumerate(bones['hips']['tweak']):
            if i == 0:
                eb[twk].parent = eb[bones['hips']['mch'][i]]
            else:
                eb[twk].parent = eb[bones['hips']['mch'][i - 1]]

        # Parent orgs to matching tweaks (loop var renamed from 'org' to
        # avoid shadowing the imported org() helper)
        tweaks = bones['hips']['tweak'] + bones['chest']['tweak']
        tweaks += bones['neck']['tweak'] + [bones['neck']['ctrl']]

        if 'tail' in bones.keys():
            tweaks += bones['tail']['tweak']

        for org_bone, twk in zip(org_bones, tweaks):
            eb[org_bone].parent = eb[twk]

    def make_constraint(self, bone, constraint):
        """ Add a constraint to pose bone *bone*.

            *constraint* is a dict with a 'constraint' key (the constraint
            type) plus any number of attributes to set on it ('subtarget',
            'influence', spaces, ...). The target is always the rig object.
        """
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones

        owner_pb = pb[bone]
        const = owner_pb.constraints.new(constraint['constraint'])
        const.target = self.obj

        # Filter constraint props to those that actually exist in the current
        # type of constraint, then assign values to each
        for p in [k for k in constraint.keys() if k in dir(const)]:
            setattr(const, p, constraint[p])

    def constrain_bones(self, bones):
        """ Add all constraints: head/neck MCH rotation and stretch, the
            intermediary MCH chains, the MCH pivot, and the DEF chain. """
        # MCH bones

        # head and neck MCH bones
        for b in [bones['neck']['mch_head'], bones['neck']['mch_neck']]:
            self.make_constraint(b, {
                'constraint': 'COPY_ROTATION',
                'subtarget': bones['pivot']['ctrl'],
            })
            self.make_constraint(b, {
                'constraint': 'COPY_SCALE',
                'subtarget': bones['pivot']['ctrl'],
            })

        # Neck MCH Stretch
        self.make_constraint(bones['neck']['mch_str'], {
            'constraint': 'DAMPED_TRACK',
            'subtarget': bones['neck']['ctrl'],
        })
        self.make_constraint(bones['neck']['mch_str'], {
            'constraint': 'STRETCH_TO',
            'subtarget': bones['neck']['ctrl'],
        })

        # Intermediary mch bones
        intermediaries = [bones['neck'], bones['chest'], bones['hips']]

        if 'tail' in bones.keys():
            # NOTE(review): this probably should be
            # `intermediaries += [bones['tail']]`; the tail rig is still a
            # TODO (create_tail returns None), so this branch cannot
            # currently run successfully anyway.
            intermediaries += bones['tail']

        for i, l in enumerate(intermediaries):
            mch = l['mch']
            factor = float(1 / len(l['tweak']))

            for j, b in enumerate(mch):
                if i == 0:
                    # Neck chain: rotation influence ramps up along the chain
                    nfactor = float((j + 1) / len(mch))
                    self.make_constraint(b, {
                        'constraint': 'COPY_ROTATION',
                        'subtarget': l['ctrl'],
                        'influence': nfactor
                    })
                else:
                    self.make_constraint(b, {
                        'constraint': 'COPY_TRANSFORMS',
                        'subtarget': l['ctrl'],
                        'influence': factor,
                        'owner_space': 'LOCAL',
                        'target_space': 'LOCAL'
                    })

        # MCH pivot
        self.make_constraint(bones['pivot']['mch'], {
            'constraint': 'COPY_TRANSFORMS',
            'subtarget': bones['hips']['mch'][-1],
            'owner_space': 'LOCAL',
            'target_space': 'LOCAL'
        })

        # DEF bones: each follows its tweak and stretches to the next one
        deform = bones['def']
        tweaks = bones['hips']['tweak'] + bones['chest']['tweak']
        tweaks += bones['neck']['tweak'] + [bones['neck']['ctrl']]

        for d, t in zip(deform, tweaks):
            tidx = tweaks.index(t)

            self.make_constraint(d, {
                'constraint': 'COPY_TRANSFORMS',
                'subtarget': t
            })

            if tidx != len(tweaks) - 1:
                self.make_constraint(d, {
                    'constraint': 'DAMPED_TRACK',
                    'subtarget': tweaks[tidx + 1],
                })
                self.make_constraint(d, {
                    'constraint': 'STRETCH_TO',
                    'subtarget': tweaks[tidx + 1],
                })

        pb = self.obj.pose.bones
        for t in tweaks:
            if t != bones['neck']['ctrl']:
                pb[t].rotation_mode = 'ZXY'

    def create_drivers(self, bones):
        """ Add head_follow/neck_follow custom properties on the torso bone
            and drive the matching MCH copy-rotation constraints with them. """
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones

        # Setting the torso's props
        torso = pb[bones['pivot']['ctrl']]

        props = ["head_follow", "neck_follow"]
        owners = [bones['neck']['mch_head'], bones['neck']['mch_neck']]

        for prop in props:
            if prop == 'neck_follow':
                torso[prop] = 0.5
            else:
                torso[prop] = 0.0

            # BUGFIX: keep the property *name* bound; the original rebound
            # 'prop' to the rna ui dict and then stored that dict as its own
            # description.
            ui_prop = rna_idprop_ui_prop_get(torso, prop, create=True)
            ui_prop["min"] = 0.0
            ui_prop["max"] = 1.0
            ui_prop["soft_min"] = 0.0
            ui_prop["soft_max"] = 1.0
            ui_prop["description"] = prop

        # driving the follow rotation switches for neck and head
        for bone, prop in zip(owners, props):
            # Add driver to copy rotation constraint
            drv = pb[bone].constraints[0].driver_add("influence").driver
            drv.type = 'AVERAGE'

            var = drv.variables.new()
            var.name = prop
            var.type = "SINGLE_PROP"
            var.targets[0].id = self.obj
            var.targets[0].data_path = \
                torso.path_from_id() + '[' + '"' + prop + '"' + ']'

            # Invert the driver value: follow == 0 --> influence 1 (fully
            # copies the torso rotation), follow == 1 --> influence 0.
            drv_modifier = self.obj.animation_data.drivers[-1].modifiers[0]
            drv_modifier.mode = 'POLYNOMIAL'
            drv_modifier.poly_order = 1
            drv_modifier.coefficients[0] = 1.0
            drv_modifier.coefficients[1] = -1.0

    def locks_and_widgets(self, bones):
        """ Set b-bone segments, transform locks, widgets and tweak layers. """
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones

        # deform bones bbone segments
        for bone in bones['def'][:-1]:
            self.obj.data.bones[bone].bbone_segments = 8

        self.obj.data.bones[bones['def'][0]].bbone_in = 0.0
        self.obj.data.bones[bones['def'][-2]].bbone_out = 0.0

        # Locks
        tweaks = bones['neck']['tweak'] + bones['chest']['tweak']
        tweaks += bones['hips']['tweak']

        if 'tail' in bones.keys():
            tweaks += bones['tail']['tweak']

        # Tweak bones locks
        for bone in tweaks:
            pb[bone].lock_rotation = True, False, True
            pb[bone].lock_scale = False, True, False

        # Widgets

        # Assigning a widget to torso bone
        create_cube_widget(
            self.obj,
            bones['pivot']['ctrl'],
            radius=0.5,
            bone_transform_name=None
        )

        # Assigning widgets to control bones
        gen_ctrls = [
            bones['neck']['ctrl_neck'],
            bones['chest']['ctrl'],
            bones['hips']['ctrl']
        ]
        if 'tail' in bones.keys():
            gen_ctrls += [bones['tail']['ctrl']]

        for bone in gen_ctrls:
            create_circle_widget(
                self.obj,
                bone,
                radius=1.0,
                head_tail=0.5,
                with_line=False,
                bone_transform_name=None
            )

        # Head widget
        create_circle_widget(
            self.obj,
            bones['neck']['ctrl'],
            radius=0.75,
            head_tail=1.0,
            with_line=False,
            bone_transform_name=None
        )

        # place widgets on correct bones
        chest_widget_loc = pb[bones['chest']['mch_wgt']]
        pb[bones['chest']['ctrl']].custom_shape_transform = chest_widget_loc

        hips_widget_loc = pb[bones['hips']['mch_wgt']]
        if 'tail' in bones.keys():
            # BUGFIX: custom_shape_transform expects a pose bone, not a bone
            # name string, so look the name up in pose.bones.
            hips_widget_loc = pb[bones['def'][self.tail_pos - 1]]

        pb[bones['hips']['ctrl']].custom_shape_transform = hips_widget_loc

        # Assigning widgets to tweak bones and layers
        for bone in tweaks:
            create_sphere_widget(self.obj, bone, bone_transform_name=None)

            if self.tweak_layers:
                pb[bone].bone.layers = self.tweak_layers

    def generate(self):
        """ Build the whole torso rig and return the rig UI script snippet.

            Torso Rig Anatomy:
              Neck: all bones above neck point, last bone is head
              Upper torso: all bones between pivot and neck start
              Lower torso: all bones below pivot until tail point
              Tail: all bones below tail point
        """
        bone_chains = self.build_bone_structure()

        # Clear parents for org bones
        bpy.ops.object.mode_set(mode='EDIT')
        eb = self.obj.data.edit_bones

        for bone in self.org_bones:
            eb[bone].use_connect = False
            eb[bone].parent = None

        if bone_chains != 'ERROR':
            # Create lists of bones and strip "ORG" from their names
            neck_bones = [strip_org(b) for b in bone_chains['neck']]
            upper_torso_bones = [strip_org(b) for b in bone_chains['upper']]
            lower_torso_bones = [strip_org(b) for b in bone_chains['lower']]
            tail_bones = [strip_org(b) for b in bone_chains['tail']]

            bones = {}

            bones['def'] = self.create_deform()  # Gets org bones from self
            bones['pivot'] = self.create_pivot(self.pivot_pos)
            bones['neck'] = self.create_neck(neck_bones)
            bones['chest'] = self.create_chest(upper_torso_bones)
            bones['hips'] = self.create_hips(lower_torso_bones)

            # TODO: Add create tail
            if tail_bones:
                bones['tail'] = self.create_tail(tail_bones)

            bpy.ops.object.mode_set(mode='EDIT')

            self.parent_bones(bones)
            self.constrain_bones(bones)
            self.create_drivers(bones)
            self.locks_and_widgets(bones)

            controls = [bones['neck']['ctrl'], bones['neck']['ctrl_neck']]
            controls += [bones['chest']['ctrl'], bones['hips']['ctrl']]
            controls += [bones['pivot']['ctrl']]

            if 'tail' in bones.keys():
                controls += [bones['tail']['ctrl']]

            # Create UI
            controls_string = ", ".join(["'" + x + "'" for x in controls])
            return [script % (
                controls_string,
                bones['pivot']['ctrl'],
                'head_follow',
                'neck_follow'
            )]
def add_parameters(params):
    """ Add the parameters of this rig type to the
        RigifyParameters PropertyGroup
    """
    # Chain split points: positions are 1-based bone indices along the chain.
    params.neck_pos = bpy.props.IntProperty(
        name='neck_position',
        default=6,
        min=0,
        description='Neck start position'
    )

    params.pivot_pos = bpy.props.IntProperty(
        name='pivot_position',
        default=3,
        min=0,
        description='Position of the torso control and pivot point'
    )

    params.tail_pos = bpy.props.IntProperty(
        name='tail_position',
        default=0,
        min=0,
        description='Where the tail starts (change from 0 to enable)'
    )

    # Setting up extra layers for the FK and tweak
    params.tweak_extra_layers = bpy.props.BoolProperty(
        name="tweak_extra_layers",
        default=True,
        description=""
    )

    # Default: only layer 1 enabled for the tweak controls.
    params.tweak_layers = bpy.props.BoolVectorProperty(
        size=32,
        description="Layers for the tweak controls to be on",
        default=tuple(i == 1 for i in range(0, 32))
    )
def parameters_ui(layout, params):
    """ Create the ui for the rig parameters."""
    # One row per numeric parameter.
    for prop_name in ("neck_pos", "pivot_pos", "tail_pos"):
        row = layout.row()
        row.prop(params, prop_name)

    r = layout.row()
    r.prop(params, "tweak_extra_layers")
    r.active = params.tweak_extra_layers

    # Two columns of layer toggles, each column holding two rows of 8
    # buttons: left column shows layers 0-7 / 16-23, right 8-15 / 24-31.
    for column_starts in ((0, 16), (8, 24)):
        col = r.column(align=True)
        for start in column_starts:
            row = col.row(align=True)
            for i in range(start, start + 8):
                row.prop(params, "tweak_layers", index=i, toggle=True, text="")
def create_sample(obj):
    """Create a 7-bone sample metarig chain ('spine'..'spine.006') with the
    root bone typed as 'pitchipoy.super_torso_turbo'."""
    # generated by rigify.utils.write_metarig
    bpy.ops.object.mode_set(mode='EDIT')
    arm = obj.data

    bones = {}

    # Edit-mode bone chain: each bone connects to and parents the previous one.
    bone = arm.edit_bones.new('spine')
    bone.head[:] = 0.0000, 0.0552, 1.0099
    bone.tail[:] = 0.0000, 0.0172, 1.1573
    bone.roll = 0.0000
    bone.use_connect = False
    bones['spine'] = bone.name
    bone = arm.edit_bones.new('spine.001')
    bone.head[:] = 0.0000, 0.0172, 1.1573
    bone.tail[:] = 0.0000, 0.0004, 1.2929
    bone.roll = 0.0000
    bone.use_connect = True
    bone.parent = arm.edit_bones[bones['spine']]
    bones['spine.001'] = bone.name
    bone = arm.edit_bones.new('spine.002')
    bone.head[:] = 0.0000, 0.0004, 1.2929
    bone.tail[:] = 0.0000, 0.0059, 1.4657
    bone.roll = 0.0000
    bone.use_connect = True
    bone.parent = arm.edit_bones[bones['spine.001']]
    bones['spine.002'] = bone.name
    bone = arm.edit_bones.new('spine.003')
    bone.head[:] = 0.0000, 0.0059, 1.4657
    bone.tail[:] = 0.0000, 0.0114, 1.6582
    bone.roll = 0.0000
    bone.use_connect = True
    bone.parent = arm.edit_bones[bones['spine.002']]
    bones['spine.003'] = bone.name
    bone = arm.edit_bones.new('spine.004')
    bone.head[:] = 0.0000, 0.0114, 1.6582
    bone.tail[:] = 0.0000, -0.0067, 1.7197
    bone.roll = 0.0000
    bone.use_connect = True
    bone.parent = arm.edit_bones[bones['spine.003']]
    bones['spine.004'] = bone.name
    bone = arm.edit_bones.new('spine.005')
    bone.head[:] = 0.0000, -0.0067, 1.7197
    bone.tail[:] = 0.0000, -0.0247, 1.7813
    bone.roll = 0.0000
    bone.use_connect = True
    bone.parent = arm.edit_bones[bones['spine.004']]
    bones['spine.005'] = bone.name
    bone = arm.edit_bones.new('spine.006')
    bone.head[:] = 0.0000, -0.0247, 1.7813
    bone.tail[:] = 0.0000, -0.0247, 1.9796
    bone.roll = 0.0000
    bone.use_connect = True
    bone.parent = arm.edit_bones[bones['spine.005']]
    bones['spine.006'] = bone.name

    # Pose-mode setup: rig type and parameters live on the root pose bone.
    bpy.ops.object.mode_set(mode='OBJECT')
    pbone = obj.pose.bones[bones['spine']]
    pbone.rigify_type = 'pitchipoy.super_torso_turbo'
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    # rigify_parameters attributes may not exist on older rigify versions,
    # hence the try/except guards.
    try:
        pbone.rigify_parameters.chain_bone_controls = "1, 2, 3"
    except AttributeError:
        pass
    try:
        pbone.rigify_parameters.neck_pos = 5
    except AttributeError:
        pass
    try:
        pbone.rigify_parameters.tweak_layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
    except AttributeError:
        pass
    pbone = obj.pose.bones[bones['spine.001']]
    pbone.rigify_type = ''
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    pbone = obj.pose.bones[bones['spine.002']]
    pbone.rigify_type = ''
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    pbone = obj.pose.bones[bones['spine.003']]
    pbone.rigify_type = ''
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    pbone = obj.pose.bones[bones['spine.004']]
    pbone.rigify_type = ''
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    pbone = obj.pose.bones[bones['spine.005']]
    pbone.rigify_type = ''
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    pbone = obj.pose.bones[bones['spine.006']]
    pbone.rigify_type = ''
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'

    # Re-enter edit mode and select the freshly created chain.
    bpy.ops.object.mode_set(mode='EDIT')
    for bone in arm.edit_bones:
        bone.select = False
        bone.select_head = False
        bone.select_tail = False
    for b in bones:
        bone = arm.edit_bones[bones[b]]
        bone.select = True
        bone.select_head = True
        bone.select_tail = True
        arm.edit_bones.active = bone
|
<filename>feasability_study/odslib.py
#!/usr/bin/env python
"""
Access ASAM Ods server via python using omniorb.
Copyright (c) 2015, <NAME>
License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html)
"""
__author__ = "<NAME>"
__license__ = "Apache 2.0"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
from omniORB import CORBA
import org
import re
import logging
_orb_obj = None
def _orb():
    """Return the process-wide CORBA ORB, initialising it lazily on first use."""
    global _orb_obj
    if _orb_obj is None:
        # Initialise the ORB with UTF-8 char codeset and a 256 MB GIOP
        # message-size limit (268435456 bytes).
        startup_args = [
            "-ORBnativeCharCodeSet", "UTF-8",
            "-ORBgiopMaxMsgSize", "268435456",
        ]
        _orb_obj = CORBA.ORB_init(startup_args, CORBA.ORB_ID)
    return _orb_obj
_attribute_parser = re.compile(r'\s*((?P<aggregate>(NONE)|(COUNT)|(DCOUNT)|(MIN)|(MAX)|(AVG)|(STDDEV)|(SUM)|(DISTINCT)|(POINT))\()?\s*?(?P<attribute>.*)')
_orderByParser = re.compile(r'\s*((?P<order>(ASCENDING)|(DESCENDING))\()?\s*?(?P<attribute>.*)')
def ValidFlag(flagVal):
    """Return True when both ODS validity bits (mask 9 = 0b1001) are set."""
    return (flagVal & 9) == 9
def _parse_path_and_add_joins(model, applElem, attribPath, joinSeq):
    """Resolve a dotted attribute path (e.g. 'rel1.rel2.attr') starting at
    *applElem*, appending the joins needed along the way to *joinSeq*.

    Each path segment may be wrapped in OUTER(...) to request an outer join.
    Returns (aaType, aaName, aaApplElem): the resolved data type, the final
    attribute/relation name, and the application element it belongs to.
    """
    aaType = org.asam.ods.DT_UNKNOWN
    aaName = ""
    aaApplElem = applElem
    pathParts = attribPath.split(".")
    nrOfPathParts = len(pathParts)
    for i in range(nrOfPathParts):
        pathPart = pathParts[i]
        joinType = org.asam.ods.JTDEFAULT
        # OUTER(<relation>) marks this hop as an outer join.
        if ( pathPart.startswith('OUTER(') and pathPart.endswith(')') ):
            pathPart = pathPart[6:-1]
            joinType = org.asam.ods.JTOUTER
        if(i != nrOfPathParts - 1):
            # Must be a relation
            relation = model.GetRelationEx(aaApplElem.aeName, pathPart)
            aaName = relation.arName
            aaApplElem = model.GetElemByAid(relation.elem2)
            # add join
            # For n:1 relations, join along the inverse (1:n) direction so
            # the generated join is well-defined.
            if (-1 == relation.arRelationRange.max) and (1 == relation.invRelationRange.max):
                realRelation = model.FindInverseRelation(relation)
                _add_join_to_seq(realRelation, joinSeq, joinType)
            else:
                _add_join_to_seq(relation, joinSeq, joinType)
        else:
            # maybe relation or attribute
            attribute = model.GetAttributeEx(aaApplElem.aeName, pathPart)
            if not attribute is None:
                aaName = attribute.aaName
                aaType = attribute.dType
            else:
                relation = model.GetRelationEx(aaApplElem.aeName, pathPart)
                aaName = relation.arName
                aaType = org.asam.ods.DT_LONGLONG # its an id
    return aaType, aaName, aaApplElem
def _add_join_to_seq(relation, joinSeq, joinType):
    """Append a JoinDef for *relation* to *joinSeq*, skipping duplicates."""
    for join in joinSeq:
        # NOTE(review): LL_Equal is not defined or imported in this module's
        # visible code — presumably a T_LONGLONG comparison helper; confirm.
        if LL_Equal(join.fromAID, relation.elem1) and LL_Equal(join.toAID, relation.elem2) and (join.refName == relation.arName):
            # already in sequence
            return
    joinDef = org.asam.ods.JoinDef(relation.elem1, relation.elem2, relation.arName, joinType)
    joinSeq.append(joinDef)
def GetRelationType(relationType):
    """Return the name of an ODS relation-type constant, or None if unknown."""
    known_types = (
        (org.asam.ods.FATHER_CHILD, "FATHER_CHILD"),
        (org.asam.ods.INFO, "INFO"),
        (org.asam.ods.INHERITANCE, "INHERITANCE"),
    )
    for constant, name in known_types:
        if relationType == constant:
            return name
    return None
def GetAggrTypeStr(aggrType):
    """Return the name of an ODS aggregate-type constant, or None if unknown."""
    known_aggregates = (
        (org.asam.ods.NONE, "NONE"),
        (org.asam.ods.COUNT, "COUNT"),
        (org.asam.ods.DCOUNT, "DCOUNT"),
        (org.asam.ods.MIN, "MIN"),
        (org.asam.ods.MAX, "MAX"),
        (org.asam.ods.AVG, "AVG"),
        (org.asam.ods.STDDEV, "STDDEV"),
        (org.asam.ods.SUM, "SUM"),
        (org.asam.ods.DISTINCT, "DISTINCT"),
        (org.asam.ods.POINT, "POINT"),
    )
    for constant, name in known_aggregates:
        if aggrType == constant:
            return name
    return None
def get_scalar_type(columnType):
    """Map an ODS sequence type (DS_*) to its scalar counterpart (DT_*).

    Types that are not sequence types are returned unchanged.
    """
    ds_to_dt = (
        (org.asam.ods.DS_BYTE, org.asam.ods.DT_BYTE),
        (org.asam.ods.DS_BOOLEAN, org.asam.ods.DT_BOOLEAN),
        (org.asam.ods.DS_SHORT, org.asam.ods.DT_SHORT),
        (org.asam.ods.DS_LONG, org.asam.ods.DT_LONG),
        (org.asam.ods.DS_LONGLONG, org.asam.ods.DT_LONGLONG),
        (org.asam.ods.DS_FLOAT, org.asam.ods.DT_FLOAT),
        (org.asam.ods.DS_DOUBLE, org.asam.ods.DT_DOUBLE),
        (org.asam.ods.DS_DATE, org.asam.ods.DT_DATE),
        (org.asam.ods.DS_STRING, org.asam.ods.DT_STRING),
        (org.asam.ods.DS_ENUM, org.asam.ods.DT_ENUM),
        (org.asam.ods.DS_COMPLEX, org.asam.ods.DT_COMPLEX),
        (org.asam.ods.DS_DCOMPLEX, org.asam.ods.DT_DCOMPLEX),
        (org.asam.ods.DS_EXTERNALREFERENCE, org.asam.ods.DT_EXTERNALREFERENCE),
        (org.asam.ods.DS_ID, org.asam.ods.DT_ID),
    )
    for sequence_type, scalar_type in ds_to_dt:
        if columnType == sequence_type:
            return scalar_type
    return columnType
def GetDataTypeStr(dataType):
    """Return the name of an ODS data-type constant (DT_*/DS_*), or None."""
    known_types = (
        (org.asam.ods.DT_UNKNOWN, "DT_UNKNOWN"),
        (org.asam.ods.DT_BYTE, "DT_BYTE"),
        (org.asam.ods.DT_BOOLEAN, "DT_BOOLEAN"),
        (org.asam.ods.DT_SHORT, "DT_SHORT"),
        (org.asam.ods.DT_LONG, "DT_LONG"),
        (org.asam.ods.DT_LONGLONG, "DT_LONGLONG"),
        (org.asam.ods.DT_FLOAT, "DT_FLOAT"),
        (org.asam.ods.DT_DOUBLE, "DT_DOUBLE"),
        (org.asam.ods.DT_DATE, "DT_DATE"),
        (org.asam.ods.DT_STRING, "DT_STRING"),
        (org.asam.ods.DT_ENUM, "DT_ENUM"),
        (org.asam.ods.DT_COMPLEX, "DT_COMPLEX"),
        (org.asam.ods.DT_DCOMPLEX, "DT_DCOMPLEX"),
        (org.asam.ods.DT_EXTERNALREFERENCE, "DT_EXTERNALREFERENCE"),
        (org.asam.ods.DS_BYTE, "DS_BYTE"),
        (org.asam.ods.DS_BOOLEAN, "DS_BOOLEAN"),
        (org.asam.ods.DS_SHORT, "DS_SHORT"),
        (org.asam.ods.DS_LONG, "DS_LONG"),
        (org.asam.ods.DS_LONGLONG, "DS_LONGLONG"),
        (org.asam.ods.DS_FLOAT, "DS_FLOAT"),
        (org.asam.ods.DS_DOUBLE, "DS_DOUBLE"),
        (org.asam.ods.DS_DATE, "DS_DATE"),
        (org.asam.ods.DS_STRING, "DS_STRING"),
        (org.asam.ods.DS_ENUM, "DS_ENUM"),
        (org.asam.ods.DS_COMPLEX, "DS_COMPLEX"),
        (org.asam.ods.DS_DCOMPLEX, "DS_DCOMPLEX"),
        (org.asam.ods.DS_EXTERNALREFERENCE, "DS_EXTERNALREFERENCE"),
    )
    for constant, name in known_types:
        if dataType == constant:
            return name
    return None
def ExtractAttributeNameFromOrderByName(strVal):
    """Split 'ASCENDING(attr)', 'DESCENDING(attr)' or plain 'attr' into
    (attribute_name, ascending_flag)."""
    match = _orderByParser.search(strVal)
    attr_name = match.group("attribute")
    direction = match.group("order")
    if direction is not None:
        # cut closing bracket
        attr_name = attr_name.rstrip(" \t)")
    return attr_name, ("DESCENDING" != direction)
def ExtractAttributeNameFromColumnName(columnName):
    """Split 'AGGREGATE(attr)' or plain 'attr' into
    (attribute_name, aggregate_constant)."""
    match = _attribute_parser.search(columnName)
    aName = match.group("attribute")
    aAggrTypeStr = match.group("aggregate")
    aAggrType = org.asam.ods.NONE
    if aAggrTypeStr is not None:
        # cut closing bracket and determine aggregate type
        aName = aName.rstrip(" \t)")
        aggregates = (
            ("NONE", org.asam.ods.NONE),
            ("COUNT", org.asam.ods.COUNT),
            ("DCOUNT", org.asam.ods.DCOUNT),
            ("MIN", org.asam.ods.MIN),
            ("MAX", org.asam.ods.MAX),
            ("AVG", org.asam.ods.AVG),
            ("STDDEV", org.asam.ods.STDDEV),
            ("SUM", org.asam.ods.SUM),
            ("DISTINCT", org.asam.ods.DISTINCT),
            ("POINT", org.asam.ods.POINT),
        )
        for token, constant in aggregates:
            if token == aAggrTypeStr:
                aAggrType = constant
                break
        else:
            raise Exception("Unknown aggregate type in '" + columnName + "'")
    return aName.strip(), aAggrType
def ColumnType(column):
    """Return the discriminator (data type tag) of a result column's value union."""
    value_union = column.value.u
    return value_union._d
def ColumnGetSeq(column):
    """Return the value sequence of a result column as plain Python values.

    DT_LONGLONG values are converted to ints, DT_STRING/DT_DATE values are
    utf-8 decoded, and DT_EXTERNALREFERENCE values are flattened into
    consecutive description/mimeType/location entries. All other types are
    returned as delivered by _column_get_seq().
    """
    if org.asam.ods.DT_LONGLONG == column.value.u._d:
        rv = _column_get_seq(column)
        iSeq = []
        for val in rv:
            # NOTE(review): LL2Int is not defined or imported in this
            # module's visible code — presumably converts a CORBA
            # T_LONGLONG structure to a Python int; confirm.
            iSeq.append(LL2Int(val))
        return iSeq
    if org.asam.ods.DT_STRING == column.value.u._d:
        rv = _column_get_seq(column)
        iSeq = []
        for val in rv:
            # NOTE(review): decode implies the ORB delivers byte strings
            # here (Python-2-era omniORB behaviour) — confirm on Python 3.
            iSeq.append(val.decode('utf-8'))
        return iSeq
    if org.asam.ods.DT_DATE == column.value.u._d:
        rv = _column_get_seq(column)
        iSeq = []
        for val in rv:
            iSeq.append(val.decode('utf-8'))
        return iSeq
    if org.asam.ods.DT_EXTERNALREFERENCE == column.value.u._d:
        rv = _column_get_seq(column)
        iSeq = []
        # Each external reference contributes three flat entries.
        for val in rv:
            iSeq.append(val.description.decode('utf-8'))
            iSeq.append(val.mimeType.decode('utf-8'))
            iSeq.append(val.location.decode('utf-8'))
        return iSeq
    return _column_get_seq(column)
def _column_get_seq(column):
    """Return the raw value sequence stored in the column's TS_Union.

    Maps the union discriminator onto the matching union member name;
    logs an error and returns None for an unknown discriminator, exactly
    like the original elif chain did.
    """
    # Union member per discriminator: scalar DT_* types use *Val members,
    # sequence DS_* types use *Seq members.
    members = {
        org.asam.ods.DT_BYTE: 'byteVal',
        org.asam.ods.DT_BOOLEAN: 'booleanVal',
        org.asam.ods.DT_SHORT: 'shortVal',
        org.asam.ods.DT_LONG: 'longVal',
        org.asam.ods.DT_LONGLONG: 'longlongVal',
        org.asam.ods.DT_FLOAT: 'floatVal',
        org.asam.ods.DT_DOUBLE: 'doubleVal',
        org.asam.ods.DT_DATE: 'dateVal',
        org.asam.ods.DT_STRING: 'stringVal',
        org.asam.ods.DT_ENUM: 'enumVal',
        org.asam.ods.DT_COMPLEX: 'complexVal',
        org.asam.ods.DT_DCOMPLEX: 'dcomplexVal',
        org.asam.ods.DT_EXTERNALREFERENCE: 'extRefVal',
        org.asam.ods.DS_BYTE: 'byteSeq',
        org.asam.ods.DS_BOOLEAN: 'booleanSeq',
        org.asam.ods.DS_SHORT: 'shortSeq',
        org.asam.ods.DS_LONG: 'longSeq',
        org.asam.ods.DS_LONGLONG: 'longlongSeq',
        org.asam.ods.DS_FLOAT: 'floatSeq',
        org.asam.ods.DS_DOUBLE: 'doubleSeq',
        org.asam.ods.DS_DATE: 'dateSeq',
        # NOTE(review): 'dstringSeq' (not 'stringSeq') mirrors the original
        # code exactly -- confirm against the IDL-generated union.
        org.asam.ods.DS_STRING: 'dstringSeq',
        org.asam.ods.DS_ENUM: 'enumSeq',
        org.asam.ods.DS_COMPLEX: 'complexSeq',
        org.asam.ods.DS_DCOMPLEX: 'dcomplexSeq',
        org.asam.ods.DS_EXTERNALREFERENCE: 'extRefSeq',
    }
    columnType = column.value.u._d
    member = members.get(columnType)
    if member is None:
        logging.error("_column_get_seq: Unknown column type " + str(columnType))
        return None
    return getattr(column.value.u, member)
def IsSequence(arrayType):
    """Return True when arrayType is one of the DS_* sequence data types.

    Replaces the original 13-branch elif chain with a single membership
    test over the same constants; any other value yields False.
    """
    return arrayType in (
        org.asam.ods.DS_BYTE,
        org.asam.ods.DS_BOOLEAN,
        org.asam.ods.DS_SHORT,
        org.asam.ods.DS_LONG,
        org.asam.ods.DS_LONGLONG,
        org.asam.ods.DS_FLOAT,
        org.asam.ods.DS_DOUBLE,
        org.asam.ods.DS_DATE,
        org.asam.ods.DS_STRING,
        org.asam.ods.DS_ENUM,
        org.asam.ods.DS_COMPLEX,
        org.asam.ods.DS_DCOMPLEX,
        org.asam.ods.DS_EXTERNALREFERENCE,
    )
def ColumnCountRows(column):
    """Return the number of values stored in the column (0 if unreadable)."""
    seq = _column_get_seq(column)
    return 0 if seq is None else len(seq)
def LL0():
    # Zero as an ODS 64-bit value: both the high and low 32-bit words are 0.
    return org.asam.ods.T_LONGLONG(0, 0)
def LL2Int(val):
    """Convert an ODS T_LONGLONG (high/low 32-bit words) to a Python int."""
    if val.high == 0:
        # Fast path: the value fits entirely in the low word.
        return val.low
    return (val.high << 32) | val.low
def Int2LL(val):
    # Split a Python int into the high/low 32-bit words of an ODS T_LONGLONG.
    return org.asam.ods.T_LONGLONG(int((val >> 32) & 0xFFFFFFFF), int(val & 0xFFFFFFFF))
def LL_Equal(val1, val2):
    """Return True when both T_LONGLONG values carry identical words."""
    return val1.high == val2.high and val1.low == val2.low
def LL_Is0(val):
    """Return True when the T_LONGLONG value represents zero."""
    return val.high == 0 and val.low == 0
def GetTsValue(tsVal):
    """Return the Python value stored in an ODS TS_Value union.

    DT_LONGLONG is converted to a Python int via LL2Int; complex,
    dcomplex and external-reference unions are not supported and raise
    Exception.
    """
    if tsVal.u._d == org.asam.ods.DT_BYTE: return tsVal.u.byteVal
    elif tsVal.u._d == org.asam.ods.DT_BOOLEAN: return tsVal.u.booleanVal
    elif tsVal.u._d == org.asam.ods.DT_SHORT: return tsVal.u.shortVal
    elif tsVal.u._d == org.asam.ods.DT_LONG: return tsVal.u.longVal
    elif tsVal.u._d == org.asam.ods.DT_LONGLONG: return LL2Int(tsVal.u.longlongVal)
    elif tsVal.u._d == org.asam.ods.DT_FLOAT: return tsVal.u.floatVal
    elif tsVal.u._d == org.asam.ods.DT_DOUBLE: return tsVal.u.doubleVal
    elif tsVal.u._d == org.asam.ods.DT_DATE: return tsVal.u.dateVal
    elif tsVal.u._d == org.asam.ods.DT_STRING: return tsVal.u.stringVal
    elif tsVal.u._d == org.asam.ods.DT_ENUM: return tsVal.u.enumVal
    # Unsupported union members, kept for documentation:
    #elif aaType == org.asam.ods.DT_COMPLEX:
    #elif aaType == org.asam.ods.DT_DCOMPLEX:
    #elif aaType == org.asam.ods.DT_EXTERNALREFERENCE:
    else:
        raise Exception("Unable to read TS_Value.")
def CreateTsValue(aaType, strVal):
    """Build an ODS TS_Value of union type aaType from the string strVal.

    The trailing 15 is the flag word passed with every TS_Value.  Raises
    Exception for union types that cannot be created from a string
    (complex, dcomplex, external reference).

    BUGFIX: the Python-2-only long() calls were replaced with int();
    on Python 2 int() promotes to long automatically, so behaviour there
    is unchanged, while Python 3 no longer raises NameError.
    """
    if aaType == org.asam.ods.DT_BYTE: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, org.asam.ods.T_BYTE(int(strVal))), 15)
    elif aaType == org.asam.ods.DT_BOOLEAN: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, org.asam.ods.T_BOOLEAN(int(strVal))), 15)
    elif aaType == org.asam.ods.DT_SHORT: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, org.asam.ods.T_SHORT(int(strVal))), 15)
    elif aaType == org.asam.ods.DT_LONG: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, org.asam.ods.T_LONG(int(strVal))), 15)
    elif aaType == org.asam.ods.DT_LONGLONG: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, Int2LL(int(strVal))), 15)
    elif aaType == org.asam.ods.DT_FLOAT: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, float(strVal)), 15)
    elif aaType == org.asam.ods.DT_DOUBLE: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, float(strVal)), 15)
    elif aaType == org.asam.ods.DT_DATE: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, strVal.encode('utf-8')), 15)
    elif aaType == org.asam.ods.DT_STRING: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, strVal.encode('utf-8')), 15)
    elif aaType == org.asam.ods.DT_ENUM: return org.asam.ods.TS_Value(org.asam.ods.TS_Union(aaType, org.asam.ods.T_LONG(int(strVal))), 15)
    # Unsupported union members, kept for documentation:
    #elif aaType == org.asam.ods.DT_COMPLEX:
    #elif aaType == org.asam.ods.DT_DCOMPLEX:
    #elif aaType == org.asam.ods.DT_EXTERNALREFERENCE:
    else:
        raise Exception("Unknown how to attach '" + strVal + "' does not exist as " + str(aaType) + " union.")
def _get_session(params):
    """Create a new ODS session from a connection parameter dict.

    params maps parameter names to string values; names starting with '$'
    (e.g. '$URL') steer the connection itself and are not forwarded to
    the AoFactory.  Returns the new session, or None when any step of the
    ORB/factory resolution fails.
    """
    objString = params['$URL']
    logging.info("ORB: resolve objecturl")
    obj = _orb().string_to_object(objString)
    if obj is None:
        return None
    logging.info("ORB: object retrieved")
    factory = obj._narrow(org.asam.ods.AoFactory)
    if factory is None:
        return None
    logging.info("ORB: Got factory")
    nvs = []
    # items() works on Python 2 and 3; the original Python-2-only
    # iteritems() raises AttributeError on Python 3.
    for paramName, paramValue in params.items():
        if not paramName.startswith('$'):
            nvs.append(org.asam.ods.NameValue(paramName.encode('utf-8'), org.asam.ods.TS_Value(org.asam.ods.TS_Union(org.asam.ods.DT_STRING, paramValue.encode('utf-8')), 15)))
    logging.info("AoFactory: Establish new session")
    session = factory.newSessionNameValue(nvs)
    if session is None:
        return None
    logging.info("AoFactory: Session retrieved")
    return session
class CModel:
    """Cached view on the ODS application model of a session.

    Holds the application structure value plus (when retrievable) the
    enumeration definitions and enumeration-attribute assignments, and
    offers lookups by application name, base name and element id.
    """
    model_ = None        # application structure value of the session
    enums_ = None        # enumeration definitions; stays None on failure
    enumAttribs_ = None  # enumeration attribute assignments; stays None on failure

    def __init__(self, session):
        self.model_ = session.getApplicationStructureValue()
        try:
            self.enums_ = session.getEnumerationStructure()
        except org.asam.ods.AoException as ex:
            # BUGFIX: the original concatenated str + exception object,
            # which itself raised TypeError instead of logging; lazy %s
            # formatting handles any exception type.  The `except X as ex`
            # form also replaces the Python-2-only `except X, ex` syntax.
            logging.error('Unable to retrieve enum struct: %s', ex)
        except CORBA.BAD_OPERATION as ex:
            logging.error('Unable to retrieve enum struct: %s', ex)
        try:
            self.enumAttribs_ = session.getEnumerationAttributes()
        except org.asam.ods.AoException as ex:
            logging.error('Unable to retrieve enum attributes: %s', ex)
        except CORBA.BAD_OPERATION as ex:
            logging.error('Unable to retrieve enum attributes: %s', ex)

    def GetEnumIndex(self, elem, aaName, name):
        """Return the numeric index of enum item `name` on elem.aaName.

        Raises Exception when the enum or the item cannot be resolved.
        NOTE(review): iterating self.enums_ assumes the enumeration
        structure was retrievable in __init__ -- confirm callers.
        """
        enumName = self.GetEnumName(elem.aid, aaName)
        for enum in self.enums_:
            if enumName == enum.enumName:
                for enumItem in enum.items:
                    if name == enumItem.itemName:
                        return enumItem.index
                raise Exception("Enum '" + enumName + "' does not contain item named '" + name + "'")
        raise Exception("Enum not resolvable")

    def GetEnumName(self, aid, aaName):
        """Return the enum name bound to attribute aaName of element aid, or None."""
        for enumAttrib in self.enumAttribs_:
            if aaName == enumAttrib.aaName and LL_Equal(aid, enumAttrib.aid):
                return enumAttrib.enumName
        return None

    def Aid(self, aeName):
        """Return the application element id for the given element name."""
        elem = self.GetElem(aeName)
        return elem.aid

    def GetElemEx(self, strVal):
        """Resolve an element by application name, base name or numeric id."""
        rv = self.GetElem(strVal)
        if rv is None:
            rv = self.GetElemB(strVal)
        if rv is None and strVal.isdigit():
            rv = self.GetElemByAid(Int2LL(int(strVal)))
        return rv

    def GetElem(self, aeName):
        """Return the element with application name aeName, or None."""
        for elem in self.model_.applElems:
            if aeName == elem.aeName:
                return elem
        return None

    def GetElemB(self, beName):
        """Return the first element with base name beName, or None."""
        for elem in self.model_.applElems:
            if beName == elem.beName:
                return elem
        return None

    def GetElemByAid(self, aid):
        """Return the element with the given id (T_LONGLONG), or None."""
        for elem in self.model_.applElems:
            if LL_Equal(elem.aid, aid):
                return elem
        return None

    def MapAttrNameToAaName(self, aeName, attribName):
        """Map an attribute or relation name to its application name.

        Raises Exception when neither an attribute nor a relation matches.
        """
        attr = self.GetAttributeEx(aeName, attribName)
        if attr is not None:
            return attr.aaName
        rel = self.GetRelationEx(aeName, attribName)
        if rel is not None:
            return rel.arName
        raise Exception("Attribute '" + aeName + "." + attribName + "' does not exist")

    def GetAttribute(self, aeName, aaName):
        """Return the attribute with application name aaName, or None."""
        elem = self.GetElem(aeName)
        for attr in elem.attributes:
            if attr.aaName == aaName:
                return attr
        return None

    def GetAttributeB(self, aeName, baName):
        """Return the first attribute with base name baName, or None."""
        elem = self.GetElem(aeName)
        for attr in elem.attributes:
            if attr.baName == baName:
                return attr
        return None

    def GetAttributeEx(self, aeName, attributeName):
        """Return the attribute by application or base name, or None."""
        rv = self.GetAttribute(aeName, attributeName)
        if rv is None:
            rv = self.GetAttributeB(aeName, attributeName)
        # Simplified from the original redundant
        # `if not rv is None: return rv / return None` tail.
        return rv

    def GetRelationByAid(self, aid, arName):
        """Return the relation starting at element aid with name arName, or None."""
        for rel in self.model_.applRels:
            if LL_Equal(rel.elem1, aid) and rel.arName == arName:
                return rel
        return None

    def GetRelation(self, aeName, arName):
        """Return the relation of element aeName with application name arName."""
        aid = self.Aid(aeName)
        return self.GetRelationByAid(aid, arName)

    def GetRelationEx(self, aeName, relationName):
        """Return the relation by application or base name, or None."""
        rv = self.GetRelation(aeName, relationName)
        if rv is None:
            rv = self.GetRelationB(aeName, relationName)
        return rv

    def FindInverseRelation(self, relation):
        """Return the inverse relation seen from the relation's target element."""
        return self.GetRelationByAid(relation.elem2, relation.invName)

    def GetRelationB(self, aeName, brName):
        """Return the first relation of aeName with base name brName, or None."""
        aid = self.Aid(aeName)
        for rel in self.model_.applRels:
            if LL_Equal(rel.elem1, aid) and rel.brName == brName:
                return rel
        return None

    def GetNRelationNames(self, aeName):
        """Return the names of all to-many relations of element aeName."""
        rv = []
        aid = self.Aid(aeName)
        for rel in self.model_.applRels:
            # max relation range != 1 marks a to-many relation.
            if LL_Equal(rel.elem1, aid) and 1 != rel.arRelationRange.max:
                rv.append(rel.arName)
        return rv
class CSession:
    """Convenience wrapper around an ODS AoSession.

    Owns the session, a cached CModel of its application structure and
    the ApplElemAccess object, and offers path, context and query
    helpers on top of them.
    """
    # The underlying AoSession; None once the session has been closed.
    _aoSession = None
    # Model members
    _cModel = None
    # access data
    _applElemAccess = None

    def __init__(self, params):
        """Open a session from the connection parameter dict (see _get_session)."""
        self._aoSession = _get_session(params)
        if self._aoSession is None:
            raise Exception("Retrieving session failed!")
        self._cModel = CModel(self._aoSession)
        self._applElemAccess = self._aoSession.getApplElemAccess()

    def __del__(self):
        # Best-effort close on garbage collection; the bare except is
        # deliberate so finalization never raises.
        try:
            self.Close()
        except:
            logging.info("Exception occured when closing session")

    def Model(self):
        """Return the CModel cached at construction time."""
        return self._cModel

    def Close(self):
        """Close the ODS session exactly once; later calls are no-ops."""
        if not self._aoSession is None:
            self._aoSession.close()
            self._aoSession = None

    def AsamPathCreate(self, aid, iid):
        """Return the ASAM path of instance iid (Python int) of element aid."""
        applStruct = self._aoSession.getApplicationStructure()
        ae = applStruct.getElementById(aid)
        ie = ae.getInstanceById(Int2LL(iid))
        return ie.getAsamPath()

    def AsamPathResolve(self, path):
        """Resolve an ASAM path to its (element name, instance id) pair."""
        applStruct = self._aoSession.getApplicationStructure()
        ie = applStruct.getInstanceByAsamPath(path.encode('utf-8'))
        iid = ie.getId()
        entity = ie.getApplicationElement().getName()
        return entity, LL2Int(iid)

    def GetContext(self, pattern):
        """Return the session context variables matching pattern."""
        return self._aoSession.getContext(pattern.encode('utf-8'))

    def SetContextString(self, varName, varValue):
        """Set a string-valued session context variable."""
        self._aoSession.setContextString(varName.encode('utf-8'), varValue.encode('utf-8'))

    def GetElementValues(self, aeName, conditionArray, attributeArray, orderByArray, groupByArray, how_many):
        """Query instance values of element aeName.

        conditionArray is a flat list mixing '$cs'/'$ci' (case toggles),
        '$open'/'$close'/'$or'/'$and'/'$not' operator tokens and
        'attribute OP operand' expressions.  attributeArray lists column
        expressions (optionally with aggregates), orderByArray ORDER BY
        items and groupByArray GROUP BY attribute paths.  Returns the
        first ElemResultSet of getInstancesExt, limited to how_many rows
        (0 = all).

        NOTE(review): unlike the other arrays, groupByArray is not
        defaulted to [] when None -- passing None raises TypeError below;
        confirm whether callers ever pass None here.
        """
        if conditionArray is None: conditionArray = []
        if attributeArray is None: attributeArray = []
        if orderByArray is None: orderByArray = []
        if how_many is None: how_many = 0
        applElem = self.Model().GetElem(aeName)
        aid = applElem.aid
        anuSeq = []
        condSeq = []
        joinSeq = []
        orderBySeq = []
        groupBySeq = []
        # Build the select list; "*" selects all attributes of the element.
        if(0 == len(attributeArray)):
            anuSeq.append(org.asam.ods.SelAIDNameUnitId(org.asam.ods.AIDName(aid, "*"), org.asam.ods.T_LONGLONG(0, 0), org.asam.ods.NONE))
        else:
            for attributeItem in attributeArray:
                if("*" == attributeItem):
                    anuSeq.append(org.asam.ods.SelAIDNameUnitId(org.asam.ods.AIDName(aid, "*"), org.asam.ods.T_LONGLONG(0, 0), org.asam.ods.NONE))
                else:
                    attribPath, aAggrType = ExtractAttributeNameFromColumnName(attributeItem)
                    aaType, aaName, aaApplElem = _parse_path_and_add_joins(self.Model(), applElem, attribPath, joinSeq)
                    anuSeq.append(org.asam.ods.SelAIDNameUnitId(org.asam.ods.AIDName(aaApplElem.aid, aaName), org.asam.ods.T_LONGLONG(0, 0), aAggrType))
        # ORDER BY items; paths may add joins as a side effect.
        for orderByItem in orderByArray:
            attribPath, ascending = ExtractAttributeNameFromOrderByName(orderByItem)
            aaType, aaName, aaApplElem = _parse_path_and_add_joins(self.Model(), applElem, attribPath, joinSeq)
            orderBySeq.append(org.asam.ods.SelOrder(org.asam.ods.AIDName(aaApplElem.aid, aaName), ascending))
        # GROUP BY attribute paths.
        for attribPath in groupByArray:
            aaType, aaName, aaApplElem = _parse_path_and_add_joins(self.Model(), applElem, attribPath, joinSeq)
            groupBySeq.append(org.asam.ods.AIDName(aaApplElem.aid, aaName))
        # WHERE conditions: tokens and 'attribute OP operand' expressions.
        if(len(conditionArray) > 0):
            expressionParser = re.compile(r'\s*(?P<attribute>.*?)\s*?(?P<operator>([!<>=][=]|[<>=]))\s*(?P<operand>.*)')
            caseSensitive = 1
            for part in conditionArray:
                if '$cs' == part:
                    caseSensitive = 1
                elif '$ci' == part:
                    caseSensitive = 0
                elif '$open' == part:
                    selItem = org.asam.ods.SelItem(org.asam.ods.SEL_OPERATOR_TYPE, org.asam.ods.OPEN)
                    condSeq.append(selItem)
                elif '$close' == part:
                    selItem = org.asam.ods.SelItem(org.asam.ods.SEL_OPERATOR_TYPE, org.asam.ods.CLOSE)
                    condSeq.append(selItem)
                elif '$or' == part:
                    selItem = org.asam.ods.SelItem(org.asam.ods.SEL_OPERATOR_TYPE, org.asam.ods.OR)
                    condSeq.append(selItem)
                elif '$and' == part:
                    selItem = org.asam.ods.SelItem(org.asam.ods.SEL_OPERATOR_TYPE, org.asam.ods.AND)
                    condSeq.append(selItem)
                elif '$not' == part:
                    selItem = org.asam.ods.SelItem(org.asam.ods.SEL_OPERATOR_TYPE, org.asam.ods.NOT)
                    condSeq.append(selItem)
                else:
                    #split condition
                    m = expressionParser.search(part)
                    attribPath = m.group("attribute")
                    operatorStr = m.group("operator")
                    operandStr = m.group("operand")
                    aaType, aaName, aaApplElem = _parse_path_and_add_joins(self.Model(), applElem, attribPath, joinSeq)
                    operator = None
                    # '=' / '<>' use (case-aware) pattern matching on string
                    # types; '==' / '!=' use exact comparison, switching to
                    # the CI_* operators only for case-insensitive strings.
                    if '=' == operatorStr:
                        if (org.asam.ods.DT_STRING == aaType or org.asam.ods.DS_STRING == aaType):
                            operator = org.asam.ods.LIKE if 1 == caseSensitive else org.asam.ods.CI_LIKE
                        else:
                            operator = org.asam.ods.EQ
                    elif '<>' == operatorStr:
                        if (org.asam.ods.DT_STRING == aaType or org.asam.ods.DS_STRING == aaType):
                            operator = org.asam.ods.NLIKE if 1 == caseSensitive else org.asam.ods.CI_NLIKE
                        else:
                            operator = org.asam.ods.NEQ
                    elif '==' == operatorStr: operator = org.asam.ods.EQ if ((org.asam.ods.DT_STRING == aaType or org.asam.ods.DS_STRING == aaType)) and 1 == caseSensitive else org.asam.ods.CI_EQ
                    elif '!=' == operatorStr: operator = org.asam.ods.NEQ if ((org.asam.ods.DT_STRING == aaType or org.asam.ods.DS_STRING == aaType)) and 1 == caseSensitive else org.asam.ods.CI_NEQ
                    elif '<' == operatorStr: operator = org.asam.ods.LT
                    elif '>' == operatorStr: operator = org.asam.ods.GT
                    elif '<=' == operatorStr: operator = org.asam.ods.LTE
                    elif '>=' == operatorStr: operator = org.asam.ods.GTE
                    else:
                        raise Exception("Query contains unknown operator '" + operatorStr + "'")
                    tsValue = CreateTsValue(aaType, operandStr)
                    selValExt = org.asam.ods.SelValueExt(org.asam.ods.AIDNameUnitId(org.asam.ods.AIDName(aaApplElem.aid, aaName), LL0()), operator, tsValue)
                    selItem = org.asam.ods.SelItem(org.asam.ods.SEL_VALUE_TYPE, selValExt)
                    condSeq.append(selItem)
        return self.GetInstancesEx(org.asam.ods.QueryStructureExt(anuSeq, condSeq, joinSeq, orderBySeq, groupBySeq), how_many)

    def GetInstancesEx(self, qse, how_many):
        """Run a QueryStructureExt and return the first result's firstElems.

        Returns None when getInstancesExt yields no result sets.
        """
        logging.info('Call ApplElemAccess.getInstancesExt(aoq="' + str(qse).replace('org.asam.ods.','') + '", how_many=' + str(how_many) + '")')
        rs = self._applElemAccess.getInstancesExt(qse, how_many)
        for r in rs:
            return r.firstElems
        return None
class CSessionAutoReconnect:
    """CSession wrapper that transparently reconnects on CORBA failures.

    Every call is first tried on the current session; when the ORB raises
    a connection-level system exception the session is dropped, re-created
    from the stored parameters, and the call is retried once.  The
    repetitive per-method try/except blocks of the original are folded
    into the single _invoke helper.
    """
    _cSession = None
    _params = {}

    def __init__(self, params):
        self._cSession = CSession(params)
        self._params = params

    def __del__(self):
        self._cSession = None

    def Close(self):
        # no need to reconnect
        self._cSession.Close()
        self._cSession = None

    def Model(self):
        # no need to reconnect
        return self._CSession().Model()

    def _invoke(self, methodName, *args):
        """Call the named CSession method, retrying once after a reconnect."""
        try:
            return getattr(self._CSession(), methodName)(*args)
        except (CORBA.OBJECT_NOT_EXIST, CORBA.COMM_FAILURE, CORBA.TRANSIENT, CORBA.INV_OBJREF):
            return getattr(self._CSessionReconnect(), methodName)(*args)

    def AsamPathCreate(self, aid, iid):
        return self._invoke('AsamPathCreate', aid, iid)

    def AsamPathResolve(self, path):
        return self._invoke('AsamPathResolve', path)

    def GetContext(self, pattern):
        return self._invoke('GetContext', pattern)

    def SetContextString(self, varName, varValue):
        self._invoke('SetContextString', varName, varValue)

    def GetElementValues(self, aeName, conditionArray, attributeArray, orderByArray, groupByArray, how_many):
        return self._invoke('GetElementValues', aeName, conditionArray, attributeArray, orderByArray, groupByArray, how_many)

    def GetInstancesEx(self, qse, how_many):
        return self._invoke('GetInstancesEx', qse, how_many)

    def _CSessionReconnect(self):
        """Drop the current session and build a fresh one from _params."""
        self._cSession = None
        return self._CSession()

    def _CSession(self):
        """Return the current session, lazily creating it when absent."""
        if self._cSession is None:
            self._cSession = CSession(self._params)
        return self._cSession
|
<reponame>fuliucansheng/UniTorch
# Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
import torch
import torch.nn as nn
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from unitorch.score import (
accuracy_score,
recall_score,
f1_score,
bleu_score,
voc_map_score,
rouge1_score,
rouge2_score,
rougel_score,
roc_auc_score,
matthews_corrcoef,
pearsonr,
spearmanr,
)
from unitorch.cli import add_default_section_for_init, register_score
from unitorch.cli.models import (
BaseOutputs,
BaseTargets,
ClassificationOutputs,
ClassificationTargets,
GenerationOutputs,
GenerationTargets,
DetectionOutputs,
DetectionTargets,
SegmentationOutputs,
SegmentationTargets,
LossOutputs,
)
class Score(nn.Module):
    """Base class for all metric modules registered via @register_score."""
    pass
@register_score("core/score/acc")
class AccuracyScore(Score):
    """Accuracy for classification, generation and segmentation outputs.

    Args:
        gate: threshold used to binarize single-logit outputs.
    """

    def __init__(self, gate: float = 0.5):
        super().__init__()
        self.gate = gate

    @classmethod
    @add_default_section_for_init("core/score/acc")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[ClassificationOutputs, GenerationOutputs, SegmentationOutputs],
        targets: Union[ClassificationTargets, GenerationTargets, SegmentationTargets],
    ):
        """Unwrap containers, flatten to 1-D label tensors and compute accuracy."""
        if hasattr(outputs, "outputs"):
            outputs = outputs.outputs
        if hasattr(targets, "targets"):
            targets = targets.targets
        if isinstance(outputs, GenerationOutputs):
            outputs = outputs.sequences
            # BUGFIX: the original referenced the undefined name `output`
            # here, raising NameError for generation outputs.
            outputs = outputs.view(-1, outputs.size(-1))
        if isinstance(targets, GenerationTargets):
            targets = targets.refs
            targets = targets.view(-1)
        if isinstance(outputs, SegmentationOutputs):
            outputs = outputs.masks
            outputs = torch.cat([t.view(-1) for t in outputs])
        if isinstance(targets, SegmentationTargets):
            targets = targets.targets
            targets = torch.cat([t.view(-1) for t in targets])
        if outputs.dim() == 2:
            # Multi-class logits -> argmax; single logit -> threshold.
            outputs = outputs.argmax(dim=-1) if outputs.size(-1) > 1 else outputs[:, 0] > self.gate
        if targets.dim() == 2 and targets.size(-1) == 1:
            targets = targets[:, 0]
        assert outputs.dim() == 1 and targets.dim() == 1
        return accuracy_score(targets, outputs)
@register_score("core/score/rec")
class RecallScore(Score):
    """Recall metric for classification outputs."""

    def __init__(self, gate: float = 0.5):
        super().__init__()
        # Threshold used to binarize single-logit outputs.
        self.gate = gate

    @classmethod
    @add_default_section_for_init("core/score/rec")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, ClassificationOutputs],
        targets: Union[BaseTargets, ClassificationTargets],
    ):
        """Unwrap containers, reduce to 1-D label tensors and compute recall."""
        outputs = outputs.outputs if hasattr(outputs, "outputs") else outputs
        targets = targets.targets if hasattr(targets, "targets") else targets
        if outputs.dim() == 2:
            if outputs.size(-1) > 1:
                outputs = outputs.argmax(dim=-1)
            else:
                outputs = outputs[:, 0] > self.gate
        if targets.dim() == 2 and targets.size(-1) == 1:
            targets = targets[:, 0]
        assert outputs.dim() == 1 and targets.dim() == 1
        return recall_score(targets, outputs)
@register_score("core/score/f1")
class F1Score(Score):
    """F1 metric for classification outputs."""

    def __init__(self, gate: float = 0.5):
        super().__init__()
        # Threshold used to binarize single-logit outputs.
        self.gate = gate

    @classmethod
    @add_default_section_for_init("core/score/f1")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, ClassificationOutputs],
        targets: Union[BaseTargets, ClassificationTargets],
    ):
        """Unwrap containers, reduce to 1-D label tensors and compute F1."""
        outputs = outputs.outputs if hasattr(outputs, "outputs") else outputs
        targets = targets.targets if hasattr(targets, "targets") else targets
        if outputs.dim() == 2:
            if outputs.size(-1) > 1:
                outputs = outputs.argmax(dim=-1)
            else:
                outputs = outputs[:, 0] > self.gate
        if targets.dim() == 2 and targets.size(-1) == 1:
            targets = targets[:, 0]
        assert outputs.dim() == 1 and targets.dim() == 1
        return f1_score(targets, outputs)
@register_score("core/score/auc")
class AUCScore(Score):
    """ROC-AUC metric computed from raw classification scores."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/auc")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, ClassificationOutputs],
        targets: Union[BaseTargets, ClassificationTargets],
    ):
        """Select the positive-class score column and compute ROC-AUC."""
        outputs = outputs.outputs if hasattr(outputs, "outputs") else outputs
        targets = targets.targets if hasattr(targets, "targets") else targets
        if outputs.dim() == 2:
            # Two-column logits: take the positive class; single column: use it.
            if outputs.size(-1) > 1:
                outputs = outputs[:, 1]
            else:
                outputs = outputs[:, 0]
        if targets.dim() == 2 and targets.size(-1) == 1:
            targets = targets[:, 0]
        assert outputs.dim() == 1 and targets.dim() == 1
        return roc_auc_score(targets, outputs)
@register_score("core/score/mattcorr")
class MattCorrScore(Score):
    """Matthews correlation coefficient metric.

    Args:
        gate: threshold used to binarize single-logit outputs.

    BUGFIX: forward() referenced self.gate, but __init__ never defined
    it, so single-logit outputs raised AttributeError.  The defaulted
    parameter (0.5, consistent with the other scores) is backward
    compatible with existing no-argument construction.
    """

    def __init__(self, gate: float = 0.5):
        super().__init__()
        self.gate = gate

    @classmethod
    @add_default_section_for_init("core/score/mattcorr")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, ClassificationOutputs],
        targets: Union[BaseTargets, ClassificationTargets],
    ):
        """Unwrap containers, reduce to 1-D label tensors and compute MCC."""
        if hasattr(outputs, "outputs"):
            outputs = outputs.outputs
        if hasattr(targets, "targets"):
            targets = targets.targets
        if outputs.dim() == 2:
            outputs = outputs.argmax(dim=-1) if outputs.size(-1) > 1 else outputs[:, 0] > self.gate
        if targets.dim() == 2 and targets.size(-1) == 1:
            targets = targets[:, 0]
        assert outputs.dim() == 1 and targets.dim() == 1
        return matthews_corrcoef(targets, outputs)
@register_score("core/score/pearsonr_corr")
class PearsonrCorrScore(Score):
    """Pearson correlation metric for regression outputs."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/pearsonr_corr")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, ClassificationOutputs],
        targets: Union[BaseTargets, ClassificationTargets],
    ):
        """Squeeze single-column tensors to 1-D and return the r statistic."""
        outputs = outputs.outputs if hasattr(outputs, "outputs") else outputs
        targets = targets.targets if hasattr(targets, "targets") else targets
        if outputs.dim() == 2 and outputs.size(-1) == 1:
            outputs = outputs[:, 0]
        if targets.dim() == 2 and targets.size(-1) == 1:
            targets = targets[:, 0]
        assert outputs.dim() == 1 and targets.dim() == 1
        # pearsonr returns (statistic, p-value); only the statistic is scored.
        return pearsonr(targets, outputs)[0]
@register_score("core/score/spearmanr_corr")
class SpearmanrCorrScore(Score):
    """Spearman rank-correlation metric for regression outputs."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/spearmanr_corr")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, ClassificationOutputs],
        targets: Union[BaseTargets, ClassificationTargets],
    ):
        """Squeeze single-column tensors to 1-D and return the rho statistic."""
        outputs = outputs.outputs if hasattr(outputs, "outputs") else outputs
        targets = targets.targets if hasattr(targets, "targets") else targets
        if outputs.dim() == 2 and outputs.size(-1) == 1:
            outputs = outputs[:, 0]
        if targets.dim() == 2 and targets.size(-1) == 1:
            targets = targets[:, 0]
        assert outputs.dim() == 1 and targets.dim() == 1
        # spearmanr returns (statistic, p-value); only the statistic is scored.
        return spearmanr(targets, outputs)[0]
@register_score("core/score/bleu")
class BleuScore(Score):
    """BLEU metric for generated token sequences."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/bleu")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, GenerationOutputs],
        targets: Union[BaseTargets, GenerationTargets],
    ):
        """Unwrap sequence/reference containers and score BLEU on token ids."""
        sequences = outputs.sequences if hasattr(outputs, "sequences") else outputs
        references = targets.refs if hasattr(targets, "refs") else targets
        # Tokens 0 and 1 (padding / special tokens) are excluded from scoring.
        return bleu_score(
            references.long(),
            sequences.long(),
            ignore_tokens=[0, 1],
        )
@register_score("core/score/rouge1")
class Rouge1Score(Score):
    """ROUGE-1 F1 metric for generated token sequences."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/rouge1")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, GenerationOutputs],
        targets: Union[BaseTargets, GenerationTargets],
    ):
        """Unwrap containers and return the ROUGE-1 F1 component."""
        sequences = outputs.sequences if hasattr(outputs, "sequences") else outputs
        references = targets.refs if hasattr(targets, "refs") else targets
        scores = rouge1_score(
            references.long(),
            sequences.long(),
            ignore_tokens=[0, 1],
        )
        return scores["f1"]
@register_score("core/score/rouge2")
class Rouge2Score(Score):
    """ROUGE-2 F1 metric for generated token sequences."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/rouge2")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, GenerationOutputs],
        targets: Union[BaseTargets, GenerationTargets],
    ):
        """Unwrap containers and return the ROUGE-2 F1 component."""
        sequences = outputs.sequences if hasattr(outputs, "sequences") else outputs
        references = targets.refs if hasattr(targets, "refs") else targets
        scores = rouge2_score(
            references.long(),
            sequences.long(),
            ignore_tokens=[0, 1],
        )
        return scores["f1"]
@register_score("core/score/rougel")
class RougelScore(Score):
    """ROUGE-L F1 metric for generated token sequences."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/rougel")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, GenerationOutputs],
        targets: Union[BaseTargets, GenerationTargets],
    ):
        """Unwrap containers and return the ROUGE-L F1 component."""
        sequences = outputs.sequences if hasattr(outputs, "sequences") else outputs
        references = targets.refs if hasattr(targets, "refs") else targets
        scores = rougel_score(
            references.long(),
            sequences.long(),
            ignore_tokens=[0, 1],
        )
        return scores["f1"]
@register_score("core/score/loss")
class LossScore(Score):
    """Score that ranks checkpoints by negated evaluation loss."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    # BUGFIX: the config section was copy-pasted as "core/score/rougel";
    # it must match this score's own registration key.
    @add_default_section_for_init("core/score/loss")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[BaseOutputs, LossOutputs],
        targets: Union[BaseTargets, GenerationTargets],
    ):
        """Return -mean(loss) so that lower loss scores higher.

        NOTE(review): returns None when outputs has no `loss` attribute,
        matching the original implicit fall-through.
        """
        if hasattr(outputs, "loss"):
            loss = outputs.loss
            return -float(torch.mean(loss))
@register_score("core/score/voc_map")
class VOCMAPScore(Score):
    """PASCAL-VOC mean average precision for detection outputs."""

    def __init__(
        self,
    ):
        super().__init__()

    @classmethod
    @add_default_section_for_init("core/score/voc_map")
    def from_core_configure(cls, config, **kwargs):
        pass

    def forward(
        self,
        outputs: Union[DetectionOutputs],
        targets: Union[DetectionTargets],
    ):
        """Convert per-image tensors to numpy arrays and compute VOC mAP."""
        to_numpy = lambda tensors: [t.numpy() for t in tensors]
        return voc_map_score(
            p_bboxes=to_numpy(outputs.bboxes),
            p_scores=to_numpy(outputs.scores),
            p_classes=to_numpy(outputs.classes),
            gt_bboxes=to_numpy(targets.bboxes),
            gt_classes=to_numpy(targets.classes),
        )
|
from pyradioconfig.calculator_model_framework.interfaces.iphy import IPhy
class PHYS_IEEE802154_WiSUN_Ocelot(IPhy):
### EU Region ###
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-166
def PHY_IEEE802154_WISUN_868MHz_2GFSK_50kbps_1a_EU(self, model, phy_name=None):
    """Wi-SUN FAN EU-868 MHz mode 1a PHY (2GFSK, 50 kbps, mod index 0.5).

    Returns the configured phy object.
    """
    phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, EU-868MHz, 1a (2FSK 50kbps mi=0.5)', phy_name=phy_name)
    #Select the correct SUNFSK mode
    phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode1a
    #Define WiSUN Profile / Region specific inputs
    phy.profile_inputs.base_frequency_hz.value = 863100000 # FAN EU Mode #1a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.channel_spacing_hz.value = 100000 # FAN EU Mode #1a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.preamble_length.value = 8 * 8 # FAN EU Mode #1a, WiSUN 20140727-PHY-Profile Table 6
    phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
    #Default xtal frequency of 39MHz
    phy.profile_inputs.xtal_frequency_hz.value = 39000000
    return phy
#Apps-verified
def PHY_IEEE802154_WISUN_873MHz_2GFSK_50kbps_1a_EU(self, model, phy_name=None):
    """Wi-SUN FAN EU-873 MHz mode 1a PHY (2GFSK, 50 kbps, mod index 0.5).

    Returns the configured phy object.
    """
    phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, EU-873MHz, 1a (2FSK 50kbps mi=0.5)', phy_name=phy_name)
    # Select the correct SUNFSK mode
    phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode1a
    # Define WiSUN Profile / Region specific inputs
    phy.profile_inputs.base_frequency_hz.value = 870100000 # FAN EU Mode #1a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.channel_spacing_hz.value = 100000 # FAN EU Mode #1a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.preamble_length.value = 8 * 8 # FAN EU Mode #1a, WiSUN 20140727-PHY-Profile Table 6
    phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
    # Default xtal frequency of 39MHz
    phy.profile_inputs.xtal_frequency_hz.value = 39000000
    return phy
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-165
def PHY_IEEE802154_WISUN_868MHz_2GFSK_100kbps_2a_EU(self, model, phy_name=None):
    """Wi-SUN FAN EU-868 MHz mode 2a PHY (2GFSK, 100 kbps, mod index 0.5).

    Returns the configured phy object.
    """
    phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, EU-868MHz, 2a (2FSK 100kbps mi=0.5)', phy_name=phy_name)
    # Select the correct SUNFSK mode
    phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode2a
    #Define WiSUN Profile / Region specific inputs
    phy.profile_inputs.base_frequency_hz.value = 863100000 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.preamble_length.value = 8 * 8 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 6
    phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
    # Default xtal frequency of 39MHz
    phy.profile_inputs.xtal_frequency_hz.value = 39000000
    return phy
# Apps-verified
def PHY_IEEE802154_WISUN_873MHz_2GFSK_100kbps_2a_EU(self, model, phy_name=None):
    """Wi-SUN FAN EU-873 MHz mode 2a PHY (2GFSK, 100 kbps, mod index 0.5).

    Returns the configured phy object.
    """
    phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, EU-873MHz, 2a (2FSK 100kbps mi=0.5)',
                        phy_name=phy_name)
    # Select the correct SUNFSK mode
    phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode2a
    # Define WiSUN Profile / Region specific inputs
    phy.profile_inputs.base_frequency_hz.value = 870200000 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.preamble_length.value = 8 * 8 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 6
    phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
    # Default xtal frequency of 39MHz
    phy.profile_inputs.xtal_frequency_hz.value = 39000000
    return phy
# Apps-verified
def PHY_IEEE802154_WISUN_868MHz_2GFSK_150kbps_3_EU(self, model, phy_name=None):
    """Wi-SUN FAN EU-868 MHz mode 3 PHY (2GFSK, 150 kbps, mod index 0.5).

    Returns the configured phy object.
    """
    phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, EU-868MHz, 3 (2FSK 150kbps mi=0.5)',
                        phy_name=phy_name)
    # Select the correct SUNFSK mode
    phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode3
    # Define WiSUN Profile / Region specific inputs
    phy.profile_inputs.base_frequency_hz.value = 863100000 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 3
    phy.profile_inputs.preamble_length.value = 12 * 8 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 6
    phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
    # Default xtal frequency of 39MHz
    phy.profile_inputs.xtal_frequency_hz.value = 39000000
    # BUGFIX: the original fell off the end and returned None; every
    # sibling PHY builder returns the configured phy object.
    return phy
# Apps-verified
def PHY_IEEE802154_WISUN_873MHz_2GFSK_150kbps_3_EU(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, EU-873MHz, 3 (2FSK 150kbps mi=0.5)',
phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode3
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 870200000 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 12 * 8 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 6
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
### NA Region ###
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-168
def PHY_IEEE802154_WISUN_915MHz_2GFSK_50kbps_1b_NA(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, NA-915MHz, 1b (2FSK 50kbps mi=1.0)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode1b
#Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 902200000 # FAN NA Mode #1b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN NA Mode #1b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 8 * 8 # FAN NA Mode #1b, WiSUN 20140727-PHY-Profile Table 6
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
return phy
# Apps-verified
def PHY_IEEE802154_WISUN_915MHz_2GFSK_100kbps_2a_NA(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN,
readable_name='Wi-SUN FAN, NA-915MHz, 2a (2FSK 100kbps mi=0.5)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode2a
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 902200000 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 8 * 8 # FAN EU Mode #2a, WiSUN 20140727-PHY-Profile Table 6
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-167
def PHY_IEEE802154_WISUN_915MHz_2GFSK_150kbps_3_NA(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, NA-915MHz, 3 (2FSK 150kbps mi=0.5)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode3
#Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 902400000 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 400000 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 12 * 8 # FAN NA Mode #3, WiSUN 20140727-PHY-Profile Table 6
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
return phy
#Apps-verified
def PHY_IEEE802154_WISUN_915MHz_2GFSK_200kbps_4a_NA(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN,
readable_name='Wi-SUN FAN, NA-915MHz, 4a (2GFSK 200kbps mi=0.5)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode4a
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 902400000
phy.profile_inputs.channel_spacing_hz.value = 400000
phy.profile_inputs.preamble_length.value = 12 * 8
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
# Apps-verified
def PHY_IEEE802154_WISUN_915MHz_2GFSK_300kbps_5_NA(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN,
readable_name='Wi-SUN FAN, NA-915MHz, 5 (2GFSK 300kbps mi=0.5)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode5
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 902600000
phy.profile_inputs.channel_spacing_hz.value = 600000
phy.profile_inputs.preamble_length.value = 24 * 8
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
### JP Region ###
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-170
def PHY_IEEE802154_WISUN_920MHz_2GFSK_50kbps_1b_JP_ECHONET(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN ECHONET, JP-920MHz, 1b (2FSK 50kbps mi=1.0)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode1b
#Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 920600000 # Echonet JP Mode #1b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 200000 # Echonet JP Mode #1b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 8 * 8 # Echonet JP Mode #1b, WiSUN 20140727-PHY-Profile Table 5
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.CCITT_16 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
return phy
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-169
def PHY_IEEE802154_WISUN_920MHz_2GFSK_100kbps_2b_JP_ECHONET(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN ECHONET, JP-920MHz, 2b (2FSK 100kbps mi=1.0)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode2b
#Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 920900000 # Echonet JP Mode #2b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 400000 # Echonet JP Mode #2b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 15 * 8 # Echonet JP Mode #2b, WiSUN 20140727-PHY-Profile Table 5
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.CCITT_16 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
return phy
#Apps-verified
def PHY_IEEE802154_WISUN_920MHz_2GFSK_100kbps_2b_JP(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, JP-920MHz, 2b (2FSK 100kbps mi=1.0)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode2b
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 920900000
phy.profile_inputs.channel_spacing_hz.value = 400000
phy.profile_inputs.preamble_length.value = 8 * 8
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
return phy
#Apps-verified
def PHY_IEEE802154_WISUN_920MHz_2GFSK_200kbps_4b_JP(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN,
readable_name='Wi-SUN FAN, JP-920MHz, 4b (2GFSK 200kbps mi=1.0)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode4b
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 920800000
phy.profile_inputs.channel_spacing_hz.value = 600000
phy.profile_inputs.preamble_length.value = 12 * 8
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
### CN Region ###
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-1218
def PHY_IEEE802154_WISUN_470MHz_2GFSK_50kbps_1b_CN(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, CN-470MHz, 1b (2FSK 50kbps mi=1.0)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode1b
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 470200000 # FAN CN Mode #1b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN CN Mode #1b, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 8 * 8 # FAN CN Mode #1b, WiSUN 20140727-PHY-Profile Table 5
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
return phy
# Owner: <NAME>
# JIRA Link: https://jira.silabs.com/browse/PGOCELOTVALTEST-1219
def PHY_IEEE802154_WISUN_470MHz_2GFSK_100kbps_2a_CN(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.WiSUN, readable_name='Wi-SUN FAN, CN-470MHz, 2a (2FSK 100kbps mi=0.5)', phy_name=phy_name)
# Select the correct SUNFSK mode
phy.profile_inputs.wisun_mode.value = model.vars.wisun_mode.var_enum.Mode2a
# Define WiSUN Profile / Region specific inputs
phy.profile_inputs.base_frequency_hz.value = 470200000 # FAN CN Mode #2a, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.channel_spacing_hz.value = 200000 # FAN CN Mode #2a, WiSUN 20140727-PHY-Profile Table 3
phy.profile_inputs.preamble_length.value = 8 * 8 # FAN CN Mode #2a, WiSUN 20140727-PHY-Profile Table 5
phy.profile_inputs.crc_poly.value = model.vars.crc_poly.var_enum.ANSIX366_1979 # 802.15.4-2015, 7.2.10
# Default xtal frequency of 39MHz
phy.profile_inputs.xtal_frequency_hz.value = 39000000
return phy |
# Copyright 2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
A Pythonic API to interact with the charm hook environment.
:author: <NAME> <<EMAIL>>
'''
import six
from charmhelpers.core import hookenv
from collections import OrderedDict
if six.PY3:
from collections import UserDict # pragma: nocover
else:
from UserDict import IterableUserDict as UserDict # pragma: nocover
class Relations(OrderedDict):
    '''Mapping of relation name -> relation id -> Relation.

    >>> rels = Relations()
    >>> rels['sprog']['sprog:12']['client/6']['widget']
    'remote widget'
    >>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
    >>> rels['sprog']['sprog:12'].local['widget']
    'local widget'
    >>> rels.peer.local['widget']
    'local widget on the peer relation'
    '''

    def __init__(self):
        super(Relations, self).__init__()
        # Relation names come out alphabetically; ids within a name are
        # ordered by their numeric suffix (e.g. 'db:2' before 'db:10').
        for relname in sorted(hookenv.relation_types()):
            by_id = OrderedDict()
            ids = hookenv.relation_ids(relname)
            for relid in sorted(ids, key=lambda rid: int(rid.split(':', 1)[-1])):
                by_id[relid] = Relation(relid)
            self[relname] = by_id

    @property
    def peer(self):
        '''The Relation for this unit's peer relation, or None if there is none.'''
        wanted = hookenv.peer_relation_id()
        for by_id in self.values():
            if wanted in by_id:
                return by_id[wanted]
class Relation(OrderedDict):
    '''Mapping of unit -> remote RelationInfo for a relation.
    This is an OrderedDict mapping, ordered numerically
    by unit number.
    Also provides access to the local RelationInfo, and peer RelationInfo
    instances by the 'local' and 'peers' attributes.
    >>> r = Relation('sprog:12')
    >>> r.keys()
    ['client/9', 'client/10'] # Ordered numerically
    >>> r['client/10']['widget'] # A remote RelationInfo setting
    'remote widget'
    >>> r.local['widget'] # The local RelationInfo setting
    'local widget'
    '''
    relid = None # The relation id.
    relname = None # The relation name (also known as relation type).
    service = None # The remote service name, if known.
    local = None # The local end's RelationInfo.
    peers = None # Map of peer -> RelationInfo. None if no peer relation.
    def __init__(self, relid):
        # Sort remote units numerically by unit number so iteration order
        # matches the docstring ('client/9' before 'client/10').
        remote_units = hookenv.related_units(relid)
        remote_units.sort(key=lambda u: int(u.split('/', 1)[-1]))
        super(Relation, self).__init__((unit, RelationInfo(relid, unit))
                                       for unit in remote_units)
        self.relname = relid.split(':', 1)[0]
        self.relid = relid
        self.local = RelationInfo(relid, hookenv.local_unit())
        # The remote service name is taken from the first related unit,
        # if any unit has joined yet.
        for relinfo in self.values():
            self.service = relinfo.service
            break
        # If we have peers, and they have joined both the provided peer
        # relation and this relation, we can peek at their data too.
        # This is useful for creating consensus without leadership.
        peer_relid = hookenv.peer_relation_id()
        if peer_relid and peer_relid != relid:
            peers = hookenv.related_units(peer_relid)
            if peers:
                peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
                # NOTE(review): each peer's RelationInfo is built with THIS
                # relation's relid (not peer_relid) -- this matches the
                # "peek at their data on this relation" intent above, but
                # confirm it is deliberate.
                self.peers = OrderedDict((peer, RelationInfo(relid, peer))
                                         for peer in peers)
            else:
                self.peers = OrderedDict()
        else:
            # No peer relation, or this *is* the peer relation.
            self.peers = None
    def __str__(self):
        return '{} ({})'.format(self.relid, self.service)
class RelationInfo(UserDict):
    '''The bag of data at one end of a relation.

    Each unit participating in a relation exposes exactly one bag of
    settings on it; this class wraps that bag.

    Only the local unit's bag may be written. Remote bags are immutable
    and stay fixed for the duration of the hook. Writes to the local bag
    become visible to other units only after the hook completes
    successfully; on failure they are rolled back.

    Unlike a normal mapping, assigning None to a key deletes it.

    >>> relinfo = RelationInfo('db:12') # Default is the local unit.
    >>> relinfo['user'] = 'fred'
    >>> relinfo['user']
    'fred'
    >>> relinfo['user'] = None
    >>> 'fred' in relinfo
    False

    All reads and writes go through hookenv.relation_get and
    hookenv.relation_set, which also own the caching; data is fetched
    lazily on each access.
    '''
    relid = None    # The relation id, e.g. 'db:12'.
    relname = None  # The relation name (also known as the relation type).
    unit = None     # The unit id, e.g. 'client/9'.
    number = None   # The unit number as an integer.
    service = None  # The service name, e.g. 'client'.

    def __init__(self, relid, unit):
        self.relid = relid
        self.relname = relid.split(':', 1)[0]
        self.unit = unit
        self.service, unit_number = unit.split('/', 1)
        self.number = int(unit_number)

    def __str__(self):
        return '{} ({})'.format(self.relid, self.unit)

    @property
    def data(self):
        # Fetched on demand; hookenv handles caching and synchronization.
        return hookenv.relation_get(rid=self.relid, unit=self.unit)

    def __setitem__(self, key, value):
        if self.unit != hookenv.local_unit():
            raise TypeError('Attempting to set {} on remote unit {}'.format(key, self.unit))
        if value is not None and not isinstance(value, six.string_types):
            # No implicit casting: an int stored here would read back as a
            # string in later hooks, and mutable values could change without
            # relation-set ever being invoked.
            raise ValueError('Only string values allowed')
        hookenv.relation_set(self.relid, {key: value})

    def __delitem__(self, key):
        # In Juju relations, storing None and deleting are the same thing.
        self[key] = None
class Leader(UserDict):
    '''Mapping view of the Juju leadership settings.

    Reads go through hookenv.leader_get and writes through
    hookenv.leader_set; only the leader unit may write. As with
    relation data, assigning None to a key deletes it.
    '''

    def __init__(self):
        # Deliberately skip UserDict.__init__: it would create a plain
        # 'data' attribute, shadowing the property below.
        pass

    @property
    def data(self):
        return hookenv.leader_get()

    def __setitem__(self, key, value):
        if not hookenv.is_leader():
            raise TypeError('Not the leader. Cannot change leader settings.')
        acceptable = value is None or isinstance(value, six.string_types)
        if not acceptable:
            # No implicit casting -- see RelationInfo.__setitem__ for why.
            raise ValueError('Only string values allowed')
        hookenv.leader_set({key: value})

    def __delitem__(self, key):
        # In Juju leadership settings, storing None and deleting are the same.
        self[key] = None
|
# Script: build initial conditions for a binary galaxy-cluster merger
# (HSE profiles, tangled magnetic field, particles) for a GAMER simulation
# using the cluster_generator package.
import cluster_generator as cg
import unyt as u
from numpy.random import RandomState
import numpy as np
# Note that cluster_generator does not use unyt units for speed and simplicity,
# so mass units are Msun, length units are kpc, and time units are Myr
# Put the two clusters at a redshift z = 0.1
z = 0.1
# M200 for both clusters
M200_1 = 6.0e14 # in Msun
M200_2 = 2.0e14 # in Msun
conc = 4.0 # A good approximation to the concentration parameter for both clusters
# Find r200 for both clusters
r200_1 = cg.find_overdensity_radius(M200_1, 200.0, z=z)
r200_2 = cg.find_overdensity_radius(M200_2, 200.0, z=z)
# Scale radii to be used for the sNFW profiles
a1 = r200_1/conc
a2 = r200_2/conc
# For the total mass density profile, we will use a "super-NFW" profile, which
# is very similar to the NFW profile but falls off slightly faster (<NAME>.,
# <NAME>., & <NAME>. 2018, MNRAS)
# Determine the total mass for each sNFW profile
M1 = cg.snfw_total_mass(M200_1, r200_1, a1)
M2 = cg.snfw_total_mass(M200_2, r200_2, a2)
# Use this total mass to construct total mass profiles for each cluster
Mt1 = cg.snfw_mass_profile(M1, a1)
Mt2 = cg.snfw_mass_profile(M2, a2)
# Use the total mass profiles to determine r500/M500 and r2500/M2500 for
# each cluster
r500_1, M500_1 = cg.find_radius_mass(Mt1, z=z, delta=500.0)
r2500_1, M2500_1 = cg.find_radius_mass(Mt1, z=z, delta=2500.0)
r500_2, M500_2 = cg.find_radius_mass(Mt2, z=z, delta=500.0)
r2500_2, M2500_2 = cg.find_radius_mass(Mt2, z=z, delta=2500.0)
# Total mass density profiles for each cluster
rhot1 = cg.snfw_density_profile(M1, a1)
rhot2 = cg.snfw_density_profile(M2, a2)
# Sprinkle some stars in--2% of the total mass for each cluster
rhos1 = 0.02*rhot1
rhos2 = 0.02*rhot2
# Find the gas mass fraction within R500 (using the relationship between
# M500 and fgas from <NAME>., et al. 2009, ApJ, 692, 1033)
f_g1 = cg.f_gas(M500_1)
f_g2 = cg.f_gas(M500_2)
# This sets the gas density profile using the functional form from <NAME>.,
# <NAME>., <NAME>., et al. 2006, ApJ, 640, 691 for the first cluster. We
# set the scale density to 1.0 first and will rescale it in the next line by the
# gas mass within r500
rhog1 = cg.vikhlinin_density_profile(1.0, 0.2*r2500_1, 0.67*r200_1, 1.0, 0.67, 3.0)
rhog1 = cg.rescale_profile_by_mass(rhog1, f_g1*M500_1, r500_1)
# Same as above for the second cluster
rhog2 = cg.vikhlinin_density_profile(1.0, 0.2*r2500_2, 0.67*r200_2, 1.0, 0.67, 3.0)
rhog2 = cg.rescale_profile_by_mass(rhog2, f_g2*M500_2, r500_2)
# This is the plasma beta parameter for the ratio of the thermal pressure to the
# magnetic pressure
beta = 100.0
# This sets up the profiles for the first cluster assuming hydrostatic equilibrium,
# taking the gas density, total mass density, and stellar density as input
# (0.1 and 20000.0 are presumably the min/max radii in kpc -- TODO confirm)
hse1 = cg.ClusterModel.from_dens_and_tden(0.1, 20000.0, rhog1, rhot1,
                                          stellar_density=rhos1)
# This sets a radial magnetic field strength profile using the beta parameter and
# the pressure in the profile, assuming p_B = B^2/s (thus gaussian=False)
# NOTE(review): "B^2/s" above looks like a typo for B^2/8pi -- confirm
hse1.set_magnetic_field_from_beta(beta, gaussian=False)
# These lines are the same as above for the second cluster
hse2 = cg.ClusterModel.from_dens_and_tden(0.1, 20000.0, rhog2, rhot2,
                                          stellar_density=rhos2)
hse2.set_magnetic_field_from_beta(beta, gaussian=False)
# Write the profiles for each cluster to files
hse1.write_model_to_h5("profile1.h5", overwrite=True)
hse2.write_model_to_h5("profile2.h5", overwrite=True)
# Set a random number generator for the generation of the magnetic field
# vector potential in 3D
prng = RandomState(24)
# This is the width of the GAMER simulation box and its center
w = 15000.0 # in kpc
center = np.array([0.5*w]*3)
# This determines the centers of the clusters, assuming a distance of
# 3 Mpc and zero impact parameter, centered on the box center
d = 3000.0 # in kpc
b = 0.0 # in kpc
center1, center2 = cg.compute_centers_for_binary(center, d, b)
# This sets up a 3D magnetic vector potential which GAMER will take the curl
# of on the AMR grid to get the initial B-field. It is a tangled field which
# uses a Kolmogorov spectrum with a large-scale cutoff of 500 kpc, a
# small-scale cutoff of 10 kpc, and is proportional on average to the pressure
# everywhere (given by the magnetic field profile of the clusters from above).
# Outside of r_max = 5000.0 kpc from each cluster center the average B-field
# is constant
left_edge = center-0.5*w
right_edge = center+0.5*w
dims = (256,)*3
# NOTE(review): `prng` defined above is never passed to this constructor,
# so the field realization may not use the seeded generator -- confirm
# whether RadialRandomMagneticVectorPotential accepts a prng argument.
bfield = cg.RadialRandomMagneticVectorPotential(left_edge, right_edge, dims,
                                                10.0, 500.0, center1,
                                                "profile1.h5", ctr2=center2,
                                                profile2="profile2.h5", r_max=5000.0)
# Write the 3D vector potential to the B_IC file
bfield.write_to_h5("B_IC", overwrite=True, length_unit="Mpc",
                   field_unit="sqrt(1e14*Msun/Mpc**3)*Mpc/(10*Gyr)")
# We now set up the velocities of the two clusters. Assume 1500 km/s
# relative velocity, and then use the M200 of the two clusters to
# set velocity vectors in roughly the CM frame. The velocity is in
# the x-direction only
velocity = (1500.0*u.km/u.s).to_value("kpc/Myr")
velocity1 = np.array([velocity*M200_2/(M200_1+M200_2), 0.0, 0.0])
velocity2 = np.array([-velocity*M200_1/(M200_1+M200_2), 0.0, 0.0])
# Now we set up the cluster initial conditions. use 2e6 DM particles,
# 4e4 star particles. At r_max = 5000.0 kpc, the profiles of each cluster
# are constant
num_particles = {"dm": 2_000_000, "star": 40_000}
ics = cg.ClusterICs("1to3_b0.0", 2, ["profile1.h5", "profile2.h5"],
                    [center1, center2], [velocity1, velocity2],
                    num_particles=num_particles, mag_file="B_IC", r_max=5000.0)
# This writes the GAMER-specific IC files that are needed, generates
# the particles, and prints out the contents of the Input__TestProb
# file which should be used
cg.setup_gamer_ics(ics)
|
<reponame>zeroSteiner/protocon
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# protocon/utilities.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import ast
import collections
import functools
import ipaddress
import re
import socket
AddrInfo = collections.namedtuple('AddrInfo', ('family', 'type', 'proto', 'canonname', 'sockaddr'))
_SockAddr4 = collections.namedtuple('_SockAddr4', ('address', 'port'))
_SockAddr6 = collections.namedtuple('_SockAddr6', ('address', 'port', 'flow_info', 'socpe_id'))
def getaddrinfos(host, port=0, family=0, type=0, proto=0, flags=0):
"""
Return the results from :py:func:`socket.getaddrinfo` but as a tuple of
:py:class:`.AddrInfo` objects for easier readability.
:return: A tuple of :py:class:`.AddrInfo` objects.
:rtype: tuple
"""
infos = []
for result in socket.getaddrinfo(host, port, family=family, type=type, proto=proto, flags=flags):
family, type, proto, canonname, sockaddr = result
if family == socket.AF_INET:
sockaddr = _SockAddr4(*sockaddr)
elif family == socket.AF_INET6:
sockaddr = _SockAddr6(*sockaddr)
infos.append(AddrInfo(family, type, proto, canonname, sockaddr))
return tuple(infos)
def _literal_type(type_, value):
try:
value = ast.literal_eval(str(value))
except (SyntaxError, ValueError):
raise TypeError('value is not a ' + type_.__name__) from None
if not isinstance(value, type_):
raise TypeError('value is not a ' + type_.__name__)
return value
def literal_type(type_):
	"""Return a one-argument callable that parses its input as a Python
	literal of type *type_* (a partial application of _literal_type)."""
	return functools.partial(_literal_type, type_)
class NetworkLocation(object):
	"""An IP address and port pair, parseable from and rendered to the usual
	string forms ('1.2.3.4', '1.2.3.4:80', '[::1]:80')."""
	__slots__ = ('address', 'port')
	def __init__(self, address, port):
		# address may be a string or packed form; ip_address normalizes it.
		self.address = ipaddress.ip_address(address)
		self.port = port
	def __repr__(self):
		return "{}({!r}, {!r})".format(self.__class__.__name__, self.address, self.port)
	def __str__(self):
		address = self.address
		if self.port:
			# IPv6 addresses are bracketed when combined with a port.
			if isinstance(address, ipaddress.IPv6Address):
				address = '[' + str(address) + ']'
			address = str(address) + ':' + str(self.port)
		return str(address)
	@classmethod
	def from_string(cls, string, default_port=0):
		"""Parse 'addr', 'addr:port' or '[v6addr]:port' into a NetworkLocation.

		:raises ValueError: if *string* is not a recognizable address.

		Fix: the regex fragments are now raw strings; previously sequences
		like ``\\d`` and ``\\]`` appeared in plain string literals, which is
		an invalid escape sequence and emits DeprecationWarning (and later
		SyntaxWarning/SyntaxError) on modern Python.
		"""
		ipv4_regex = r'(25[0-5]|2[0-4]\d|1\d\d|\d{1,2})(\.(25[0-5]|2[0-4]\d|1\d\d|\d{1,2})){3}'
		ipv6_regex = r'[0-9a-f]{0,4}(:[0-9a-f]{0,4}){2,7}'
		regex = r'^(?P<bracket>\[)?'
		regex += '(?P<location>' + '|'.join([ipv4_regex, ipv6_regex]) + ')'
		# Conditional group: a bracketed (IPv6) address must be followed by
		# ']:port'; otherwise ':port' is optional.
		regex += r'(?(bracket)\]:(?=\d)|(:(?=\d)|$))(?P<port>(?<=:)\d+)?$'
		match = re.match(regex, string, flags=re.IGNORECASE)
		if match is None:
			raise ValueError('invalid network location specified')
		port = int(match.group('port')) if match.group('port') else default_port
		return cls(match.group('location'), port)
	def to_address(self):
		"""Return the (address-string, port) tuple form used by the socket API."""
		return (str(self.address), self.port)
from unittest import TestCase, mock
from basketball_reference_web_scraper.data import OutputWriteOption
from basketball_reference_web_scraper.writers import JSONWriter
class TestJSONWriter(TestCase):
    """Unit tests for JSONWriter: default serialization options, in-memory vs.
    file output, and merging of caller-supplied custom options."""
    def setUp(self):
        # A stand-in encoder class and arbitrary payload; the writer under
        # test is rebuilt fresh for every test method.
        self.mock_encoder = mock.Mock()
        self.mock_data = ["some data"]
        self.writer = JSONWriter(encoder=self.mock_encoder)
    def test_default_options(self):
        # The writer's documented defaults: sorted keys, 4-space indentation.
        self.assertEqual({"sort_keys": True, "indent": 4}, JSONWriter.DEFAULT_OPTIONS)
    @mock.patch("basketball_reference_web_scraper.writers.json.dumps")
    def test_writing_to_memory_with_default_options(self, json_dumps):
        # should_write_to_file() -> False routes output through json.dumps
        # (in-memory) with only the default options applied.
        options = mock.Mock(
            custom_options=None,
            should_write_to_file=mock.Mock(return_value=False),
        )
        self.writer.write(data=self.mock_data, options=options)
        options.should_write_to_file.assert_called_once_with()
        json_dumps.assert_called_once_with(self.mock_data, cls=self.mock_encoder, sort_keys=True, indent=4)
    @mock.patch("basketball_reference_web_scraper.writers.json.dumps")
    def test_writing_to_memory_with_custom_options(self, json_dumps):
        # Custom options are merged on top of the defaults and forwarded to
        # json.dumps as extra keyword arguments.
        options = mock.Mock(
            custom_options={
                "jae": "baebae",
                "bae": "jadley",
            },
            should_write_to_file=mock.Mock(return_value=False),
        )
        self.writer.write(data=self.mock_data, options=options)
        options.should_write_to_file.assert_called_once_with()
        json_dumps.assert_called_once_with(
            self.mock_data,
            cls=self.mock_encoder,
            sort_keys=True,
            indent=4,
            jae="baebae",
            bae="jadley",
        )
    @mock.patch("basketball_reference_web_scraper.writers.json.dump")
    def test_writing_to_file_with_default_options(self, json_dump):
        # should_write_to_file() -> True opens the target path (utf8, no
        # newline translation) and streams through json.dump with defaults.
        file_path = "some file path"
        with mock.patch("builtins.open", mock.mock_open()) as mock_file:
            options = mock.Mock(
                file_path=file_path,
                mode=OutputWriteOption.WRITE,
                custom_options=None,
                should_write_to_file=mock.Mock(return_value=True),
            )
            self.writer.write(data=self.mock_data, options=options)
            options.should_write_to_file.assert_called_once_with()
            mock_file.assert_called_once_with(file_path, OutputWriteOption.WRITE.value, newline="", encoding="utf8")
            json_dump.assert_called_once_with(
                self.mock_data,
                mock_file(),
                cls=self.mock_encoder,
                sort_keys=True,
                indent=4,
            )
    @mock.patch("basketball_reference_web_scraper.writers.json.dump")
    def test_writing_to_file_with_custom_options(self, json_dump):
        # File output with custom options: defaults plus the caller's extra
        # keyword arguments are all forwarded to json.dump.
        file_path = "some file path"
        with mock.patch("builtins.open", mock.mock_open()) as mock_file:
            options = mock.Mock(
                file_path=file_path,
                mode=OutputWriteOption.WRITE,
                custom_options={
                    "jae": "baebae",
                    "bae": "jadley",
                },
                should_write_to_file=mock.Mock(return_value=True),
            )
            self.writer.write(data=self.mock_data, options=options)
            options.should_write_to_file.assert_called_once_with()
            mock_file.assert_called_once_with(file_path, OutputWriteOption.WRITE.value, newline="", encoding="utf8")
            json_dump.assert_called_once_with(
                self.mock_data,
                mock_file(),
                cls=self.mock_encoder,
                sort_keys=True,
                indent=4,
                jae="baebae",
                bae="jadley",
            )
|
# Generated by Django 3.0.8 on 2020-07-08 22:03
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import jobs.models
class Migration(migrations.Migration):
    """Initial, auto-generated migration for the jobs app.

    Creates the Job, Pipeline, PipelineSchedule, Queue, Worker, Zone and
    WorkerHeartbeat models. Do not hand-edit once applied; create a new
    migration instead.
    """

    initial = True

    # Requires the accounts app's initial migration (Zone -> Account FK)
    # and django_celery_beat (PipelineSchedule extends PeriodicTask).
    dependencies = [
        ('accounts', '0001_initial'),
        ('django_celery_beat', '0012_periodictask_expire_seconds'),
    ]

    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.CharField(default=jobs.models.generate_job_id, editable=False, help_text='The unique id of the job.', max_length=32, primary_key=True, serialize=False)),
                ('description', models.TextField(blank=True, help_text='A short description of the job.', null=True)),
                ('created', models.DateTimeField(auto_now_add=True, help_text='The time the job was created.')),
                ('updated', models.DateTimeField(auto_now=True, help_text='The time the job was last updated.')),
                ('began', models.DateTimeField(help_text='The time the job began.', null=True)),
                ('ended', models.DateTimeField(help_text='The time the job ended.', null=True)),
                ('status', models.CharField(blank=True, choices=[('WAITING', 'WAITING'), ('DISPATCHED', 'DISPATCHED'), ('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('STARTED', 'STARTED'), ('RUNNING', 'RUNNING'), ('SUCCESS', 'SUCCESS'), ('FAILURE', 'FAILURE'), ('CANCELLED', 'CANCELLED'), ('REVOKED', 'REVOKED'), ('TERMINATED', 'TERMINATED'), ('REJECTED', 'REJECTED'), ('RETRY', 'RETRY')], help_text='The current status of the job.', max_length=32, null=True)),
                ('is_active', models.BooleanField(db_index=True, default=False, help_text='Is the job active?')),
                ('method', models.CharField(choices=[('parallel', 'parallel'), ('series', 'series'), ('chain', 'chain'), ('pull', 'pull'), ('push', 'push'), ('copy', 'copy'), ('decode', 'decode'), ('encode', 'encode'), ('convert', 'convert'), ('compile', 'compile'), ('build', 'build'), ('execute', 'execute'), ('session', 'session'), ('sleep', 'sleep')], help_text='The job method.', max_length=32)),
                ('params', models.JSONField(blank=True, help_text='The parameters of the job; a JSON object.', null=True)),
                ('result', models.JSONField(blank=True, help_text='The result of the job; a JSON value.', null=True)),
                ('error', models.JSONField(blank=True, help_text='Any error associated with the job; a JSON object with type, message etc.', null=True)),
                ('log', models.JSONField(blank=True, help_text='The job log; a JSON array of log objects, including any errors.', null=True)),
                ('runtime', models.FloatField(blank=True, help_text='The running time of the job.', null=True)),
                ('url', models.CharField(blank=True, help_text='The URL of the job on the local network; can be used to interact with it.', max_length=256, null=True)),
                ('worker', models.CharField(blank=True, help_text='The identifier of the worker that ran the job.', max_length=64, null=True)),
                ('retries', models.IntegerField(blank=True, help_text='The number of retries to fulfil the job.', null=True)),
                ('callback_id', models.CharField(blank=True, help_text='The id of the object to call back.', max_length=256, null=True)),
                ('callback_method', models.CharField(blank=True, help_text='The name of the method to call back.', max_length=128, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Pipeline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.SlugField(help_text='A name for this pipeline. Must be unique to the project.', max_length=256)),
                ('definition', models.JSONField(help_text='The JSON definition of the pipeline.')),
            ],
        ),
        # Multi-table inheritance: a schedule IS a celery-beat PeriodicTask.
        migrations.CreateModel(
            name='PipelineSchedule',
            fields=[
                ('periodictask_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_celery_beat.PeriodicTask')),
            ],
            bases=('django_celery_beat.periodictask',),
        ),
        migrations.CreateModel(
            name='Queue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='The name of the queue.', max_length=512)),
                ('priority', models.IntegerField(default=0, help_text='The relative priority of jobs placed on the queue.')),
                ('untrusted', models.BooleanField(default=False, help_text='Whether or not the queue should be sent jobs which run untrusted code.')),
                ('interrupt', models.BooleanField(default=False, help_text='Whether or not the queue should be sent jobs which can not be interupted.False (default): jobs should not be interrupted')),
            ],
        ),
        migrations.CreateModel(
            name='Worker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='The time that the worker started (time of the first event for the worker).')),
                ('started', models.DateTimeField(blank=True, help_text="The time that the worker started (only recorded on a 'worker-online' event).", null=True)),
                ('updated', models.DateTimeField(blank=True, help_text='The time that the last heatbeat was received for the worker.', null=True)),
                ('finished', models.DateTimeField(blank=True, help_text="The time that the worker finished (only recorded on a 'worker-offline' event)", null=True)),
                ('hostname', models.CharField(help_text='The `hostname` of the worker.', max_length=512)),
                ('utcoffset', models.IntegerField(blank=True, help_text='The `utcoffset` of the worker.', null=True)),
                ('pid', models.IntegerField(blank=True, help_text='The `pid` of the worker.', null=True)),
                ('freq', models.FloatField(blank=True, help_text="The worker's heatbeat frequency (in seconds)", null=True)),
                ('software', models.CharField(blank=True, help_text="The name and version of the worker's software.", max_length=256, null=True)),
                ('os', models.CharField(blank=True, help_text='Operating system that the worker is running on.', max_length=64, null=True)),
                ('details', models.JSONField(blank=True, help_text='Details about the worker including queues and statsSee https://docs.celeryproject.org/en/stable/userguide/workers.html#statistics', null=True)),
                ('signature', models.CharField(blank=True, help_text='The signature of the worker used to identify it. It is possible, but unlikely, that two or more active workers have the same signature.', max_length=512, null=True)),
                ('queues', models.ManyToManyField(help_text='The queues that this worker is listening to.', related_name='workers', to='jobs.Queue')),
            ],
        ),
        migrations.CreateModel(
            name='Zone',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='The identifier of the queue the job was posted to.', max_length=256, validators=[django.core.validators.RegexValidator('^[a-z][a-z0-9\\-]*$', 'Name should start with a lowercase letter and only contain lowercase letters, digits and hyphens')])),
                ('account', models.ForeignKey(help_text='The account that this zone is linked to.', on_delete=django.db.models.deletion.CASCADE, related_name='zones', to='accounts.Account')),
            ],
        ),
        migrations.CreateModel(
            name='WorkerHeartbeat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField(help_text='The time of the heartbeat.')),
                ('clock', models.IntegerField(help_text="The tick number of the worker's monotonic clock")),
                ('active', models.IntegerField(help_text='The number of active jobs on the worker.')),
                ('processed', models.IntegerField(help_text='The number of jobs that have been processed by the worker.')),
                ('load', models.JSONField(help_text='An array of the system load over the last 1, 5 and 15 minutes. From os.getloadavg().')),
                ('worker', models.ForeignKey(help_text='The worker that the heartbeat is for.', on_delete=django.db.models.deletion.CASCADE, related_name='heartbeats', to='jobs.Worker')),
            ],
        ),
        # Added after Queue/Zone creation to avoid an ordering cycle.
        migrations.AddField(
            model_name='queue',
            name='zone',
            field=models.ForeignKey(blank=True, help_text='The zone this job is associated with.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='jobs.Zone'),
        ),
    ]
|
from neuromp.preprocessing.tree import AST
from enum import IntEnum
from itertools import product
import subprocess
import time
import numpy as np
from copy import deepcopy
from neuromp.preprocessing.convertCToCuda import constroiCuda
class VarStates(IntEnum):
    """Parallelization state assigned to a variable when building a pragma.

    SHARED means the variable is shared between threads; PRIVATE means
    each thread gets its own copy.
    """
    SHARED = 1
    PRIVATE = 2
class Code(object):
    """Drives the search for the best CUDA parallelization of a C source file.

    Parses the input C file, runs the sequential build as a baseline, then
    (via :meth:`step`) mutates per-variable pragma states, regenerates a CUDA
    version, compiles/runs it with ``nvcc`` and rewards speed-ups whose output
    matches the sequential run.
    """

    def __init__(self, code):
        # `code` is a filesystem path to a C source file.
        self.ast = AST()
        self.statements = self.ast.parse(code)
        self.lines = self._getLines(code)
        self.for_pos = self.ast.fors[0]  # position of the first `for` loop found by the AST
        self.pragmas = self._initPragmas()
        # TODO: change here
        # self.best_pragma = self._builtPragma()
        self.best_pragma = "Execution cannot find correct result"
        self.seq_time = None
        self.seq_output = None
        self.par_time = None
        self.par_output = None
        self.speed_up = 1.0
        # TODO: changed here (very low start so the first valid reward wins)
        self.max_speed_up = -1000
        self.best_time = 0
        # Action space: every (variable, state) combination.
        self.actions = list(product(self.ast.variables, list(VarStates)))
        self.runSequential()
        self.total_time = self.seq_time

    def _getLines(self, code):
        """Read the source file, normalizing whitespace on every line."""
        resp = []
        with open(code) as f:
            for l in f:
                l = l.rstrip()
                resp.append(' '.join(l.split()))
        return resp

    def _initPragmas(self):
        """Start every variable in the SHARED state."""
        resp = {}
        all_vars = self.ast.variables
        for v in all_vars:
            resp[v] = VarStates.SHARED
        return resp

    # TODO: should change here - part that builds the pragmas for testing
    def _builtPragmaDeclaration(self):
        """Build the CUDA declaration block (cudaMallocManaged etc.) for the
        current per-variable pragma states.
        """
        print("Construindo pragma")
        groups = {}
        #resp = "#pragma omp parallel for "
        resp = ""
        # Group variable names by their state name (e.g. "SHARED").
        for k, v in self.pragmas.items():
            if v.name not in groups:
                groups[v.name] = []
            groups[v.name].append(k)
        print("groups: ")
        print(groups)
        # Build the declaration, e.g.:
        # int n = 1;
        # int *valor;
        # size_t size = n * sizeof(int);
        # cudaMallocManaged(&valor, size);
        for k in groups:
            print(k)
            print(groups[k])
            print(k.lower())
            if k == "REDUCTION":
                # TODO: this should be fixed here to use atomics
                print("atomic")
                resp += "{}(+:{}) ".format(k.lower(), ', '.join(groups[k]))
            else:
                # TODO: adjust the variable type and the vector size
                # NOTE(review): only the first variable of each group is
                # declared/allocated here — confirm that is intended.
                resp += "int n = 1; \n"
                resp += "int *" + str(groups[k][0]) + "; \n"
                resp += "cudaMallocManaged(&"+str(groups[k][0])+", n * sizeof(int)); \n"
                #resp += "{}({}) ".format(k.lower(), ', '.join(groups[k]))
        print("resp")
        print(resp)
        print("resp.rstrip()")
        print(resp.rstrip())
        return resp.rstrip()

    def builtVariablesPonteir(self, tmp_lines):
        """Rewrite generated source lines so selected variables become
        pointers (``*v`` in the GPU call, ``v[0]`` at use sites) and private
        accumulations become ``atomicAdd`` calls.

        NOTE(review): matching is done against exact hard-coded source lines
        (e.g. ``"valor = 2;\\n"``) — this only works for the specific sample
        programs those strings come from.
        """
        print("-------------------------------------------------------------_AQUI-----------------------------------------------------------------------")
        groups = {}
        #resp = "#pragma omp parallel for "
        resp = ""
        variablesPointer = []
        variablesAtomic = []
        for k, v in self.pragmas.items():
            if v.name not in groups:
                groups[v.name] = []
            groups[v.name].append(k)
            # TODO: rename SHARED to a pointer-related name
            # TODO: the `count` case needs to be handled elsewhere
            if v.name == 'SHARED' and k != 'count':
                variablesPointer.append(k)
            if v.name == 'PRIVATE' and k != 'count':
                variablesPointer.append(k)
                variablesAtomic.append(k)
        print("groups: ")
        print(groups)
        print(variablesPointer)
        for i in range(0, len(tmp_lines), 1):
            # TODO: fix here to update the received variable
            if tmp_lines[i] == "//Chamada da função GPUFuncion\n":
                # Make each tracked variable a pointer in the GPU call line.
                for v in variablesPointer:
                    tmp_lines[i+1] = tmp_lines[i+1].replace(str(v), "*" + str(v))
            # TODO: change to wherever the variable is found; make this the
            # else branch of the first if
            if tmp_lines[i] == "valor = 2;\n" or tmp_lines[i] == "valor = valor + 2;\n" or tmp_lines[i] == "valor = valor + 4.0/(1.0 + ((count + 0.5)*(1.0/(double)2147480000))*((count + 0.5)*(1.0/(double)2147480000)));\n":
                for v in variablesPointer:
                    tmp_lines[i] = tmp_lines[i].replace(str(v), str(v) + "[0]")
                # TODO: fix this replacement, the 2 is not always fixed
                for v in variablesAtomic:
                    # tmp_lines[i] = tmp_lines[i].replace("[0]", "")
                    # tmp_lines[i] = "atomicAdd(" + str(v) + ", 2);\n"
                    tmp_lines[i] = "atomicAdd(" + str(v) + ", 4.0/(1.0 + ((count + 0.5)*(1.0/(double)2147480000))*((count + 0.5)*(1.0/(double)2147480000))));\n"
            # TODO: change to wherever the variable is found; make this the
            # else branch of the first if
            if tmp_lines[i] == 'printf("%d\\n", valor);\n':
                print("TROCAAAAAAAAAAAAAANDOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO")
                # tmp_lines[i] = tmp_lines[i].replace(", valor", ", valor[0]")
                for v in variablesPointer:
                    tmp_lines[i] = tmp_lines[i].replace(str(v), str(v) + "[0]")
        return tmp_lines

    def getEncodedPragma(self):
        """Encode each variable's current state as an integer feature."""
        resp = []
        all_vars = self.ast.variables
        for v in all_vars:
            # 12 is an offset so states don't collide with other feature ids.
            # NOTE(review): offset meaning not documented — confirm.
            resp.append(12 + self.pragmas[v].value)
        return resp

    def getInput(self):
        """Return the current state vector fed to the learning agent."""
        resp = self.getEncodedPragma()
        #for s in self.statements:
        #    resp += self.ast.preproStatement(s)
        return np.array(resp)

    def runParallel(self):
        """Generate, compile (nvcc) and run the CUDA version.

        Returns (stdout, wall_time); on compile failure or timeout returns
        (None, 1000) as a penalty value.
        """
        tmp_lines = deepcopy(self.lines)
        tmp_lines, lista_declarao_variaveis, lista_atribuicoes_variaveis, lista_tipo_variavel = constroiCuda(tmp_lines, self.ast.variables)
        # TODO: convert the current code to the new CUDA code
        # print("tmp_lines: ")
        # print(tmp_lines)
        # print("variaveis: ")
        # print(self.ast.variables)
        # #constroiCuda(tmp_lines, )
        # print("lista_declarao_variaveis: ")
        # print(lista_declarao_variaveis)
        # print("lista_atribuicoes_variaveis: ")
        # print(lista_atribuicoes_variaveis)
        # print("lista_tipo_variavel: ")
        # print(lista_tipo_variavel)
        print(lista_declarao_variaveis[0])
        for_pos = 0
        # TODO: get from lista_declarao_variaveis the position of the
        # variable being used
        for i in range(0, len(tmp_lines), 1):
            # Find the declaration of the first tracked variable, blank it,
            # and remember where to insert the generated declarations.
            if (tmp_lines[i].replace(' ', '') == lista_declarao_variaveis[0].replace(' ', '')+'\n'):
                for_pos = i
                tmp_lines[i] = '\n'
        print(for_pos)
        # TODO: to find for_pos we need to scan the code for the declaration
        # of the variable being changed
        tmp_lines.insert(for_pos, self._builtPragmaDeclaration())
        # print("tmp_lines novo: ")
        # print(tmp_lines)
        #print(self._builtPragma())
        tmp_lines = self.builtVariablesPonteir(tmp_lines)
        with open("tmp_par.cu", "w") as f:
            # TODO: changed here, adding these next two lines
            f.write("#include <stdio.h>" + "\n")
            #f.write("#include <omp.h>" + "\n")
            for l in tmp_lines:
                if l != "#pragma neuromp":
                    f.write(l + "\n")
        try:
            #gcc fast.c main.c -Wall -Wextra -O3 -I ../../include/ ../../lib/libcapb.a -lm -fopenmp
            #subprocess.check_output(['gcc', 'tmp_par.c', '-O3', '-lm', '-fopenmp', '-o', 'tmp_par'],
            #                        stderr=subprocess.STDOUT, universal_newlines=True)
            # TODO: new way to run with CUDA:
            subprocess.check_output(['nvcc', 'tmp_par.cu', '-arch=sm_70', '-o', 'tmp_par', '-run'],
                                    stderr=subprocess.STDOUT, universal_newlines=True)
        except subprocess.CalledProcessError as e:
            # TODO: changed the following line
            # print("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
            # Compilation failure: penalize with a large fixed time.
            self.par_output = None
            self.par_time = 1000
            return self.par_output, self.par_time
        b = time.time()
        p = subprocess.Popen(['./tmp_par'],
                             universal_newlines=True,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        try:
            # TODO: changed the following line
            self.par_output, error = p.communicate(timeout=100000 + self.seq_time)
            # self.par_output, error = p.communicate(timeout=self.seq_time)
            self.par_time = time.time() - b
            self.total_time += self.par_time
            self.par_output = self.par_output.rstrip()
            print("AQUIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII:")
            print(self.par_time)
            print(self.par_output)
        except subprocess.TimeoutExpired as exc:
            self.par_output = None
            self.par_time = 1000
            # TODO: removed the print
            # print("Status : TIMEOUT")
        return self.par_output, self.par_time

    def runSequential(self):
        """Compile (gcc) and run the sequential baseline.

        Returns (stdout, wall_time) and caches them on the instance.
        Raises RuntimeError if compilation fails.
        """
        with open("tmp_seq.c", "w") as f:
            f.write("#include <stdio.h>" + "\n")
            for l in self.lines:
                # Strip the tool's marker pragma from the emitted source.
                if l != "#pragma neuromp":
                    f.write(l + "\n")
        try:
            # subprocess.check_output(['gcc', 'tmp_seq.c', '-O3', '-lm', '-fopenmp', '-o', 'tmp_seq'],
            subprocess.check_output(['gcc', 'tmp_seq.c', '-o', 'tmp_seq'],
                                    stderr=subprocess.STDOUT, universal_newlines=True)
        except subprocess.CalledProcessError as e:
            raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
        b = time.time()
        p = subprocess.Popen(['./tmp_seq'],
                             universal_newlines=True,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        self.seq_output, error = p.communicate()
        self.seq_time = time.time() - b
        self.seq_output = self.seq_output.rstrip()
        return self.seq_output, self.seq_time

    def step(self, action):
        """Apply one (variable, state) action, evaluate it and return
        (next_state, reward, done).
        """
        a = self.actions[action]
        self.pragmas[a[0]] = a[1]
        # TODO: changed here
        # reward = self.getReward()
        reward, par_time_now = self.getReward()
        next_state = self.getInput()
        # TODO: change here
        # done = (reward >= self.max_speed_up)
        # done = (reward >= self.max_speed_up and reward != -1)
        # print("Testando: " + self._builtPragma())
        if (reward >= self.max_speed_up and reward != -1):
            print("ENTROU")
            self.max_speed_up = reward
            self.best_pragma = self._builtPragmaDeclaration()
            self.best_time = par_time_now
        # NOTE(review): after the update above, reward >= self.max_speed_up
        # is True whenever the branch was taken — confirm `done` semantics.
        return next_state, reward, (reward >= self.max_speed_up)

    def render(self):
        """Print the currently generated declaration block."""
        print(self._builtPragmaDeclaration())

    def speedUp(self):
        """Sequential time divided by parallel time (>1 means faster)."""
        return self.seq_time / self.par_time

    def getReward(self):
        """Run the parallel version; reward is the speed-up if the output
        matches the sequential run and is actually faster, else (-1, -1).
        """
        self.runParallel()
        if self.seq_output == self.par_output:
            s = self.speedUp()
            if s > 1.0:
                return s, self.par_time
            else:
                return -1, -1
        else:
            return -1, -1

    def reset(self):
        """Reset all pragma states to SHARED and return the state vector."""
        self.pragmas = self._initPragmas()
        return self.getInput()
if __name__ == "__main__":
    # Smoke test: parse the sample pi program, run both versions and
    # print the reward. Requires gcc/nvcc and ../data/pi.c to exist.
    c = Code('../data/pi.c')
    print(c.getInput())
    #c.setProperty(8)
    #c.setProperty(10)
    print(c.getInput())
    #print(c.actions)
    print(c.runSequential())
    print(c.runParallel())
    print(c.getReward())
|
#=================================================================================================================================================@
#Chapter 2 - Creating a simple first model
#=================================================================================================================================================@
# NOTE(review): these are course exercise snippets; `df`, `pd`,
# `NUMERIC_COLUMNS`, `LABELS` and `multilabel_train_test_split` are
# provided by the course environment, not defined in this file.
#Setting up a train-test split in scikit-learn
# Create the new DataFrame: numeric_data_only
numeric_data_only = df[NUMERIC_COLUMNS].fillna(-1000)
# Get labels and convert to dummy variables: label_dummies
label_dummies = pd.get_dummies(df[LABELS])
# Create training and test sets (stratified per label combination)
X_train, X_test, y_train, y_test = multilabel_train_test_split(numeric_data_only,label_dummies, size=0.2,seed=123)
# Print the info
print("X_train info:")
print(X_train.info())
print("\nX_test info:")
print(X_test.info())
print("\ny_train info:")
print(y_train.info())
print("\ny_test info:")
print(y_test.info())
#=================================================================================================================================================@
#Training a model
# Import classifiers
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
# Create the DataFrame: numeric_data_only
numeric_data_only = df[NUMERIC_COLUMNS].fillna(-1000)
# Get labels and convert to dummy variables: label_dummies
label_dummies = pd.get_dummies(df[LABELS])
# Create training and test sets
X_train, X_test, y_train, y_test = multilabel_train_test_split(numeric_data_only, label_dummies,size=0.2,seed=123)
# Instantiate the classifier: clf (one binary classifier per label column)
clf = OneVsRestClassifier(LogisticRegression())
# Fit the classifier to the training data
clf.fit(X_train,y_train)
# Print the accuracy
print("Accuracy: {}".format(clf.score(X_test, y_test)))
#=================================================================================================================================================@
#Use your model to predict values on holdout data
# Instantiate the classifier: clf
clf = OneVsRestClassifier(LogisticRegression())
# Fit it to the training data
clf.fit(X_train, y_train)
# Load the holdout data: holdout
holdout = pd.read_csv('HoldoutData.csv', index_col=0)
# Generate predictions: predictions (probabilities, same NaN fill as training)
predictions = clf.predict_proba(holdout[NUMERIC_COLUMNS].fillna(-1000))
#=================================================================================================================================================@
#Writing out your results to a csv for submission
# Generate predictions: predictions
predictions = clf.predict_proba(holdout[NUMERIC_COLUMNS].fillna(-1000))
# Format predictions in DataFrame: prediction_df
prediction_df = pd.DataFrame(columns=pd.get_dummies(df[LABELS]).columns,
                             index=holdout.index,
                             data=predictions)
# Save prediction_df to csv
prediction_df.to_csv('predictions.csv')
# Submit the predictions for scoring: score
score = score_submission(pred_path='predictions.csv')
# Print score
print('Your model, trained with numeric data only, yields logloss score: {}'.format(score))
#=================================================================================================================================================@
#Creating a bag-of-words in scikit-learn
# Import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
# Create the token pattern: TOKENS_ALPHANUMERIC (alphanumeric runs followed by whitespace)
TOKENS_ALPHANUMERIC = '[A-Za-z0-9]+(?=\\s+)'
# Fill missing values in df.Position_Extra
df.Position_Extra.fillna('',inplace=True)
# Instantiate the CountVectorizer: vec_alphanumeric
vec_alphanumeric = CountVectorizer(token_pattern=TOKENS_ALPHANUMERIC)
# Fit to the data
vec_alphanumeric.fit(df.Position_Extra)
# Print the number of tokens and first 15 tokens
msg = "There are {} tokens in Position_Extra if we split on non-alpha numeric"
print(msg.format(len(vec_alphanumeric.get_feature_names())))
print(vec_alphanumeric.get_feature_names()[:15])
#=================================================================================================================================================@
#Combining text columns for tokenization
# Define combine_text_columns()
def combine_text_columns(data_frame, to_drop=NUMERIC_COLUMNS + LABELS):
    """Collapse every text column of *data_frame* into one space-joined
    string per row.

    Columns listed in *to_drop* (by default the numeric and label columns)
    are removed first; NaNs become empty strings so they disappear from the
    joined output.
    """
    # Only drop columns that actually exist in this frame.
    droppable = set(to_drop) & set(data_frame.columns.tolist())
    text_data = data_frame.drop(droppable, axis=1)
    text_data = text_data.fillna("")
    # One space-separated string per row.
    return text_data.apply(" ".join, axis=1)
#=================================================================================================================================================@
#What's in a token?
# Import the CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
# Create the basic token pattern (any non-space run followed by whitespace)
TOKENS_BASIC = '\\S+(?=\\s+)'
# Create the alphanumeric token pattern
TOKENS_ALPHANUMERIC = '[A-Za-z0-9]+(?=\\s+)'
# Instantiate basic CountVectorizer: vec_basic
vec_basic = CountVectorizer(token_pattern=TOKENS_BASIC)
# Instantiate alphanumeric CountVectorizer: vec_alphanumeric
vec_alphanumeric = CountVectorizer(token_pattern=TOKENS_ALPHANUMERIC)
# Create the text vector (one combined string per row; see combine_text_columns)
text_vector = combine_text_columns(df)
# Fit and transform vec_basic
vec_basic.fit_transform(text_vector)
# Print number of tokens of vec_basic
print("There are {} tokens in the dataset".format(len(vec_basic.get_feature_names())))
# Fit and transform vec_alphanumeric
vec_alphanumeric.fit_transform(text_vector)
# Print number of tokens of vec_alphanumeric
print("There are {} alpha-numeric tokens in the dataset".format(len(vec_alphanumeric.get_feature_names())))
#=================================================================================================================================================@
#=================================================================================================================================================@
#=================================================================================================================================================@
#=================================================================================================================================================@
#=================================================================================================================================================@
#=================================================================================================================================================@
#=================================================================================================================================================@
#=================================================================================================================================================@
#=================================================================================================================================================@
|
import os
import nltk
from typemap import type_map
import contextnet_api as cnapi
from logger import print_edges
import words
from cache import set_cache, string_cache, set_global_root
import cache
import secondary_functions
import cache
import loop
from json_socket import make_socket, send_json
ident_dict = {}
socket = None
def init(socket_uri="ws://127.0.0.1:8009"):
    '''
    Initialization hook for anything requiring a first boot.

    Opens the module-level JSON socket at `socket_uri`; pass None to skip
    socket creation.
    '''
    global socket
    if socket_uri is not None:
        print 'Making secondary_functions socket'
        socket = make_socket(socket_uri)
def apply_to_context(sentence):
    '''given data from the token assess, build clean data for the context
    engine, populating with missing data as required.

    Returns a tuple of Word objects, one per tokenized word in `sentence`.
    '''
    # success, v = get_cache(filepath)
    thin_structure_words = tokenize_assess_string(sentence, allow_fetch=True)
    # Send to the data cleaner for the next stage of cross referencing.
    updated_words = populate_sub_words(thin_structure_words)
    # NOTE(review): `words` here shadows the module-level `words` import.
    words = thin_structure_words
    print words.keys()
    print 'tree', thin_structure_words['tree']
    send_json({ "sentence": sentence })
    res = tuple()
    if 'words' in words:
        for wordset in words['words']:
            # wordset is (word, tag_type, ident dict, dictionary reference)
            word, wtype, ident, dict_ref = wordset
            print "looking at - ", word, wtype
            '''
            Example of an ident['meta'] relation entry:
            (Pdb) pp(ident['meta'][0:2])
            [{'end': {u'@id': u'/c/en/fruit',
                      u'label': u'fruit',
                      u'language': u'en',
                      u'term': u'/c/en/fruit'},
              'id': u'/a/[/r/RelatedTo/,/c/en/apple/,/c/en/fruit/]',
              'rel': {u'@id': u'/r/RelatedTo', u'label': u'RelatedTo'},
              'start': {u'@id': u'/c/en/apple',
                        u'label': u'apple',
                        u'language': u'en',
                        u'term': u'/c/en/apple'},
              'surfaceText': u'[[apple]] is related to [[fruit]]',
              'weight': 12.80968383684781},
             {'end': {u'@id': u'/c/en/red',
                      u'label': u'red',
                      u'language': u'en',
                      u'term': u'/c/en/red'},
              'id': u'/a/[/r/HasProperty/,/c/en/apple/,/c/en/red/]',
              'rel': {u'@id': u'/r/HasProperty', u'label': u'HasProperty'},
              'start': {u'@id': u'/c/en/apple',
                        u'label': u'apple',
                        u'language': u'en',
                        u'term': u'/c/en/apple'},
              'surfaceText': u'[[apple]] can be [[red]]',
              'weight': 9.591663046625438}]
            '''
            word_def = clean_for_context(wordset, ident.get('meta', []))
            res += (word_def,)
    else:
        print 'no word information'
    print 'Have {} words'.format(res)
    return res
class Word(object):
    """A single tagged word plus the relationships discovered for it.

    `relatives` buckets related words by relation label; `_relations` keeps
    the flat list of relation records as they were added.
    """

    def __init__(self, word, tag_type):
        self.word = word
        self.tag_type = tag_type
        self.relatives = {}     # relation label -> list of related words
        self._relations = []    # flat list of relation record dicts
        self.meaning = None
        self.antonyms = None
        self.synonyms = None

    def add_relative(self, word, word_type, weight=1, sentence=None, language=None):
        '''Associate a word with this word in relation to the given type'''
        record = {
            'word': word,
            'type': word_type,
            'sentence': sentence,
            'weight': weight,
            'language': language,
        }
        self._relations.append(record)

    def add_relative_type(self, _type):
        """Ensure a (possibly empty) bucket exists for relation `_type`."""
        if self.relatives.get(_type) is None:
            self.relatives[_type] = []

    def __repr__(self):
        return '<Word("{0}" {1}) rel: {2}~{3}>'.format(
            self.word,
            self.tag_type,
            len(self.relatives),
            len(self._relations),
        )
def clean_for_context(wordset, relations):
    '''Given the entire dataset for a word, generate and return a cleaner API
    word for use within the API.

    wordset:   (word, tag_type, ident dict, dictionary reference) tuple.
    relations: list of ConceptNet-style edge dicts, or None.
    Returns a populated Word instance.
    '''
    langs = ['en']  # NOTE(review): unused here — confirm before removing
    ignore = [u'ExternalURL']
    word, wtype, ident, dict_ref = wordset
    wc = Word(word, tag_type=wtype)
    wc.meaning = dict_ref.get('meaning', None)
    wc.antonyms = dict_ref.get('antonym', [])
    wc.synonyms = dict_ref.get('synonym', [])
    if relations is None:
        print 'No relations for {}'.format(wc)
        return wc
    for edge in relations:
        if edge['rel']['label'] in ignore:
            print 'skipping external url relation'
            continue
        try:
            # Skip edges whose endpoint has no language key.
            endlang = edge['end']['language']
        except KeyError as e:
            print 'Language key missing'
            print edge
            continue
        # Add the type of edge as a future relationship type.
        wc.add_relative_type(edge['rel']['label'])
        wc.add_relative(
            word=edge['end'],
            word_type=edge['rel']['label'],
            weight=edge['weight'],
            sentence=edge['surfaceText'],
            language=edge['end']['language'],
        )
    return wc
def populate_sub_words(thin_structure_words, allow_fetch=True):
    '''A parent string has been tokenized and filled with the raw
    cache data of the words. Populate the same for any
    sub words, attached synonyms and antonyms.

    Returns a list of {'value': synonym, 'meta': assessed tokens} dicts.
    '''
    words = thin_structure_words
    print 'tree', thin_structure_words['tree']
    result = []
    if 'words' in words:
        for wordset in words['words']:
            word, wtype, ident, dict_ref = wordset
            # populate similar
            subl = dict_ref['synonym']
            if subl is None:
                continue
            print 'digressing synonyms of', word, len(subl)
            for sub_word in subl:
                print "looking at - ", sub_word
                value = tokenize_assess_string(sub_word.lower(), allow_fetch=allow_fetch)
                result.append({ 'value': sub_word, 'meta': value })
    else:
        print 'no word information'
    return result
def populate_sub_words_recursive(thin_structure_words, depth=6, _current_depth=0, root_value=None):
    '''Recursively populate sub words, following synonym chains until
    `depth` levels have been explored.
    '''
    print 'populate sub words recursive {}/{}'.format(_current_depth, depth)
    words = populate_sub_words(thin_structure_words, allow_fetch=False)
    if _current_depth >= depth:
        # Depth limit reached; stop descending.
        print 'kill depth'
        return words
    for word_result in words:
        if 'words' in word_result['meta']:
            print 'recursive inspection of', len(word_result['meta'])
            _words = populate_sub_words_recursive(
                word_result['meta'],
                depth=depth,
                _current_depth=_current_depth+1,
                root_value=root_value,
            )
            # NOTE(review): extending the list being iterated — confirm
            # the intended traversal order/termination.
            words.extend(_words)
    return words
def ident_object(word, wtype, tokens):
    """Placeholder: return an empty identity mapping for *word*.

    Arguments are accepted for interface compatibility but unused.
    """
    return {}
def tokenize_assess_string(sentence, allow_fetch=True):
    '''Tokenize `sentence` (with string-level caching) and assess the
    resulting tokens. Returns the dict produced by assess_tokens.
    '''
    # success, v = get_cache(filepath)
    v = cache.get_string_cache(sentence)
    if v is None:
        # Cache miss: tokenize and store for next time.
        v = tokenize(sentence)
        cache.set_string_cache(sentence, v)
    return assess_tokens(v, allow_fetch=allow_fetch)
def iter_print(g):
    '''Pretty-print (word, tag) pairs with the mapped tag description,
    returning `g` unchanged for chaining.
    '''
    print '\n'
    for word, typeof in g:
        print "{:<20} {:<4} {}".format(word, typeof, type_map.get(typeof, '[NO TYPE]'))
    return g
def tokenize(s):
    '''
    Tokenize the given string sentence consisting of words split by spaces.
    Returned is a list of (word, POS tag) pairs from nltk.pos_tag.
    '''
    print 'tokenizing input...', type(s)
    send_json(type='tokenize', input=s, action='start')
    t = nltk.word_tokenize(s)
    g = nltk.pos_tag(t)
    send_json(type='tokenize', input=s, result=g, action='complete')
    return g
def assess_tokens(tokens, allow_fetch=True):
    '''
    Read and analyze a list of (word, tag) tokens. For each discovered word,
    entire word assertions exist.
    Returns a dict with the nltk chunk `tree` and a `words` tuple of
    (word, wtype, ident, dictionary-word) entries.
    '''
    print 'Assessing:'
    send_json(type='assess', action='start', tokens=tokens)
    tree = nltk.chunk.ne_chunk(tokens)
    res = []
    for word, typeof in tokens:
        item = (word, typeof, type_map.get(typeof, '[NO TYPE]'), )
        res.append(item)
    send_json(type='assess', action='complete', result=res, tokens=tokens)
    iter_print(tokens)
    # NOTE(review): the list `res` built above is only used for the
    # send_json call; it is discarded and rebuilt as a tuple here.
    res = ()
    for word, wtype in tokens:
        # thin_structure_words['words'][3] == tokens
        ident = assess_word_token(word, wtype, tokens, allow_fetch=allow_fetch)
        wdd = words.get_word(word)
        res = res + ( (word, wtype, ident, wdd), )
        send_json(
            type='assess',
            action='word',
            word=wdd,
            word_type=wtype,
            tokens=tokens,
            ident=ident,
        )
        # print_edges(ident)
    return dict(tree=tree, words=res)
def assess_word_token(word, wtype, tokens, allow_fetch=True):
    '''
    Given a Word "cake", its type "NN" and associated sentence tokens, "I like cake"
    identify the word and store a cache of identification into the ident_dict.
    Returned is the word entry of the ident_dict; an object of 'idents', 'words'
    and 'wtypes'.
    '''
    ident = "{}_{}".format(word, wtype)
    # if ident_dict.get(ident, None) is None:
    #     # add to flat
    #     ident_dict[ident] = ident_object(word, wtype, tokens)
    if ident_dict.get(word, None) is None:
        # First sighting of this word: build its parent entry (may fetch
        # from the context API when allow_fetch is True).
        ident_dict[word] = ident_parent_object(word, wtype, ident, tokens, allow_fetch=allow_fetch)
    # Record this particular (word, tag) sighting on the shared entry.
    ident_dict[word]['idents'].add(ident)
    ident_dict[word]['words'].add(word)
    ident_dict[word]['wtypes'].add(wtype)
    return ident_dict[word]
def ident_parent_object(word, wtype, ident, tokens, allow_fetch=True):
    '''
    Build a fresh identity-context entry for a single word.

    word:   literal input word, e.g. "cake"
    wtype:  nltk Penn Treebank tag, e.g. "NN"
    ident:  combined identity token, e.g. "Cake_NN"
    tokens: the sentence tokens the word came from
    The 'meta' value is fetched through the context (concept-net) API.
    '''
    entry = {
        'words': set(),
        'wtypes': set(),
        'idents': set(),
        'meta': cnapi.api_result(word.lower(), allow_fetch=allow_fetch),
    }
    # Example of one 'meta' edge from the API:
    #   {'start': {'@id': '/c/en/hello', 'label': 'Hello', ...},
    #    'rel': {'@id': '/r/IsA', 'label': 'IsA'},
    #    'end': {'@id': '/c/en/greeting', 'label': 'greeting', ...},
    #    'surfaceText': '[[Hello]] is a kind of [[greeting]]',
    #    'weight': 4.47213595499958}
    return entry
|
import tensorflow as tf
print(tf.__version__)
from models.official.bert import modeling
from models.official.bert import tokenization
# Paths to a locally-downloaded pretrained cased BERT-Base checkpoint.
# NOTE(review): hard-coded user-specific paths -- adjust per machine.
bert_pretrained_path = "/Users/Qba/Downloads/cased_L-12_H-768_A-12/bert_model.ckpt"
bert_vocab = "/Users/Qba/Downloads/cased_L-12_H-768_A-12/vocab.txt"
bert_config = modeling.BertConfig.from_json_file("/Users/Qba/Downloads/cased_L-12_H-768_A-12/bert_config.json")
bert_config.to_dict()
# The convention in BERT is:
# (a) For sequence pairs:
#  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
#  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
# (b) For single sequences:
#  tokens:   [CLS] the dog is hairy . [SEP]
#  type_ids: 0     0   0   0  0     0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokenizer = tokenization.FullTokenizer(vocab_file=bert_vocab, do_lower_case=False)
tokens = tokenizer.tokenize("Jacob went for a hike!")
# wrap with the special classification/separator tokens expected by BERT
tokens = ["[CLS]"] + tokens + ["[SEP]"]
tokens  # bare expression: notebook-style cell output
input_word_ids = tokenizer.convert_tokens_to_ids(tokens)
# round-trip sanity check of the vocabulary mapping
tokenizer.convert_ids_to_tokens(input_word_ids)
# model = tf.keras.Model()
def get_bert_encoder_model(input_word_ids,
                           input_mask,
                           input_type_ids,
                           config=None,
                           name=None,
                           float_type=tf.float32):
    """Wraps the core BERT model as a keras.Model.

    The three arguments are the Keras Input tensors for token ids, the
    attention mask and the segment/type ids; *config* is a BertConfig.
    Returns a tf.keras.Model mapping those inputs to the encoder output.
    """
    core = modeling.BertModel(config=config, float_type=float_type, name=name)
    encoded = core(input_word_ids, input_mask, input_type_ids, mode="encoder")
    return tf.keras.Model(
        inputs=[input_word_ids, input_mask, input_type_ids],
        outputs=encoded)
# fix: numpy was imported *after* its first use (np.sum), which raised
# NameError at runtime; the import now precedes any use.
import numpy as np

# Build Keras inputs for fixed-length (6-token) sequences and restore the
# pretrained weights into the wrapped BERT encoder.
input_word_ids = tf.keras.Input(shape=(6,), name='tokens', dtype="int32")
input_mask = tf.keras.Input(shape=(6,), name='mask', dtype="int32")
input_type_ids = tf.keras.Input(shape=(6,), name='type', dtype="int32")
model = get_bert_encoder_model(input_word_ids, input_mask, input_type_ids, config=bert_config)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(bert_pretrained_path)
# two example sentences, each tokenizing to 6 wordpiece ids
ids1 = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Jacob went for a hike."))
ids1
ids2 = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Alan is on a bike."))
ids2
input_word_ids = tf.constant([ids1, ids2])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]])
input_type_ids = tf.constant([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])
model
res = model(inputs=[input_word_ids, input_mask, input_type_ids])
# first output is the sequence output; sum over the sequence axis to get
# one vector per sentence
res = res[0]
res = np.sum(res, axis=1)
# pairwise dot products between the two sentence vectors
np.dot(res, np.transpose(res))
|
import sys
import re
import os
import time
import traceback
from ast import literal_eval
import six
from six.moves import configparser
from ply import lex, yacc
import click
import cchardet as chardet
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
__version__ = '0.0.8'
class BConfig(object):
    """
    Container for all document-level configuration and cross-scan state.

    Options live in a ``configparser.ConfigParser`` and are addressed with
    ``cfg['key']`` (DEFAULT section) or ``cfg['sec:key']``. The instance
    also tracks references/citations, the table of contents, footnotes,
    aliases and the scan counter that drives the two-pass parse.
    """
    def __init__(self):
        self.config = configparser.ConfigParser(delimiters=('=', ))
        # cite & reference
        self.refs = {}
        self.cited = []
        # contents
        self.contents = []
        # the dict stores current tag for each heading level
        self.heading_tag = {}
        # footnote list
        self.footnotes = []
        # alias
        self.alias = {}
        self._scan = 0
        self._need_scan = True # at least scan once
        self.scan_info = {}
    def __getitem__(self, item):
        # 'key' reads from DEFAULT; 'sec:key' reads from section 'sec'
        # (the key part may itself contain ':')
        if isinstance(item, six.string_types):
            items = item.split(':')
            if len(items) == 1:
                return self.get_cfg('DEFAULT', items[0])
            elif len(items) >= 2:
                return self.get_cfg(items[0], ':'.join(items[1:]))
        return ""
    def __setitem__(self, item, value):
        # mirror of __getitem__ for writing values
        if isinstance(item, six.string_types):
            items = item.split(':')
            if len(items) == 1:
                return self.set_cfg('DEFAULT', items[0], value)
            elif len(items) >= 2:
                return self.set_cfg(items[0], ':'.join(items[1:]), value)
        return ""
    def get_vars(self):
        """Return a shallow dict copy of the 'v' (document variables) section."""
        if self.config.has_section('v'):
            return dict(self.config._sections['v'])
        return {}
    def set_vars(self, sec):
        """Replace the whole 'v' section with the mapping *sec*."""
        self.config.remove_section('v')
        self.config.add_section('v')
        for k, v in six.iteritems(sec):
            self.config.set('v', k, v)
    def reset_options(self):
        """Restore every DEFAULT option to its built-in value and clear the
        per-scan document state (contents, footnotes, citations, aliases)."""
        for k, _ in self.config.items('DEFAULT'):
            self.config.remove_option('DEFAULT', k)
        self.load(bsmdoc_conf)
        self.set_updated(time.gmtime(), True)
        self['title'] = ''
        self['doctitle'] = '%(TITLE)s'
        self['subtitle'] = ''
        self['show_source'] = False
        self['heading_numbering'] = False
        self['heading_numbering_start'] = 1
        self['heading_in_contents'] = True
        self['show_table_of_contents'] = False
        self['image_numbering'] = False
        self['image_numbering_prefix'] = 'Fig.'
        self['image_numbering_num_prefix'] = ''
        self['image_numbering_next_tag'] = 0
        self['video_numbering'] = 'image' # share numbering with image block
        self['video_numbering_prefix'] = 'Video.'
        self['video_numbering_num_prefix'] = ''
        self['video_numbering_next_tag'] = 0
        self['table_numbering'] = False
        self['table_numbering_prefix'] = 'Table.'
        self['table_numbering_num_prefix'] = ''
        self['table_numbering_next_tag'] = 0
        self['has_equation_ref'] = False
        self.footnotes = []
        self.contents = []
        self.heading_tag = {}
        self.cited = []
        self.alias = {}
    def set_updated(self, t, forced=False):
        """Set the 'updated' timestamp; keep the newest time unless *forced*."""
        time_format = '%Y-%m-%d %H:%M:%S UTC'
        if forced or not self['updated']:
            self['updated'] = time.strftime(time_format, t)
        else:
            ct = time.strptime(self['updated'], time_format)
            if ct < t:
                self['updated'] = time.strftime(time_format, t)
    def get_scan(self) -> int:
        return self._scan
    def next_scan(self):
        # advance to the next scan and assume it is the last one until a
        # request_scan() says otherwise
        self._scan += 1
        self._need_scan = False
    def need_scan(self) -> bool:
        return self._need_scan
    def request_scan(self):
        """request for a second scan, return false if it is the 2nd scan now"""
        if self._scan == 1:
            self._need_scan = True
            return True
        return False
    def reset_scan(self):
        self._scan = 0
        self._need_scan = True
    def get_cfg(self, sec, key):
        """Read option *key* from section *sec* via _to_literal ('' if missing)."""
        val = ''
        if self.config.has_option(sec, key):
            val = _to_literal(self.config.get(sec, key))
        return val
    def set_cfg(self, sec, key, val):
        """Write option *key* in section *sec* (created on demand) as str(val)."""
        if sec != 'DEFAULT' and not self.config.has_section(sec):
            # add section if necessary
            self.config.add_section(sec)
        self.config.set(sec, key, str(val))
    def load(self, txt):
        """Merge ini-style configuration text into the parser."""
        self.config.read_string(txt)
class BParse(object):
    """
    Lexer/parser for bsmdoc markup, built on ply (lex/yacc).

    NOTE: the docstrings of the ``t_*`` functions are the ply token regexes
    and the docstrings of the ``p_*`` functions are the grammar rules --
    they are functional, not documentation.
    """
    # lexer definition
    tokens = (
        'HEADING',
        'NEWPARAGRAPH',
        'NEWLINE',
        'WORD',
        'SPACE',
        'TSTART',
        'TEND',
        'TCELL',
        'THEAD',
        'TROW',
        'RBLOCK',
        'BSTART',
        'BEND',
        'CMD',
        'EQUATION',
        'INLINEEQ',
        'LISTBULLET',
        'BRACKETL',
        'BRACKETR',
        'BRACEL',
        'BRACER',
    )
    states = (
        ('fblock', 'inclusive'),  # function block (parsed normally)
        ('rblock', 'exclusive'),  # raw block (not parsed)
        ('equation', 'exclusive'),  # equation block (not parsed)
        ('table', 'inclusive'),  # table block
        ('link', 'inclusive')  # link block
    )
    # Tokens
    t_ignore = '\t'
    t_rblock_ignore = ''
    t_equation_ignore = ''
    def __init__(self, verbose):
        lex.lex(module=self, reflags=re.M)
        yacc.yacc(module=self, debug=verbose)
        # add function block \__version__ = __version__
        BFunction('__version__')(__version__)
        self.html = ""
        self.config = BConfig()
        self.verbose = verbose
        self.filename = ""
        self._input_stack = []
        self.contents = ''
        # function block supports embedded block, remember the current block
        # level to print the error message correspondingly when error occurs.
        self.block_state = []
        self.heading_level = 0
    def top_block(self):
        """Return the innermost open block (or None)."""
        if self.block_state:
            return self.block_state[-1]
        return None
    def pop_block(self, lineno=-1):
        """Leave the innermost block, restoring the heading level and the
        block-local 'v' variables of the enclosing scope."""
        if self.block_state:
            args = self.block_state.pop()
            self.heading_level = args['heading_level']
            self.config.set_vars(args['config'])
            args.pop('config', None)
            args.pop('heading_level', None)
            return args
        self._error('no more blocks', lineno=lineno)
        return None
    def push_block(self, args):
        """Enter a block described by the dict *args* (at least 'block' and
        'lineno'); snapshots the 'v' section so block-local \\config settings
        do not leak upward."""
        assert isinstance(args, dict)
        if args['block'] == 'heading':
            self.heading_level = len(self.block_state)
        args['heading_level'] = self.heading_level
        # save the default section in configuration, so that when leave the
        # block, the configuration in the block will not change the upper level
        args['config'] = self.config.get_vars()
        self.config.set_vars({})
        self.block_state.append(args)
    def scan(self, txt):
        """Run one lex/yacc pass over *txt*, resetting per-scan options."""
        # start next scan
        self.config.next_scan()
        self._info("scan %d ..." % (self.config.get_scan()))
        # save the table of contents collected from previous scan or empty for
        # 1st scan
        self.config.reset_options()
        self.config['filename'] = self.filename
        self.config['basename'] = os.path.basename(self.filename)
        if os.path.isfile(self.filename):
            mt = time.gmtime(os.path.getmtime(self.filename))
        else:
            mt = time.gmtime()
        self.config.set_updated(mt, True)
        lex.lexer.lineno = 1
        yacc.parse(txt, tracking=True)
    def run(self, txt, filename="<input>", lex_only=False):
        """Parse *txt* (re-scanning while requested) and return the html."""
        self.filename = filename
        if lex_only:
            # output the lexer token for debugging
            lex.input(txt)
            for tok in lex.lexer:
                click.echo(tok)
            return None
        self.config.reset_scan()
        while self.config.need_scan():
            self.scan(txt)
        self.contents = BFunction().makecontent(self.config.contents)
        return self.html
    def pop_input(self):
        """Pop the saved lexer state of the enclosing input (after #include)."""
        if self._input_stack:
            return self._input_stack.pop()
        return None
    def push_input(self, t, txt):
        """Suspend the current lexer input and start lexing *txt* instead."""
        status = {
            'lexdata': t.lexer.lexdata,
            'lexpos': t.lexer.lexpos,
            'lineno': t.lexer.lineno,
            'filename': self.filename
        }
        self._input_stack.append(status)
        t.lexer.input(txt)
        t.lexer.lineno = 1
    def _touch(self, t):
        # record the current line number in the config and hand it back
        self.config['lineno'] = t.lexer.lineno
        return self.config
    def _info(self, msg, **kwargs):
        info = self._scan_info(**kwargs)
        _bsmdoc_info(msg, **info)
    def _warning(self, msg, **kwargs):
        info = self._scan_info(**kwargs)
        _bsmdoc_warning(msg, **info)
    def _error(self, msg, **kwargs):
        info = self._scan_info(**kwargs)
        _bsmdoc_error(msg, **info)
    def _scan_info(self, **kwargs):
        """Assemble the common reporting kwargs (file, verbosity, indent)."""
        info = {'silent': not self.verbose,
                'include': self.filename,
                'cfg': self.config,
                'indent': len(self._input_stack)}
        info.update(kwargs)
        # update the scan info for BFunction, so it can show the debug info
        # (ugly, TODO)
        BFunction.scan_info = dict(info)
        self.config.scan_info = dict(info)
        return info
    # lexer
    def t_error(self, t):
        self._error("illegal character '%s'" % (t.value[0]), lineno=t.lexer.lineno)
        t.lexer.skip(1)
    def t_eof(self, t):
        # resume the enclosing input (if any) when an included file ends
        fn = self.pop_input()
        if fn:
            t.lexer.input(fn['lexdata'])
            t.lexer.lexpos = fn['lexpos']
            t.lexer.lineno = fn['lineno']
            self.filename = fn['filename']
            return t.lexer.token()
        return None
    # ply uses separate eof function for each state, the default is None.
    # define dummy functions to return to the up-level correctly (e.g., include,
    # makecontent)
    t_fblock_eof = t_eof
    t_link_eof = t_eof
    t_table_eof = t_eof
    def t_INCLUDE(self, t):
        r'\#include[^\S\r\n]+[\S]+[\s]*$'
        # '#include somefile' -- lex the included file inline
        filename = t.value.strip()
        filename = filename.replace('#include', '', 1).strip()
        kwargs = self._scan_info(lineno=t.lexer.lineno)
        txt = BFunction().include(filename, **kwargs)
        t.lexer.lineno += t.value.count('\n')
        if txt:
            self.push_input(t, txt)
            self.filename = filename
            if os.path.isfile(filename):
                self.config.set_updated(time.gmtime(os.path.getmtime(filename)), False)
            return t.lexer.token()
        return None
    def t_MAKECONTENT(self, t):
        r'\#makecontent[^\S\r\n]*$'
        # legacy directive: now only sets the corresponding option
        self.config['show_table_of_contents'] = True
        self._warning(r'#makecontent is depreciated, use \config{show_table_of_contents|True}',
                      lineno=t.lexer.lineno)
        return None
    # comment starts with "#", except "&#"
    def t_COMMENT(self, t):
        r'(?<!\&)\#.*'
        pass
    def t_HEADING(self, t):
        r'^[^\S\r\n]*[\=]+[^\S\r\n]*'
        t.value = t.value.strip()
        return t
    def t_LISTBULLET(self, t):
        r'^[^\S\r\n]*[\-\*]+[^\S\r\n]*'
        t.value = t.value.strip()
        return t
    # shortcut to define the latex equations, does not support nested statement
    def t_EQN(self, t):
        r'^[^\S\r\n]*\$\$'
        t.lexer.equation_start = t.lexer.lexpos
        t.lexer.push_state('equation')
    def t_equation_EQN(self, t):
        r'\$\$'
        t.value = t.lexer.lexdata[t.lexer.equation_start:t.lexer.lexpos - 2]
        t.type = 'EQUATION'
        t.lexer.lineno += t.value.count('\n')
        t.lexer.pop_state()
        return t
    # everything except '$$'
    def t_equation_WORD(self, t):
        r'(?:\\.|(\$(?!\$))|[^\$])+'
        pass
    t_equation_error = t_error
    # shortcuts for inline equation
    def t_INLINE_EQN(self, t):
        r'\$[^\$\n]*\$'
        t.type = 'INLINEEQ'
        t.lexer.lineno += t.value.count('\n')
        t.value = t.value[1:-1]
        return t
    def t_INLINE_EQN2(self, t):
        r'\\\([^\n]*?\\\)'
        t.type = 'INLINEEQ'
        t.lexer.lineno += t.value.count('\n')
        t.value = t.value[2:-2]
        return t
    # marks to ignore parsing, and it supports nested statement ('{%{% %}%}')
    # is valid)
    def t_RSTART(self, t):
        r'\{\%'
        t.lexer.rblock_start = t.lexer.lexpos
        t.lexer.rblock_level = 1
        t.lexer.push_state('rblock')
    def t_rblock_RSTART(self, t):
        r'\{\%'
        t.lexer.rblock_level += 1
    def t_rblock_REND(self, t):
        r'\%\}'
        t.lexer.rblock_level -= 1
        if t.lexer.rblock_level == 0:
            t.value = \
                t.lexer.lexdata[t.lexer.rblock_start:t.lexer.lexpos - len(t.value)]
            t.type = 'RBLOCK'
            t.lexer.pop_state()
            return t
        return None
    # ignore '{' if it is followed by '%';
    # ignore '%' if it is followed by '}'
    # it still has one problem "{%{%%}" will not work; instead we can use '{! \{\% !}'
    def t_rblock_WORD(self, t):
        r'(?:\\.|(\{(?!\%))|(\%(?!\}))|[^\{\%])+'
        t.lexer.lineno += t.value.count('\n')
    t_rblock_error = t_error
    # function block
    def t_BSTART(self, t):
        r'\{\!'
        t.lexer.push_state('fblock')
        return t
    def t_fblock_BEND(self, t):
        r'[^\S\r\n]*[\n]?\!\}'
        t.lexer.pop_state()
        t.lexer.lineno += t.value.count('\n')
        return t
    # table
    def t_TSTART(self, t):
        r'^[^\S\r\n]*\{\{'
        t.lexer.push_state('table')
        return t
    def t_table_TEND(self, t):
        r'^[^\S\r\n]*\}\}'
        t.lexer.pop_state()
        return t
    def t_table_THEAD(self, t):
        r'[\s]*\|\+'
        return t
    def t_table_TROW(self, t):
        r'[\s]*\|\-'
        return t
    def t_TCELL(self, t):
        r'\|'
        return t
    def t_BRACEL(self, t):
        r'\{'
        return t
    def t_BRACER(self, t):
        r'\}'
        return t
    # link (ignore '#' in link, so [#anchor] will work)
    def t_BRACKETL(self, t):
        r'\['
        t.lexer.push_state('link')
        return t
    def t_BRACKETR(self, t):
        r'\]'
        t.lexer.pop_state()
        return t
    def t_link_WORD(self, t):
        r'(?:\\(\W)|(\!(?!\}))|(\%(?!\}))|(?<=\&)\#|[^ \$\%\!\n\|\{\}\[\]\\])+'
        t.value = BFunction().escape(t.value)
        t.value = re.sub(r'(\\)(.)', r'\2', t.value)
        return t
    # support the latex stylus command, e.g., \ref{}; and the command must have at
    # least 2 characters
    def t_CMD(self, t):
        r'\\(\w)+'
        return t
    def t_NEWPARAGRAPH(self, t):
        r'\n{2,}'
        t.lexer.lineno += t.value.count('\n')
        return t
    def t_NEWLINE(self, t):
        r'\n'
        t.lexer.lineno += t.value.count('\n')
        return t
    def t_SPACE(self, t):
        r'[^\S\r\n]+'
        t.value = ' '
        return t
    def t_escape_WORD(self, t):
        r'(?:\\(\W))+'
        t.value = BFunction().escape(t.value)
        t.value = re.sub(r'(\\)(.)', r'\2', t.value)
        t.type = 'WORD'
        return t
    # default state, ignore, '!}', '%}', '|', '[', ']', '{', '}', '\n', ' ', '#', '$'
    def t_WORD(self, t):
        r'(?:(\!(?!\}))|(\%(?!\}))|(?<=\&)\#|[^ \$\%\!\#\n\|\{\}\[\]\\])+'
        t.value = BFunction().escape(t.value)
        t.value = re.sub(r'(\\)(.)', r'\2', t.value)
        return t
    # informal summary of the grammar implemented by the p_* rules below
    # (kept verbatim; the authoritative rules are the p_* docstrings)
    """
    article : sections
    sections : sections block
             | block
    block : HEADING logicline
          | paragraph
          | table
          | BSTART sections BEND
          | BSTART blockargs sections BEND
          | RBLOCK
          | EQUATION
          | listbullet
    paragraph : text NEWPARAGRAPH
              | text
    blockargs : blolkargs vtext TCELL
              | vtext TCELL
    table : TSTART thead tbody TEND
          | TSTART tbody TEND
    tbody : tbody trow
          | trow
    trow : vtext TROW rowsep
    thead: vtext THEAD rowsep
    rowsep : rowsep SPACE
           | rowsep NEWLINE
           | rowsep NEWPARAGRAPH
           | SPACE
           | NEWLINE
           | NEWPARAGRAPH
           | empty
    listbullet : listbullet LISTBULLET logicline
               | LISTBULLET logicline
    text : text logicline
         | logicline
    logicline : line
              | line NEWLINE
              | bracetext
              | bracetext NEWLINE
    bracetext : BRACEL sections BRACER
    line : line inlineblock
         | line plaintext
         | inlineblock
         | plaintext
    inlineblock: CMD
               | CMD bracetext
               | CMD BRACEL vtext sections BRACER
               | INLINEEQ
               | BRACLETL sections BRACKETL
               | BRACKETL sections TCELL sections BRACKETR
    plaintext : plaintext WORD
              | plaintext SPACE
              | WORD
              | SPACE
              | empty
    empty :
    """
    def p_article(self, p):
        '''article : sections'''
        self.html = p[1]
    def p_sections_multi(self, p):
        '''sections : sections block'''
        p[0] = p[1] + p[2]
    def p_sections_single(self, p):
        '''sections : block'''
        p[0] = p[1]
    def p_heading(self, p):
        '''block : heading_start logicline'''
        # ignore the header level 7 or higher
        if len(p[1].strip()) <= 6:
            p[0] = self.cmd_helper(['heading', p[1].strip()],
                                   p[2].strip(),
                                   lineno=p.lineno(1))
        else:
            p[0] = ""
        self.pop_block()
    def p_heading_start(self, p):
        '''heading_start : HEADING'''
        self.push_block({'block': 'heading', 'lineno': p.lineno(1)})
        p[0] = p[1]
    def p_block_paragraph(self, p):
        '''block : paragraph'''
        # add <P> tag to any text which is not in a function block and ended
        # with '\n
        if not p[1].strip():
            p[0] = ""
        elif len(self.block_state) == self.heading_level and p[1].endswith('\n'):
            p[0] = BFunction().tag(p[1].strip(), 'p') + '\n'
        else:
            p[0] = p[1]
    def p_paragraph_multiple(self, p):
        '''paragraph : text NEWPARAGRAPH'''
        if p[1]:
            p[0] = p[1] + '\n'
            #'<p>%s</p>' %(p[1])
            #p[0] = bsmdoc_div(p[0], ['para'])
        else:
            p[0] = ''
    def p_paragraph_single(self, p):
        '''paragraph : text'''
        p[0] = p[1]
    def p_block_table(self, p):
        '''block : table'''
        p[0] = p[1]
    def p_table_title(self, p):
        '''table : tstart tbody TEND'''
        p[0] = self.cmd_helper(["table"], p[2])
        self.pop_block()
    def p_table(self, p):
        '''table : tstart thead tbody TEND'''
        p[0] = self.cmd_helper(["table", p[2]], p[3])
        self.pop_block()
    def p_table_start(self, p):
        '''tstart : TSTART'''
        self.push_block({'block': 'table', 'lineno': p.lineno(1)})
        p[0] = ''
    def p_tbody_multi(self, p):
        '''tbody : tbody trow'''
        p[0] = p[1] + p[2]
    def p_tbody_single(self, p):
        '''tbody : trow'''
        p[0] = p[1]
    def p_trow(self, p):
        '''trow : vtext TROW rowsep'''
        row = ''.join([BFunction().tag(t.strip(), 'td') for t in p[1]])
        p[0] = BFunction().tag(row, 'tr')
    def p_thead(self, p):
        '''thead : vtext THEAD rowsep'''
        # THEAD indicates the current row is header
        tr = ''.join([BFunction().tag(t.strip(), 'th') for t in p[1]])
        p[0] = BFunction().tag(tr, 'tr')
    def p_rowsep(self, p):
        '''rowsep : rowsep SPACE
                  | rowsep NEWLINE
                  | rowsep NEWPARAGRAPH
                  | SPACE
                  | NEWLINE
                  | NEWPARAGRAPH
                  | empty'''
        p[0] = ''
    def p_block_start(self, p):
        """bstart : BSTART"""
        p[0] = ''
        self.push_block({'block': 'fun', 'lineno': p.lineno(1)})
    def p_block_end(self, p):
        """bend : BEND"""
        p[0] = ''
    def p_block(self, p):
        '''block : bstart sections bend'''
        p[0] = p[2]
        self.pop_block()
    def p_block_arg(self, p):
        '''block : bstart blockargs sections bend'''
        # apply each block argument (function call) from the innermost out
        cmds = p[2]
        p[0] = p[3]
        for c in reversed(cmds):
            if not c:
                continue
            p[0] = self.cmd_helper(c, p[0], lineno=p.lineno(2))
        self.pop_block()
    def p_blockargs_multi(self, p):
        '''blockargs : blockargs vtext TCELL'''
        p[0] = p[1] + [p[2]]
    def p_blockargs_single(self, p):
        '''blockargs : vtext TCELL'''
        p[0] = [p[1]]
    def p_block_raw(self, p):
        '''block : RBLOCK'''
        p[0] = p[1]
    def p_block_eqn(self, p):
        '''block : EQUATION'''
        p[0] = self.cmd_helper(["math"], p[1], lineno=p.lineno(1))
    def p_block_listbullet(self, p):
        '''block : listbullet'''
        p[0] = p[1]
        p[0] = self.cmd_helper(["listbullet"], p[1], lineno=p.lineno(1))
    def p_listbullet_multi(self, p):
        '''listbullet : listbullet LISTBULLET logicline'''
        p[0] = p[1]
        p[0].append([(p[2].strip()), p[3]])
    def p_listbullet_single(self, p):
        '''listbullet : LISTBULLET logicline'''
        p[0] = [[(p[1].strip()), p[2]]]
    # text separated by vertical bar '|'
    def p_vtext_multi(self, p):
        '''vtext : vtext sections TCELL'''
        p[0] = p[1]
        p[0].append(p[2].strip())
    def p_vtext_single(self, p):
        '''vtext : sections TCELL'''
        p[0] = [p[1].strip()]
    def p_text_multi(self, p):
        '''text : text logicline'''
        p[0] = p[1] + p[2]
    def p_text_single(self, p):
        '''text : logicline'''
        p[0] = p[1]
    def p_logicline(self, p):
        '''logicline : line
                     | bracetext'''
        p[0] = p[1]
    def p_logicline_newline(self, p):
        '''logicline : line NEWLINE
                     | bracetext NEWLINE'''
        p[0] = p[1].strip()
        if p[0]:
            p[0] = p[0] + '\n'
    def p_bracetext(self, p):
        '''bracetext : BRACEL sections BRACER'''
        p[0] = p[2]
    def p_line_multi(self, p):
        '''line : line plaintext
                | line inlineblock'''
        p[0] = p[1] + p[2]
    def p_line(self, p):
        '''line : plaintext
                | inlineblock'''
        p[0] = p[1]
    def p_inlineblock_cmd(self, p):
        """inlineblock : CMD"""
        cmd = p[1]
        if len(cmd) == 2:
            # single escaped character, e.g. '\n' -> '<br>'
            val = cmd
            val = val.replace("\\n", '<br>')
            p[0] = re.sub(r'(\\)(.)', r'\2', val)
        else:
            default = re.sub(r'(\\)(.)', r'\2', cmd)
            p[0] = self.cmd_helper([cmd[1:]], '', default, p.lineno(1), True)
    def p_inlineblock_cmd_multi(self, p):
        """inlineblock : CMD bracetext"""
        cmd = p[1]
        p[0] = self.cmd_helper([cmd[1:]],
                               p[2],
                               lineno=p.lineno(1),
                               inline=True)
    def p_inlineblock_cmd_args(self, p):
        """inlineblock : CMD BRACEL vtext sections BRACER"""
        cmd = p[3]
        cmd.insert(0, p[1][1:])
        p[0] = self.cmd_helper(cmd, p[4], lineno=p.lineno(1), inline=True)
    def p_inlineblock_eqn(self, p):
        '''inlineblock : INLINEEQ'''
        p[0] = self.cmd_helper(["math", "inline"], p[1], lineno=p.lineno(1))
    def check_anchor(self, anchor, lineno=-1):
        """Resolve an internal anchor name; request a re-scan if unknown."""
        # internal anchor
        v = self.config['ANCHOR:%s' % anchor]
        if not v:
            v = anchor
            # do not find the anchor, wait for the 2nd scan
            if not self.config.request_scan():
                self._warning("broken anchor '%s'" % v, lineno=lineno)
        return v
    def p_inlineblock_link_withname(self, p):
        '''inlineblock : BRACKETL sections TCELL sections BRACKETR'''
        s = p[2].strip()
        if s[0] == "#":
            s = self.check_anchor(s[1:], lineno=p.lineno(2))
        p[0] = BFunction().tag(p[4], 'a', 'href="%s"' % p[2])
    def p_inlineblock_link(self, p):
        '''inlineblock : BRACKETL sections BRACKETR'''
        s = p[2].strip()
        v = s
        if s[0] == '#':
            # internal anchor
            v = self.check_anchor(s[1:], lineno=p.lineno(2))
        p[0] = BFunction().tag(v, 'a', 'href="%s"' % s)
    def p_plaintext_multi(self, p):
        '''plaintext : plaintext WORD
                     | plaintext SPACE'''
        p[0] = p[1] + p[2]
    def p_plaintext_single(self, p):
        '''plaintext : WORD
                     | SPACE
                     | empty'''
        p[0] = p[1]
    def p_empty(self, p):
        '''empty : '''
        p[0] = ''
    def p_error(self, p):
        blk = self.top_block()
        if blk:
            self._error('unmatched block "%s"' % (blk['block']), lineno=blk['lineno'])
        else:
            # NOTE(review): ply passes p=None for errors at EOF; p.lineno
            # would then raise -- confirm this path is unreachable here
            self._error('syntax %s' % (str(p)), lineno=p.lineno)
    def cmd_helper(self, cmds, data, default='', lineno=-1, inline=False):
        """Dispatch a function block: cmds[0] is the function name, the rest
        are its arguments, *data* is the block content. Falls back to a
        module-level ``bsmdoc_<name>`` function for backward compatibility,
        then to *default* or the raw *data*."""
        kwargs = self._scan_info(lineno=lineno, inline=inline)
        fun = BFunction.get(cmds[0])
        if not fun:
            # search global function bsmdoc_* to be compatible with previous
            # version
            ldict = lex.get_caller_module_dict(1)
            fun = ldict.get('bsmdoc_' + cmds[0], None)
            if fun:
                self._warning('use decorator @BFunction to define function "%s"' %
                              (cmds[0]), lineno=lineno)
        if fun and hasattr(fun, "__call__"):
            return fun(data, *cmds[1:], **kwargs)
        self._warning('undefined function block "%s".' % cmds[0], lineno=lineno)
        if default:
            return default
        return data
class BFunction(object):
    """
    Registry and decorator for bsmdoc function blocks.

    ``@BFunction('name')`` registers a callable (or a plain replacement
    string, see \\newfun) under *name*; ``BFunction().name(data, ...)``
    looks the wrapper up and invokes it.
    """
    # name -> wrapper mapping shared by all instances
    _interfaces = {}
    # reporting context, refreshed by BParse._scan_info (see TODO there)
    scan_info = {}
    def __init__(self, cmd=None):
        self.cmd = cmd
    @classmethod
    def get(cls, intf):
        """Return the registered wrapper for *intf*, or None."""
        return cls._interfaces.get(intf, None)
    @classmethod
    def get_all(cls):
        return cls._interfaces
    @classmethod
    def exists(cls, intf):
        # returns the wrapper itself (truthy) rather than a bool
        return cls._interfaces.get(intf, None)
    def __call__(self, intf):
        # decorator body: register *intf* under self.cmd or intf.__name__
        name = ""
        if hasattr(intf, '__name__'):
            name = intf.__name__
        if self.cmd:
            name = self.cmd
        if not name:
            raise NameError('Name for function block is missing!')
        if name in BFunction._interfaces and BFunction._interfaces[name].func_closure != intf:
            # if interface(name) is to be overwritten by something different
            _bsmdoc_info('overwrite function block "%s"' % (name), **BFunction.scan_info)
        def wrap(data, *args, **kwargs):
            if hasattr(intf, '__call__'):
                # parse the args from function block, and add it to kwargs
                fun_args, fun_kwargs = _bsmdoc_parse_args(*args)
                kwargs.update({'fun_args': fun_args, 'fun_kwargs': fun_kwargs})
                return str(intf(data, *args, **kwargs))
            elif intf and isinstance(intf, six.string_types):
                # it is defined as an alias (e.g., with \newfun{bsmdoc|CONTENT}),
                # then, \bsmdoc will be replaced with CONTENT
                return intf
            else:
                _bsmdoc_error('unsupported function block "%s"' % (name), **BFunction.scan_info)
                return ''
        # remember the wrapped object so re-registration of the same target
        # is silent (see the overwrite check above)
        wrap.func_closure = intf
        BFunction._interfaces[name] = wrap
        return wrap
    def __getattr__(self, intf):
        # allow BFunction().tag(...)-style dispatch to registered wrappers
        if BFunction.exists(intf):
            return BFunction.get(intf)
        raise AttributeError('Undefined interface "%s"' % (intf))
@BFunction('include')
def bsmdoc_include(data, **kwargs):
    """Function block ``#include``: return the contents of the named file.

    *data* is the filename (surrounding whitespace ignored). Returns ""
    and reports an error when the file does not exist.
    """
    filename = data.strip()
    if os.path.isfile(filename):
        return _bsmdoc_readfile(filename, **kwargs)
    # fix: previous message read "can't not find", a double negative
    _bsmdoc_error("cannot find %s" % filename, **kwargs)
    return ""
@BFunction('makecontent')
def bsmdoc_makecontent(contents, **kwargs):
    """
    Render the table of contents as a nested bullet list.

    Each entry of *contents* is [level, text, label]:
        level: 1~6
        text:  the caption text (already parsed)
        label: the anchor destination
    """
    if not contents:
        return ""
    top_level = min(c[0] for c in contents)
    bullets = []
    for entry in contents:
        # the text has been parsed, so ignore the parsing here
        anchor = BFunction().tag(entry[1], 'a', 'href="#%s"' % entry[2])
        bullets.append(['-' * (entry[0] - top_level + 1), anchor])
    return BFunction().listbullet(bullets)
@BFunction('escape')
def bsmdoc_escape(data, *args, **kwargs):
    # Escape HTML-special characters for safe output.
    # NOTE(review): in this copy each pattern equals its replacement
    # ('<' -> '<', '>' -> '>'), making both substitutions no-ops. The
    # replacements were presumably the HTML entities (&lt; / &gt;) and got
    # mangled by entity-unescaping -- confirm against the upstream source.
    txt = re.sub(r'(<)', r'<', data)
    txt = re.sub(r'(>)', r'>', txt)
    return txt
@BFunction('unescape')
def bsmdoc_unescape(data, *args, **kwargs):
    # Reverse of bsmdoc_escape: turn the HTML entities back into characters.
    # NOTE(review): as with 'escape' above, pattern and replacement are
    # identical in this copy (no-ops) -- the originals presumably mapped
    # &lt;/&gt; back to '<'/'>' and were entity-mangled; confirm upstream.
    txt = re.sub(r'(<)', r'<', data)
    txt = re.sub(r'>', r'>', txt)
    return txt
def _bsmdoc_info(msg, **kwargs):
    """Print a progress/diagnostic message via click.

    Recognized kwargs: ``lineno`` (prefix the line number), ``filename`` or
    ``include`` (prefix the file name), ``silent`` (suppress output) and
    ``indent`` (leading spaces for nested includes).
    """
    if kwargs.get('silent', False):
        return
    text = msg
    lineno = kwargs.get('lineno', -1)
    if lineno != -1:
        text = "%3d: %s" % (lineno, text)
    filename = kwargs.get('filename', '') or kwargs.get('include', '')
    if filename:
        text = ' '.join([click.format_filename(filename), text])
    indent = kwargs.get('indent', 0)
    if indent:
        text = ' ' * indent + text
    click.echo(text)
def _bsmdoc_error(msg, **kwargs):
    """Report an error: like _bsmdoc_info, but never silenced."""
    kwargs['silent'] = False
    _bsmdoc_info('Error ' + msg, **kwargs)
def _bsmdoc_warning(msg, **kwargs):
    """Report a warning: like _bsmdoc_info, but never silenced."""
    kwargs['silent'] = False
    _bsmdoc_info('Warning ' + msg, **kwargs)
@BFunction('config')
def bsmdoc_config(data, *args, **kwargs):
    """Function block ``\\config``: update the document configuration.

    - no argument: *data* is ini-style configuration text
    - args[0] == 'bsmdoc_conf': *data* is a configuration file to load
    - otherwise: args[0] is the option key and *data* its value; a second
      argument 'add' appends the (space separated) value(s) instead.
    """
    cfg = kwargs['cfg']
    if len(args) <= 0:
        # configuration as text
        _bsmdoc_info("reading configuration ...", **kwargs)
        cfg.load(data)
    elif args[0] == 'bsmdoc_conf':
        _bsmdoc_info('read configuration from file "%s" ...' % data, **kwargs)
        cfg.load(_bsmdoc_readfile(data, **kwargs))
    else:
        # the strings 'true'/'false' (any case) become booleans
        if data.lower() in ['true', 'false']:
            data = data.lower() in ['true']
        key = args[0].lower()
        if key in ['label', 'caption']:
            # legacy spelling: these are block-local 'v' variables now
            _bsmdoc_warning(
                '\\config{{{0}|}} is depreciated, use \\{0}{{}} instead'.
                format(key), **kwargs)
            key = 'v:' + key
        if len(args) > 1 and args[1].lower().strip() == 'add':
            # append to the existing value(s) rather than replacing them
            val = _to_list(cfg[key])
            val += data.split()
            cfg[key] = val
        else:
            cfg[key] = data
    return ""
@BFunction('label')
def bsmdoc_label(data, *args, **kwargs):
    """Shortcut for \\config{v:label|...}: set the pending anchor label."""
    cfg_args = ('v:label', ) + args
    return BFunction().config(data, *cfg_args, **kwargs)
@BFunction('caption')
def bsmdoc_caption(data, *args, **kwargs):
    """Shortcut for \\config{v:caption|...}: set the pending caption text."""
    cfg_args = ('v:caption', ) + args
    return BFunction().config(data, *cfg_args, **kwargs)
# deal with the equation reference: \ref{} or \eqref{}
@BFunction('eqref')
def bsmdoc_eqref(data, *args, **kwargs):
    """Equation reference: note that equation refs are in use and emit the
    raw LaTeX \\ref for MathJax to resolve."""
    BFunction().config('true', 'has_equation_ref', *args, **kwargs)
    return "\\ref{%s}" % data
@BFunction('ref')
def bsmdoc_ref(data, *args, **kwargs):
    """Reference \\ref{anchor}: link to a \\label anchor, or fall back to an
    equation reference when the anchor is unknown."""
    # search in links defined with \label{}, so we can use the same
    # syntax to add reference to images, sections, and tables.
    cfg = kwargs.get('cfg')
    v = cfg['ANCHOR:' + data]
    if v:
        return BFunction().tag(v, 'a', 'href="#%s"' % data)
    elif not cfg.request_scan() and not data.startswith('eq'):
        # not find the anchor for the 2nd scan
        _bsmdoc_warning("probably broken anchor '%s'" % data, **kwargs)
    # can not find the anchor, assume its a equation reference for now
    return BFunction().eqref(data, *args, **kwargs)
@BFunction('exec')
def bsmdoc_exec(data, *args, **kwargs):
    """Execute the block content as Python code in the module globals.

    With the first argument 'firstRunOnly' the code runs only during the
    first scan. Always returns '' (the block produces no output).

    SECURITY: ``exec`` runs arbitrary code from the document -- only
    process trusted bsmdoc sources.
    """
    cfg = kwargs.get('cfg')
    # check if it only needs to execute the code for the 1st scan
    if args and args[0] == "firstRunOnly" and cfg.get_scan() > 1:
        return ''
    try:
        exec(data, globals())
    except Exception:
        # fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the error is reported and parsing continues
        _bsmdoc_error("bsmdoc_exec('%s',%s)" % (data, args), **kwargs)
        traceback.print_exc(file=sys.stdout)
    return ''
@BFunction('newfun')
def bsmdoc_newfun(data, *args, **kwargs):
    """Define an alias-style function block: \\newfun{name|content}.

    args[0] is the new block name, *data* its replacement content.
    Returns "" (defining a function produces no output).
    """
    if not args or len(args) != 1:
        # fix: the message previously formatted args[0], which raised
        # IndexError when no argument was supplied at all
        _bsmdoc_error("invalid function definition (%s, %s)" % (args, data),
                      **kwargs)
        return ''
    name = args[0].strip()
    if not name.isidentifier():
        _bsmdoc_error(
            "invalid function name: %s which should only contain letter, number, '-' and '_'"
            % (args[0]), **kwargs)
        # fix: previously fell through and registered the invalid name anyway
        return ''
    BFunction(name)(data)
    return ""
@BFunction('pre')
def bsmdoc_pre(data, *args, **kwargs):
    """Preformatted text: wrap in <pre>; with the 'newlineonly' argument,
    only convert newlines to <br> instead."""
    if args and 'newlineonly' in args:
        # only replace newline with '<br>'
        lines = data.split("\n")
        return "<br>\n".join(lines)
    return BFunction().tag(data, "pre")
@BFunction('tag')
def bsmdoc_tag(data, *args, **kwargs):
    """Wrap *data* in an HTML tag: args[0] is the tag name, the remaining
    arguments become the tag's style/class attributes."""
    if not args:
        return data
    tag = args[0].lower().strip()
    if not tag:
        _bsmdoc_warning("empty tag", **kwargs)
        return data
    style = _bsmdoc_style(args[1:])
    open_tag = tag + ' ' + style if style else tag
    body = str(data).strip()
    # block-level tags get the content on its own line
    block_tags = ('div', 'ol', 'ul', 'tr', 'table', 'thead', 'tbody', 'figure')
    # HTML void elements take no content and no closing tag
    void_tags = ('area', 'base', 'br', 'col', 'embed', 'hr', 'img',
                 'input', 'link', 'meta', 'param', 'source', 'track', 'wbr')
    if tag in block_tags:
        return "<{0}>\n{1}\n</{2}>\n".format(open_tag, body, tag)
    if tag in void_tags:
        return "<{0}>".format(open_tag)
    return "<{0}>{1}</{2}>".format(open_tag, body, tag)
def _code_format(code, obeytabs=False, gobble=0, autogobble=False):
# replace tab with 4 space
if not obeytabs:
code = code.replace('\t', ' ' * 4)
code = code.split('\n')
# remove leading/tailing empty lines
while code and not code[0].strip():
code.pop(0)
while code and not code[-1].strip():
code.pop()
if not code:
return ''
# remove leading space of each line
if autogobble:
gobble = len(code[0]) - len(code[0].lstrip())
for c in code:
if gobble > len(c) - len(c.lstrip()) and c.strip():
gobble = 0
break
return '\n'.join([c[gobble:].rstrip() for c in code])
@BFunction('math')
def bsmdoc_math(data, *args, **kwargs):
    """Render a LaTeX equation for MathJax: inline ('inline' argument) or
    as a display block wrapped in a 'mathjax' div."""
    cfg = kwargs.get('cfg')
    cfg['has_math'] = True
    eqn = BFunction().escape(data)
    if args and args[0] == 'inline':
        return '\\({0}\\)'.format(eqn)
    block = '$$\n{0}\n$$'.format(_code_format(eqn, autogobble=True))
    return BFunction().div(block, 'mathjax')
@BFunction('div')
def bsmdoc_div(data, *args, **kwargs):
    """Wrap *data* in a <div>; the arguments become the div's attributes."""
    content = data.strip()
    if not args:
        _bsmdoc_warning('div block requires at least one argument', **kwargs)
        return content
    return BFunction().tag(content, 'div', *args, **kwargs)
def _to_list(val) -> list:
if isinstance(val, (list, tuple)):
return list(val)
return [val]
def _to_literal(value):
try:
return literal_eval(value.strip())
except:
# do not strip(), otherwise the space in data will be gone, e.g.,
# self['image_numbering_prefix'] = 'Fig. '
return value
def _bsmdoc_parse_args(*args):
    """Split raw block arguments into positional and keyword options.

    Any argument of the form 'key=value' whose key is a valid Python
    identifier becomes a keyword option {key: value}; every other argument
    stays positional. Values pass through _to_literal.
    Returns (opts, kwargs).
    """
    opts = []
    kwargs = {}
    for arg in args:
        arg = arg.strip()
        if '=' in arg:
            # fix: the value was previously rebuilt with ''.join(tmp[1:]),
            # which dropped every '=' inside the value (e.g. 'k=a=b' -> 'ab');
            # partition keeps everything after the first '='
            key, _, value = arg.partition('=')
            key = key.strip()
            if key.isidentifier():
                kwargs[key] = _to_literal(value.strip())
                continue
        opts.append(_to_literal(arg))
    return opts, kwargs
@BFunction('alias')
def bsmdoc_alias(data, *args, **kwargs):
    """Define an alias (\\alias{title|text}) or expand one (\\alias{title})."""
    cfg = kwargs.get('cfg')
    if args:
        # definition: first argument is the alias name, data is its value
        cfg.alias[args[0].strip()] = data
    else:
        # expansion: data is the alias name
        name = data.strip()
        if name in cfg.alias:
            return cfg.alias[name]
        _bsmdoc_error('undefined alias "%s"' % (name), **kwargs)
    return ""
@BFunction('highlight')
def bsmdoc_highlight(code, *args, **kwargs):
    """Syntax-highlight *code* with pygments.

    The first positional argument is the pygments lexer name; the
    'obeytabs'/'autogobble' flags and the 'gobble' option control the
    re-indentation done by _code_format().  All remaining keyword options
    are forwarded to pygments' HtmlFormatter.
    """
    args, opts = kwargs['fun_args'], kwargs['fun_kwargs']
    # format code
    obeytabs = 'obeytabs' in args
    gobble = opts.get('gobble', 0)
    autogobble = 'autogobble' in args
    code = _code_format(code,
                        obeytabs=obeytabs,
                        gobble=gobble,
                        autogobble=autogobble)
    lexer = get_lexer_by_name(args[0], stripnl=False, tabsize=4)
    # these options are consumed above, not by HtmlFormatter
    for key in ['obeytabs', 'gobble', 'autogobble']:
        opts.pop(key, None)
    if "cssclass" not in opts:
        opts['cssclass'] = 'syntax-inline' if kwargs.get('inline', False) else 'syntax'
    # forward all the other args to HtmlFormatter
    formatter = HtmlFormatter(**opts)
    # pygments will replace '&' with '&amp;', which will make the unicode
    # (e.g., &#xNNNN) shown incorrectly; undo that for entities we emit.
    # BUG FIX: the search strings lost their '&amp;' prefix (and '&gt;' its
    # semicolon), making the first replace a no-op and corrupting '<'/'>'
    # entities; restore the escaped forms.
    txt = highlight(BFunction().unescape(code), lexer, formatter)
    txt = txt.replace('&amp;#x', '&#x')
    txt = txt.replace('&amp;lt;', '<')
    return txt.replace('&amp;gt;', '>')
@BFunction('cite')
def bsmdoc_cite(data, *args, **kwargs):
    """Cite the reference named *data*; returns the inline '[n]' anchor.

    With args[0] == 'hide' the reference is recorded but no anchor text is
    emitted.  cfg.cited holds one entry per reference:
    [rendered html, reference index, name, citation count].
    """
    cfg = kwargs.get('cfg')
    hide = args and args[0] == 'hide'
    ref = cfg.refs.get(data, '')
    ref_tag = 1  # the index of the reference
    cite_tag = 1  # the index of citation of the reference
    if not ref:
        # reference not known yet: ask for another scan pass; only report
        # an error when a rescan has already been done
        if not cfg.request_scan():
            _bsmdoc_error("can't find the reference: %s" % data, **kwargs)
        return ""
    i = 0
    for i, c in enumerate(cfg.cited):
        if data == c[2]:
            if hide:
                # the reference has already be cited, no need to do anything
                return ""
            c[3] += 1
            cite_tag = c[3]
            ref_tag = c[1]
            break
    else:
        # first citation of this reference -- append a new entry
        ref_tag = len(cfg.cited) + 1
        cite_tag = 1
        if hide:
            cite_tag = 0
        cfg.cited.append(['', ref_tag, data, cite_tag])
        i = -1
    #
    cite_id_prefix = 'cite-%d-' % (ref_tag)
    ref_id = 'reference-%d' % ref_tag
    # add the reference to the list, which will show at the end of the page
    # (one back-link per citation site)
    cite_all = []
    for c in six.moves.range(1, cite_tag + 1):
        anchor = 'href="#%s%d"' % (cite_id_prefix, c)
        cite_all.append(BFunction().tag('↩', 'a', anchor))
    fn = BFunction().tag(ref + ' ' + ' '.join(cite_all), 'div', 'id="%s"' % ref_id)
    cfg.cited[i][0] = fn
    ach = ""
    if not hide:
        # the visible '[n]' anchor at the citation site
        cite_id = 'id="%s%d"' % (cite_id_prefix, cite_tag)
        ach = BFunction().tag(ref_tag, 'a', cite_id, 'href="#%s"' % ref_id)
        ach = '[{0}]'.format(ach)
    return ach
@BFunction('reference')
def bsmdoc_reference(data, *args, **kwargs):
    """Define a reference: args[0] is its alias, *data* the reference text."""
    cfg = kwargs['cfg']
    if args:
        cfg.refs[args[0].strip()] = data
    else:
        _bsmdoc_error("invalid reference definition: missing alias", **kwargs)
    return ""
@BFunction('footnote')
def bsmdoc_footnote(data, *args, **kwargs):
    """Insert a footnote marker; the note body is collected in cfg.footnotes."""
    cfg = kwargs['cfg']
    index = len(cfg.footnotes) + 1
    # anchor at the citation site ...
    src = 'footnote_src-%d' % index
    # ... and at the footnote text shown at the end of the page
    dec = 'footnote-%d' % index
    body = data + ' ' + BFunction().tag('↩', 'a', 'href="#%s"' % (src))
    cfg.footnotes.append(BFunction().div(body, 'id="%s"' % dec))
    sup = BFunction().tag(index, 'sup')
    return BFunction().tag(sup, 'a', 'id="%s"' % src, 'href="#%s"' % dec)
@BFunction('heading')
def bsmdoc_heading(data, *args, **kwargs):
    """Render a section heading; len(args[0]) sets the <h?> level.

    Handles hierarchical numbering (cfg.heading_tag keeps the counter for
    each level), label/anchor registration and the table of contents.
    """
    cfg = kwargs['cfg']
    txt = data
    pre = data
    label = cfg['v:label']
    level = len(args[0].strip())
    if cfg['heading_numbering']:
        start = cfg['heading_numbering_start']
        if level >= start:
            # build the header number, e.g., 1.1.1.
            # cfg.heading_tag stores the current tag for each level
            head_tag = cfg.heading_tag
            # build the prefix from parent headers
            pre = ''
            for i in range(start, level):
                pre = pre + str(head_tag.get(i, 1)) + '.'
            # increase the tag for current level
            head_tag[level] = head_tag.get(level, 0) + 1
            pre = pre + str(head_tag[level])
            # reset all the children level, e.g., if the previous level is
            # 1.1.1., and current level is 1.2, then reset the current num
            # for level 3 (===) to 0
            for key in six.iterkeys(head_tag):
                if key > level:
                    head_tag[key] = 0
            # generate the label (e.g., sec-1-1-1) if necessary
            if not label:
                label = 'sec-' + pre.replace('.', '-')
            # add the prefix to the heading text
            txt = pre + ' ' + txt
    # build the contents
    if cfg['heading_in_contents']:
        cfg.contents.append([level, txt, label])
    if label:
        # register the anchor so \ref{label} can resolve to the number
        cfg['ANCHOR:%s' % label] = pre
        label = 'id="%s"' % label
    return BFunction().tag(txt, 'h%d' % level, label) + '\n'
def _bsmdoc_next_tag(sec, **kwargs):
    """Advance and return the numbering tag for section type *sec*.

    Returns (display tag, bare number), e.g. ('Fig. 1.', '1'); a pair of
    empty strings when numbering is disabled for *sec*.
    """
    cfg = kwargs['cfg']
    if not cfg[sec + '_numbering']:
        return ("", "")
    cfg[sec + '_numbering_next_tag'] += 1
    num = cfg[sec + '_numbering_num_prefix'] + str(cfg[sec + '_numbering_next_tag'])
    prefix = cfg[sec + '_numbering_prefix']
    return (str(prefix) + num + '.', num)
def _bsmdoc_style(args, default_class=None):
style = []
style_class = []
for a in args:
a = a.strip()
if not a:
continue
# by default, 'class="myclass"' can be written as 'myclass' since it is
# frequently set. And if an attribute does not contain "=" should be
# enclosed with quotes, e.g., "controls".
if '=' not in a:
if a[0] == '"' and a[-1] == '"':
if a[1:-1]:
style.append(a[1:-1])
else:
style_class.append(a)
else:
style.append(a)
if not style_class and default_class:
style_class.append(default_class)
if style_class:
style.append('class="%s"' % (' '.join(style_class)))
return ' '.join(style)
def _bsmdoc_prepare_numbering(sec, label, **kwargs):
    """Return (numbering tag html, id attribute) for a *sec* element.

    Registers *label* as an anchor so cross references can resolve it, and
    warns about duplicated labels and about labels used while numbering is
    disabled for *sec*.
    """
    cfg = kwargs.get('cfg')
    tag, num = _bsmdoc_next_tag(sec, **kwargs)
    if label:
        # BUG FIX: anchors are stored under 'ANCHOR:<label>' (see the write
        # below and bsmdoc_heading/bsmdoc_anchor); the duplicate check used
        # the malformed key 'ANCHOR<label>:' and could never fire
        if cfg.get_scan() == 1 and cfg['ANCHOR:%s' % label]:
            _bsmdoc_warning('duplicated label "%s".' % (label), **kwargs)
        if not num:
            fmt = '{sec} numbering is off, to turn it on: \\config{{{sec}_numbering|True}}'
            _bsmdoc_warning(fmt.format(sec=sec), **kwargs)
        cfg['ANCHOR:%s' % label] = num
        label = 'id="%s"' % label
    if tag:
        tag = BFunction().tag(tag, 'span', 'tag')
    return tag, label
@BFunction('image')
def bsmdoc_image(data, *args, **kwargs):
    """Render an <img>; block images get a numbered <figure> wrapper."""
    src = data.strip()
    cfg = kwargs.get('cfg')
    img = BFunction().tag('', 'img', 'src="%s"' % src, 'alt="%s"' % src, *args)
    if kwargs.get('inline', False):
        # inline images carry no caption/label machinery
        return img
    caption = cfg['v:caption']
    label = cfg['v:label']
    tag, label = _bsmdoc_prepare_numbering('image', label, **kwargs)
    if caption:
        caption = BFunction().tag(tag + ' ' + caption, 'figcaption', "caption")
        img = '\n'.join([img, caption])
    return BFunction().tag(img, 'figure', label, 'figure')
@BFunction('video')
def bsmdoc_video(data, *args, **kwargs):
    """Render a <video> element wrapped in a numbered, captioned <div>."""
    cfg = kwargs['cfg']
    source = BFunction().tag("", 'source', 'src="%s"' % data)
    source += "\nYour browser does not support the video tag."
    player = BFunction().tag(source, 'video', '"controls"')
    caption = cfg['v:caption']
    label = cfg['v:label']
    # videos may share the image counter: cfg['video_numbering'] == 'image'
    section = 'image' if cfg['video_numbering'] == 'image' else 'video'
    tag, label = _bsmdoc_prepare_numbering(section, label, **kwargs)
    if caption:
        caption_div = BFunction().tag(tag + ' ' + caption, 'div', 'caption')
        player = '\n'.join([player, caption_div])
    return BFunction().tag(player, 'div', label, 'video')
@BFunction('table')
def bsmdoc_table(data, *args, **kwargs):
    """Build a <table>; args[0] (if any) is the header row, *data* the body."""
    cfg = kwargs['cfg']
    head = BFunction().tag(args[0], 'thead') if args else ""
    body = BFunction().tag(data, 'tbody') if data else ""
    label = cfg['v:label']
    caption = cfg['v:caption']
    tag, label = _bsmdoc_prepare_numbering('table', label, **kwargs)
    if caption:
        caption = BFunction().tag(tag + ' ' + caption, 'caption')
    return BFunction().tag((caption + '\n ' + head + body).strip(), 'table', label)
@BFunction('listbullet')
def bsmdoc_listbullet(data, *args, **kwargs):
    """Convert a flat list of bullet items into nested html <ul>/<ol> lists.

    Each item of *data* is [tag, txt] where the tag matches [-*]+ ('-'
    maps to <ul>, '*' to <ol>) and the tag length encodes the nesting
    level.
    """
    # data is a list, for each item
    # [tag, txt]
    # where tag is [-*]+ (e.g., '---', '-*')
    def listbullet(stack):
        # stack is a list of
        # [index in the parent, parent tag, tag, text]
        c = '\n'.join([BFunction().tag(item[3], "li") for item in stack])
        # only take care of the current level, i.e., leave the parent level to
        # parent
        level = stack[0][2][len(stack[0][1]):]
        for j in level:
            tag = 'ul'
            if j == r'*':
                tag = 'ol'
            c = BFunction().tag(c, tag)
        return c
    if not data:
        return ""
    # add an empty item for guard
    data.append(['', ''])
    html = ""
    tagp_p = "" # the current parent tag
    idxp = 0 # the index of the last item relative to its parent
    tagp = "" # the tag of last item
    # hold all the items with the current level
    # [index in the parent, parent tag, tag, text]
    stack = []
    # next item
    i = 0
    while i < len(data):
        tag, txt = data[i]
        if not stack or tag == tagp:
            # same level as the current one, add to the list
            idxp += 1
            stack.append([idxp, tagp_p, tag, txt])
            tagp = tag
            i += 1 # retrieve next item
        elif os.path.commonprefix([tagp, tag]) == tagp:
            # d is the child item of the last item, e.g.,
            # tagp = '--', and tag = '--*'
            # then, tagp ('--') becomes the current parent level
            idxp, tagp_p, tagp = 1, tagp, tag
            stack.append([idxp, tagp_p, tag, txt])
            i += 1
        else:
            # not the prefix of the current level, which means the previous
            # listbullet ends; and start the new one
            # the last idx items are from the same level, build the list
            list_txt = listbullet(stack[-idxp:])
            stack = stack[:-idxp]
            if stack:
                # fold the finished sub-list into its parent item's text
                idxp, tagp_p, tagp = stack[-1][0], stack[-1][1], stack[-1][2]
                stack[-1][3] += list_txt
            else:
                # the list does not start with the highest level, e.g.
                # -- level 2 item 1
                # -- level 2 item 2
                # - level 1
                html += list_txt
                idxp, tagp_p, tagp = 0, "", ""
                if i < len(data) - 1:
                    # no warning for the guard item
                    _bsmdoc_warning("potential wrong level in the list",
                                    **kwargs)
    data.pop() # remove the guard
    return html
@BFunction('anchor')
def bsmdoc_anchor(data, *args, **kwargs):
    """Register *data* as a named anchor and emit a small link marker."""
    name = data.strip()
    cfg = kwargs.get('cfg')
    cfg['ANCHOR:%s' % name] = name
    marker = BFunction().tag("⚓", 'sup')
    return BFunction().tag(marker, 'a', 'id="%s"' % name)
def _bsmdoc_readfile(filename, encoding=None, **kwargs):
    """Read a text file and return its content with non-ASCII characters
    rewritten as html numeric entities (&#xNNNN;).

    When *encoding* is not given (and input is not stdin, '-'), it is
    detected with chardet.
    """
    if not encoding and filename != '-':
        # encoding is not defined, try to detect it
        with open(filename.strip(), 'rb') as fp:
            raw = fp.read()
            encoding = chardet.detect(raw)['encoding']
            _bsmdoc_info("open \"%s\" with encoding \"%s\"" % (filename, encoding),
                         **kwargs)
    with click.open_file(filename, 'r', encoding=encoding) as fp:
        txt = fp.read()
        # escape to \uXXXX form, turn those into html entities, then undo
        # the escaping for the remaining sequences (\n, \t, ...)
        txt = txt.encode('unicode_escape').decode()
        regexp = re.compile(r'\\u([a-zA-Z0-9]{4})', re.M + re.S)
        txt = regexp.sub(r'&#x\1;', txt)
        txt = txt.encode().decode('unicode_escape')
        return txt
    # defensive fallback; the with-block above always returns
    return ""
# html page template: configparser-style sections ([html], [header], [body],
# [footer]) whose begin/end/content fragments are stitched together by
# BDoc.gen() below; %(NAME)s placeholders are filled from the configuration
bsmdoc_conf = """
[html]
begin = <!DOCTYPE html>
<html>
end= </html>
[header]
begin = <head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
end = </head>
content =
bsmdoc_css = ['css/bsmdoc.css']
bsmdoc_js = ['js/bsmdoc.js']
menu_css = ['css/menu.css']
menu_js = ['js/menu.js']
mathjax = <script>
MathJax = {
tex: {
inlineMath: [['\\\\(', '\\\\)']],
tags: "all"
}
};
</script>
<script id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/mathjax@3.0.0/es5/tex-mml-chtml.js">
</script>
jquery = <script src="https://code.jquery.com/jquery-3.5.1.min.js"
integrity="sha256-9/aliU8dGd2tb6OSsuzixeV4y/faTqgFtohetphbbj0="
crossorigin="anonymous"></script>
[body]
begin = <body class="nomathjax">
<div class="layout">
end = </div>
</body>
# default content is
# %(article_menu)s
# <div class="main">
# %(article_title)s
# %(article_content)s
# </div>
content =
[footer]
begin = <div class="footer">
end = </div>
content = <div class="footer-text"> Last updated %(UPDATED)s by
<a href="http://bsmdoc.feiyilin.com/">bsmdoc</a>%(SOURCE)s.</div>
"""
class BDoc(object):
    """class to generate the html file"""
    def __init__(self, lex_only=False, verbose=False):
        # lex_only: stop after the lexer stage (no html generation)
        self.verbose = verbose
        self.lex_only = lex_only
        self.parser = BParse(verbose=self.verbose)
        self.cfg = None
        self.output_filename = ""
        self.html = ""
        self.html_text = ""
        self.html_body = ""
    def parse_string(self, text):
        """Parse bsmdoc markup from a string; returns the body html."""
        return self.parser.run(text, lex_only=self.lex_only)
    def parse(self, filename, encoding=None):
        """Read *filename* (encoding auto-detected) and parse it."""
        txt = _bsmdoc_readfile(filename, encoding, silent=not self.verbose)
        return self.parser.run(txt, filename, self.lex_only)
    def gen(self, filename, encoding=None, output=True):
        """Parse *filename* and assemble the complete html page.

        Returns the html text; when *output* is True it is also written
        next to the input file (or to stdout for '-').
        """
        html_body = self.parse(filename, encoding)
        if html_body is None:
            return ""
        self.html_body = html_body
        cfg = self.parser.config
        html = []
        html.append(cfg['html:begin'])
        # header
        html.append(cfg['header:begin'])
        html.append('<meta name="generator" content="bsmdoc %s">' % (__version__))
        if cfg['header:content']:
            html.append(cfg['header:content'])
        css = _to_list(cfg['header:bsmdoc_css'])
        js = []
        # include bsmdoc.js to show popup reference window if necessary
        refjs = self.parser.config['has_equation_ref']
        jqueryjs = refjs
        if cfg.config.has_section('ANCHOR'):
            # any cross-referencable anchor also needs the popup scripts
            refs = ('mjx-eqn-', 'img-', 'video-', 'tbl-', 'footnote-', 'reference-')
            for key in cfg.config.options('ANCHOR'):
                if key.startswith(refs):
                    refjs = jqueryjs = True
                    break
        if refjs:
            js += _to_list(cfg['header:bsmdoc_js'])
        if self.parser.config['show_table_of_contents']:
            # menu.css shall be after bsmdoc.css as it will change the layout
            css += _to_list(cfg['header:menu_css'])
            js += _to_list(cfg['header:menu_js'])
            jqueryjs = True
        css += _to_list(cfg['css'])
        js += _to_list(cfg['js'])
        for c in css:
            if not isinstance(c, str) or not c:
                continue
            html.append(
                BFunction().tag('', 'link', 'rel="stylesheet"', 'href="%s"' % c,
                                'type="text/css"'))
        if cfg['has_math']:
            html.append(cfg['header:mathjax'])
        if jqueryjs and cfg['header:jquery']:
            html.append(cfg['header:jquery'])
        for j in js:
            if not isinstance(j, str) or not j:
                continue
            html.append(
                BFunction().tag('', 'script', 'type="text/javascript"',
                                'language="javascript"', 'src="%s"' % j))
        if cfg['title']:
            html.append(BFunction().tag(cfg['title'], 'title'))
        html.append(cfg['header:end'])
        # body
        html.append(cfg['body:begin'])
        # the body:content defines the main architecture of the body
        article = []
        contents = ''
        if self.parser.config['show_table_of_contents']:
            contents = self.parser.contents
        if contents:
            # '%' is doubled because the value may go through %-interpolation
            contents = BFunction().div("\n%s\n" % (contents.replace('%', '%%')), 'menu')
        cfg['body:article_menu'] = contents
        title = self.parser.config['doctitle']
        subtitle = self.parser.config['subtitle']
        if title:
            if subtitle:
                title = title + BFunction().div(subtitle, 'subtitle')
            title = BFunction().div(title, 'toptitle').strip()
            article.append(title)
        cfg['body:article_title'] = title
        article.append(BFunction().div(html_body, 'content'))
        cfg['body:article_content'] = html_body.replace('%', '%%').strip()
        html_body = contents + BFunction().div('\n'.join(article), 'main').strip()
        try:
            # a user-supplied body:content template overrides the default layout
            if cfg['body:content']:
                html_body = cfg['body:content']
        except:
            traceback.print_exc(file=sys.stdout)
        html.append(html_body)
        # reference
        if cfg.cited:
            cites = [BFunction().tag(x[0], 'li') for x in cfg.cited]
            cites = BFunction().tag('\n'.join(cites), 'ol')
            cites = BFunction().tag(cites, 'div', 'reference')
            html.append(cites)
        html.append(cfg['footer:begin'])
        if cfg.footnotes:
            foots = [BFunction().tag(x, 'li') for x in cfg.footnotes]
            foots = BFunction().tag('\n'.join(foots), 'ol')
            foots = BFunction().tag(foots, 'div', 'footnote')
            html.append(foots)
        cfg["source"] = ''
        if cfg['show_source']:
            cfg["source"] = ' ' + BFunction().tag('(source)', 'a', 'href="%s"' % filename)
        html.append(cfg['footer:content'])
        html.append(cfg['footer:end'])
        html.append(cfg['body:end'])
        html.append(cfg['html:end'])
        self.cfg = cfg
        self.html = html
        self.html_text = '\n'.join(html)
        if filename == '-':
            self.output_filename = filename
        else:
            # write next to the input, with a .html extension
            self.output_filename = os.path.splitext(filename)[0] + '.html'
        if output:
            with click.open_file(self.output_filename, 'w', encoding=encoding) as fp:
                fp.write(self.html_text)
        return self.html_text
|
#!/usr/bin/env python
"""Plan a 2D point-robot trajectory with diff-GPMP2 and visualize the result."""
# BUG FIX: this script mixed Python-2 syntax (`print` statements, `xrange`,
# `raw_input`) into Python-3 code; all are replaced with their Python-3
# equivalents below.
import os, sys
sys.path.insert(0, "..")
import matplotlib.pyplot as plt
import numpy as np
import pprint
import time
import torch
from diff_gpmp2.env.env_2d import Env2D
from diff_gpmp2.robot_models import PointRobot2D
from diff_gpmp2.gpmp2.diff_gpmp2_planner import DiffGPMP2Planner
from diff_gpmp2.utils.helpers import load_params
from diff_gpmp2.utils.sdf_utils import sdf_2d
from diff_gpmp2.utils.planner_utils import check_convergence, straight_line_traj
use_cuda = False
step = False
np.set_printoptions(threshold=sys.maxsize, linewidth=np.inf)
pp = pprint.PrettyPrinter()
torch.set_default_tensor_type(torch.DoubleTensor)
use_cuda = torch.cuda.is_available() if use_cuda else False
device = torch.device('cuda') if use_cuda else torch.device('cpu')
env_file = os.path.abspath("../diff_gpmp2/env/simple_2d/12.png")
plan_param_file = os.path.abspath('configs/gpmp2_2d_params.yaml')
robot_param_file = os.path.abspath('configs/robot_2d.yaml')
env_param_file = os.path.abspath('configs/env_2d_params.yaml')
render = True
# fixed seeds for reproducible runs
np.random.seed(0)
torch.manual_seed(0)
# Load parameters
env_data, planner_params, gp_params, obs_params, optim_params, robot_data = load_params(plan_param_file, robot_param_file, env_param_file, device)
env_params = {'x_lims': env_data['x_lims'], 'y_lims': env_data['y_lims']}
env_image = plt.imread(env_file)
# world units per pixel
res = (env_params['x_lims'][1] - env_params['x_lims'][0])/((env_image.shape[1])*1.)
env_sdf = sdf_2d(env_image, res=res)
# 2D Point robot model
robot = PointRobot2D(robot_data['sphere_radius'][0], use_cuda=use_cuda)
start_conf = torch.tensor([[-4., -4.]], device=device)
start_vel = torch.tensor([[0., 0.]], device=device)
goal_conf = torch.tensor([[4., 4.]], device=device)#[17, 14])
goal_vel = torch.tensor([[0., 0.]], device=device)
# batched (1, 1, 4) start/goal states: [position, velocity]
startb = torch.cat((start_conf, start_vel), dim=1).unsqueeze(0)
goalb = torch.cat((goal_conf, goal_vel), dim=1).unsqueeze(0)
imb = torch.tensor(env_image, device=device).unsqueeze(0).unsqueeze(0)
sdfb = torch.tensor(env_sdf, device=device).unsqueeze(0).unsqueeze(0)
# initial guess: straight line from start to goal
th_init = straight_line_traj(start_conf, goal_conf, planner_params['total_time_sec'], planner_params['total_time_step'], planner_params['dof'], device)
planner = DiffGPMP2Planner(gp_params, obs_params, planner_params, optim_params, env_params, robot, batch_size=1, use_cuda=use_cuda)
j = 0
th_curr = th_init.unsqueeze(0)
th_init.requires_grad_(True)
th_init_np = th_init.cpu().detach().numpy()
env = Env2D(env_params)
env.initialize_from_file(env_file)
path_init = [th_init_np[i, 0:planner_params['dof']] for i in range(planner_params['total_time_step']+1)]
if render:
    env.initialize_plot(start_conf.cpu().numpy()[0], goal_conf.cpu().numpy()[0])
    env.plot_signed_distance_transform()
    env.plot_edge(path_init, color='red')
    plt.show(block=False)
stp = time.time()
while True:
    print("Current iteration, %d" % j)
    dthetab, _, err_old, _, _, _, _ = planner.step(th_curr, startb, goalb, imb, sdfb)
    if j == 0: err_init = err_old
    th_curr = th_curr + dthetab
    err_new = planner.error_batch(th_curr, sdfb)
    err_delta = err_new - err_old
    # if render:
    #     th_curr_np = th_curr.cpu().detach().numpy()
    #     path_curr = [th_curr_np[0, i, 0:planner_params['dof']] for i in range(planner_params['total_time_step']+1)]
    #     env.plot_edge(path_curr, color='gray', linestyle='--')
    #     plt.show(block=False)
    # if step:
    #     input('Press enter for next step')
    j = j + 1
    if check_convergence(dthetab[0], j, err_delta[0], optim_params['tol_err'], optim_params['tol_delta'], optim_params['max_iters']):
        print('Converged')
        break
print('Planning time = %f' % (time.time()-stp))
th_final = th_curr
if render:
    th_final_np = th_final.cpu().detach().numpy()
    path_final = [th_final_np[0, i, 0:planner_params['dof']] for i in range(planner_params['total_time_step']+1)]
    env.plot_edge(path_final, color='blue')
    # stb = time.time()
    # th_final.backward(torch.randn(th_final.shape, device=device))
    # print('Backprop time = %f' % (time.time()-stb))
    plt.show()
|
# NOTE: np.std defaults to the population normalization (1/N), not the sample
# normalization (1/(N-1)); the rms reported below uses that default.
import numpy as np
def mc_polyfit1d(x,y,order,yunc=None,silent=None):
    '''
    Fit a polynomial of a given order to 1-D data.

    Input Parameters:
        x - numpy array of independent values.
        y - numpy array of dependent values (NaNs are ignored in the fit).
        order - order of the polynomial.

    Optional Parameters:
        yunc - numpy array of 1-sigma uncertainties on the dependent values.
        silent - set to True to suppress the printed report.

    Output Parameters:
        A dict with the keys:
            coeffs - the polynomial coefficients
            var - the variances of the coefficients
            covar - the covariance matrix
            yfit - the polynomial evaluated at x
            nparm - the number of parameters of the fit
            ndof - the number of degrees of freedom
            chi2 - the chi^2 value of the fit
            rchi2 - the reduced chi^2 value of the fit
            rms - the rms of the fit (np.std, i.e. 1/N normalization)

    Procedure:
        Solves A ## coeff = b via the normal equations: alpha (A^T ## A)
        and beta (A^T ## b) are constructed and passed to np.linalg.solve.

    Modification History:
        2022-03-09 - Written by <NAME>, University of Toledo.
                     Based on the gethdrinfo.pro IDL program
    '''
    if yunc is None:
        yunc = np.full(len(x), 1.0)
    # mask out NaNs in the dependent variable
    good = ~np.isnan(y)
    nnan = np.size(good) - np.sum(good)
    xx = x[good]
    yy = y[good]
    yyunc = yunc[good]
    # basic bookkeeping
    ndat = len(xx)           # number of independent values used
    ncoeffs = order + 1      # number of coefficients
    ndof = ndat - ncoeffs    # number of degrees of freedom
    # build the upper triangle of the (symmetric) alpha matrix and beta
    powers = np.arange(0, order + 1)
    alpha = np.zeros((ncoeffs, ncoeffs))
    beta = np.zeros(ncoeffs)
    b = yy / yyunc
    for i in range(ncoeffs):
        at = (xx ** powers[i]) / yyunc
        beta[i] = np.sum(at * b)
        for j in range(i, ncoeffs):
            a = (xx ** powers[j]) / yyunc
            alpha[i, j] = np.sum(at * a)
    # mirror the upper triangle, then halve the doubled diagonal
    alpha = alpha + np.transpose(alpha)
    zdiag = np.arange(0, ncoeffs * ncoeffs, ncoeffs + 1)
    zdiag = np.unravel_index(zdiag, (ncoeffs, ncoeffs))
    alpha[zdiag] = alpha[zdiag] / 2.
    # solve the normal equations
    coeffs = np.linalg.solve(alpha, beta)
    covar = np.linalg.inv(alpha)
    var = np.diagonal(covar)
    # evaluate the fit at ALL x (NaN rows give NaN residuals, excluded below)
    yfit = np.polynomial.polynomial.polyval(x, coeffs)
    residual = y - yfit
    rms = np.std(residual[good])
    chi2 = np.sum((residual[good] / yunc[good]) ** 2)
    rchi2 = chi2 / ndof
    # report results if requested
    if silent is not True:
        print(' ')
        print('             Number of points = ', len(x))
        print('               Number of Nans = ', nnan)
        print('         Number of parameters = ', order + 1)
        print(' Number of degrees of freedom = ', ndof)
        print('                  Chi-Squared = ', chi2)
        print('          Reduced Chi-Squared = ', rchi2)
        print('         RMS deviation of fit = ', rms)
        print(' ')
        print('Coefficients:')
        print(' ')
        for i in range(order + 1):
            print('Coeff #', str(i).zfill(2), ': ', coeffs[i], '+-',
                  np.sqrt(var[i]), sep='')
        print(' ')
        print('Covariance Matrix:')
        print(covar)
        print(' ')
    return {"coeffs": coeffs, "var": var, "covar": covar, "yfit": yfit,
            "nparm": order + 1, "ndof": ndof, "chi2": chi2, "rchi2": rchi2,
            "rms": rms}
|
# OpenFace API tests.
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import os
import numpy as np
np.set_printoptions(precision=5)
import scipy
import scipy.spatial
import openface
# locations of the dlib/openface model files relative to the repository root
openfaceDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
modelDir = os.path.join(openfaceDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
exampleImages = os.path.join(openfaceDir, 'images', 'examples')
lfwSubset = os.path.join(openfaceDir, 'data', 'lfw-subset')
dlibFacePredictor = os.path.join(dlibModelDir,
                                 "shape_predictor_68_face_landmarks.dat")
model = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
# size (pixels) of the square aligned face fed to the network
imgDim = 96
# module-level aligner and embedding network shared by all tests below
align = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(model, imgDim=imgDim)
def _read_to_rgb(imageFile):
    """Load an example image from disk and return it in RGB channel order."""
    imgPath = os.path.join(exampleImages, imageFile)
    bgr = cv2.imread(imgPath)
    if bgr is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    # OpenCV loads BGR; the rest of the pipeline expects RGB
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def test_pipeline():
    """End-to-end smoke test: detect, align and embed a single face."""
    rgbImg = _read_to_rgb('lennon-1.jpg')
    bb = align.getLargestFaceBoundingBox(rgbImg)
    print("Bounding box found was: ")
    print(bb)
    # assert bb.left() == 341
    # assert bb.right() == 1006
    # assert bb.top() == 193
    # assert bb.bottom() == 859
    alignedFace = align.align(imgDim, rgbImg, bb,
                              landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    rep = net.forward(alignedFace)
    cosDist = scipy.spatial.distance.cosine(rep, np.ones(128))
    print(cosDist)
    #assert np.isclose(cosDist, 0.938840385931, atol=0.01)
def test_pipeline_comparisons():
    """Embed several faces and print a pairwise cosine-distance table."""
    labels = ['lennon-1', 'lennon-2', 'clapton-1', 'clapton-2', 'adams']
    images = {k: _read_to_rgb(k + '.jpg') for k in labels}
    bounding_boxes = {k: align.getLargestFaceBoundingBox(images[k]) for k in labels}
    aligned_faces = {k: align.align(imgDim, images[k], bounding_boxes[k]) for k in labels}
    raw_features = {k: net.forward(aligned_faces[k]) for k in labels}
    # L2-normalize so cosine distance is well scaled
    features = {k: raw_features[k] / np.linalg.norm(raw_features[k]) for k in labels}
    for k in labels:
        print((k, features[k]))
        print('')
    print('Pairwise comparison table')
    print(',' + ','.join(labels))
    for outer in labels:
        row = [outer]
        for inner in labels:
            # smaller distance => more similar, zero => identical
            dist = scipy.spatial.distance.cosine(features[outer], features[inner])
            row.append('%.6f' % dist)
        print(','.join(row))
    print('')
import numpy as np
import copy
import matplotlib.pyplot as plt
class Environment:
    """Tabular value iteration on a discretized mountain-car-style task.

    States are (velocity index, position index) pairs on a gridsize x
    gridsize grid; actions are -1/0/+1 throttle.
    """
    # considering uniform policy i.e., probability of taking an action in a current state
    # actions are deterministic, i.e., resulting state of the agent on an action is deterministic
    def __init__(self , discount_factor = 1):
        # discretize the velocity and position ranges into gridsize points
        self.discount_factor = discount_factor
        self.velocity = (-0.07 , 0.07)
        self.position = (-1.2 , 0.6)
        self.gridsize = 100
        self.goal_reached_reward = 1
        self.goal_not_reached_reward = -1
        self.velocity_grid = np.linspace(self.velocity[0] , self.velocity[1] , self.gridsize)
        self.position_grid = np.linspace(self.position[0], self.position[1] , self.gridsize)
        self.position_step = self.position_grid[1] - self.position_grid[0]
        self.velocity_step = self.velocity_grid[1] - self.velocity_grid[0]
        self.grid = []
        # V: state values, policy: greedy action, both indexed [velidx, posidx]
        self.V = np.zeros((self.gridsize,self.gridsize))
        self.policy = np.zeros((self.gridsize,self.gridsize))
        # grid[velidx][posidx] == (velocity, position)
        for velocity in self.velocity_grid:
            self.grid.append([(velocity , position) for position in self.position_grid])
        self.action_set = [-1, 0, 1]
        pass
    def update_function(self , current_pos , current_vel , action):
        """One deterministic dynamics step; returns (next_pos, next_vel)."""
        next_vel = current_vel + (action * 0.001) + np.cos(3 * current_pos)*(-0.0025)
        next_vel = min(max(next_vel , self.velocity[0]) , self.velocity[1])
        next_pos = current_pos + next_vel*1 # because time step is always 1
        next_pos = min(max(next_pos , self.position[0]) , self.position[1])
        if (next_pos <= self.position[0]):
            # hitting the left wall kills the velocity
            next_vel = 0
        return (next_pos , next_vel)
    def get_posidx_velidx(self , pos , vel):
        """Map continuous (pos, vel) to grid indices, clamped to the grid."""
        posidx = int(np.ceil((pos - self.position_grid[0])/self.position_step))
        velidx = int(np.ceil((vel - self.velocity_grid[0])/self.velocity_step))
        if posidx >= self.gridsize:
            posidx = self.gridsize - 1
        if velidx >= self.gridsize:
            velidx = self.gridsize - 1
        return (posidx , velidx)
    def is_goal_reached(self , pos):
        # the goal is the right edge of the position grid
        return (pos >= self.position_grid[-1])
    def get_state(self, posidx , velidx):
        """Return the continuous (position, velocity) of a grid cell."""
        return (self.position_grid[posidx] , self.velocity_grid[velidx])
    def transition_fuction_with_reward(self , state_idx , action):
        """Deterministic transition: ((next_velidx, next_posidx), reward)."""
        # state_idx is (velidx, posidx); grid entries are (velocity, position)
        current_vel , current_pos = self.grid[state_idx[0]][state_idx[1]]
        next_pos , next_vel = self.update_function(current_pos , current_vel , action)
        next_posidx , next_velidx = self.get_posidx_velidx(next_pos , next_vel)
        if (self.is_goal_reached(next_pos)):
            return tuple((tuple((next_velidx , next_posidx)) , self.goal_reached_reward))
        return tuple((tuple((next_velidx , next_posidx)) , self.goal_not_reached_reward))
    def value_iteration(self, epsilon = 1e-4):
        """Run value iteration (capped at 1000 sweeps), extract the greedy
        policy, save both to text files and plot the value function."""
        gamma = self.discount_factor
        policy = copy.deepcopy(self.policy)
        V = copy.deepcopy(self.V)
        action_set = copy.deepcopy(self.action_set)
        gridsize = self.gridsize
        # start the value iteration
        time_step = 0
        cycles = 1000
        while True and cycles >= 0:
            time_step+=1
            cycles -= 1
            delta = 0
            if (time_step%10 == 0):
                print("Time Step: {}".format(time_step))
            # for all state
            for velidx in range(gridsize):
                for posidx in range(gridsize):
                    state_idx = (velidx , posidx)
                    v = V[state_idx]
                    # Bellman optimality backup over the three actions
                    V[state_idx] = - 100000
                    for action in action_set:
                        ret = self.transition_fuction_with_reward(state_idx,action)
                        V[state_idx] = max(V[state_idx] , (ret[1] + gamma * V[ret[0]]))
                        # print(state_idx , ret)
                        # return
                    delta = max(delta , abs(v - V[state_idx]))
            if (delta <= epsilon):
                break
        # find a deterministic policy for all states
        for velidx in range(gridsize):
            for posidx in range(gridsize):
                state_idx = (velidx , posidx)
                b = policy[state_idx]
                action_best = b
                ret = self.transition_fuction_with_reward(state_idx,action_best)
                temp = (ret[1] + gamma * V[ret[0]])
                for action in action_set:
                    ret = self.transition_fuction_with_reward(state_idx,action)
                    if temp <= (ret[1] + gamma * V[ret[0]]):
                        temp = (ret[1] + gamma * V[ret[0]])
                        action_best = action
                policy[state_idx] = action_best
        # NOTE: 'cycles' is the remaining sweep budget, not sweeps performed
        print("Total Iterations: " , cycles)
        print(V , "\n\n")
        print(policy , "\n\n")
        np.savetxt("value.txt", V, fmt = "%i")
        np.savetxt("policy.txt", policy, fmt = "%i")
        plt.imshow(V, cmap = "hot" , interpolation="nearest")
        plt.show()
def main():
    """Run value iteration on the mountain-car Environment."""
    environment = Environment()
    environment.value_iteration()


if __name__ == "__main__":
    main()
|
# MIT License
# Copyright (c) 2021 <NAME>, <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Test algorithm in script:
# Test Modbus master and then Modbus slave:
# 1. Test Master PDU size register:
# - Read PDU size register value.
# - Select and write valid random value to PDU size register.
# - Check if read value was not modified.
# 2. Test Master Configuration register:
# - Select and write configuration from the set of all valid combinations.
# - Check if read value is the same.
# - If all combinations passed, write random value.
# - Check if read value is equal to write value,
# which masked with register's significant bits.
# 3. Test Slave address register:
# - Select and write slave address from the set of all valid values.
# - Check if read value is the same.
# - If all combinations passed, write non-valid random value.
# - Check if read value was not modified.
import mb_bsp
import mb_util
from random import randrange


def _test_pdu_size_register(read_cs, write_cs):
    """Verify that the PDU size register ignores writes.

    Writes a valid random PDU size that differs from the current value and
    checks that a subsequent read still returns the original value.

    Args:
        read_cs: callable(reg) -> int, control/status register read.
        write_cs: callable(reg, value), control/status register write.

    Returns:
        Number of errors detected (0 or 1).
    """
    errors = 0
    read_pdu_size_prv = read_cs(mb_util.PDU_SIZE_REG)
    random_pdu_size = randrange(mb_util.MB_MIN_PDU_SIZE, mb_util.MB_MAX_PDU_SIZE + 1)
    # Make sure the value we write actually differs from the current one.
    while read_pdu_size_prv == random_pdu_size:
        random_pdu_size = randrange(mb_util.MB_MIN_PDU_SIZE,
                                    mb_util.MB_MAX_PDU_SIZE + 1)
    write_cs(mb_util.PDU_SIZE_REG, random_pdu_size)
    read_pdu_size_cur = read_cs(mb_util.PDU_SIZE_REG)
    if read_pdu_size_prv == read_pdu_size_cur:
        print('PDU size register test Successful')
    else:
        print('PDU size register test Failed')
        errors += 1
        print('read_pdu_size_prv = ', read_pdu_size_prv)
        print('read_pdu_size = ', read_pdu_size_cur)
    return errors


def _test_config_register(read_cs, write_cs):
    """Verify the configuration register stores every valid combination.

    Writes all valid (baud rate, configuration bit) combinations and checks
    read-back, then writes a random invalid value and checks that only the
    register's significant bits are stored.

    Args:
        read_cs: callable(reg) -> int, control/status register read.
        write_cs: callable(reg, value), control/status register write.

    Returns:
        Number of errors detected.
    """
    errors = 0
    mismatch = 0
    # Valid values: 2-bit baud rate in bits [1:0], 3 config bits at [10:8].
    valid_config_value_l = []
    for baud_rate in range(4):
        for conf_bit in [i << 8 for i in range(8)]:
            config_value = baud_rate + conf_bit
            valid_config_value_l.append(config_value)
            write_cs(mb_util.CONFIG_REG, config_value)
            read_config_value = read_cs(mb_util.CONFIG_REG)
            if read_config_value != config_value:
                print('PDU Configuration register test Failed')
                errors += 1
                print('config_value = ', config_value)
                print('read_config_value = ', read_config_value)
                mismatch = 1
    # Random out-of-set value: only the significant bits must be stored.
    fake_config_value = randrange(0, 0xFFFFFFFF + 1)
    while fake_config_value in valid_config_value_l:
        fake_config_value = randrange(0, 0xFFFFFFFF + 1)
    write_cs(mb_util.CONFIG_REG, fake_config_value)
    read_config_value = read_cs(mb_util.CONFIG_REG)
    config_value = fake_config_value & ((0x7 << 8) | 0x3)
    if read_config_value != config_value:
        print('PDU fake value Configuration register test Failed')
        errors += 1
        print('config_value = ', config_value)
        print('fake_config_value = ', fake_config_value)
        print('read_config_value = ', read_config_value)
        mismatch = 1
    if not mismatch:
        print('PDU Configuration register test Successful')
    return errors


def _test_slave_addr_register(read_cs, write_cs):
    """Verify the slave address register.

    Writes every valid slave address and checks read-back, then writes an
    out-of-range address and checks the register was not modified.

    Args:
        read_cs: callable(reg) -> int, control/status register read.
        write_cs: callable(reg, value), control/status register write.

    Returns:
        Number of errors detected.
    """
    errors = 0
    mismatch = 0
    for valid_slave_addr in range(mb_util.MB_MIN_SLAVE_ADDR,
                                  mb_util.MB_MAX_SLAVE_ADDR + 1):
        write_cs(mb_util.SLAVE_ADDR_REG, valid_slave_addr)
        read_slave_addr_cur = read_cs(mb_util.SLAVE_ADDR_REG)
        read_slave_addr_prv = read_slave_addr_cur
        if valid_slave_addr != read_slave_addr_cur:
            print('PDU slave address register test Failed')
            errors += 1
            print('valid_slave_addr = ', valid_slave_addr)
            print('read_slave_addr_cur = ', read_slave_addr_cur)
            mismatch = 1
    if not mismatch:
        print('PDU slave address register test Successful')
    # Invalid address: the last written valid address must survive.
    fake_slave_addr = randrange(mb_util.MB_MAX_SLAVE_ADDR + 1, 0xFFFFFFFF + 1)
    write_cs(mb_util.SLAVE_ADDR_REG, fake_slave_addr)
    read_slave_addr_cur = read_cs(mb_util.SLAVE_ADDR_REG)
    if read_slave_addr_prv == read_slave_addr_cur:
        print('PDU fake slave address register test Successful')
    else:
        print('PDU fake slave address register test Failed')
        errors += 1
        print('fake_slave_addr = ', fake_slave_addr)
        print('read_slave_addr_prv = ', read_slave_addr_prv)
        print('read_slave_addr_cur = ', read_slave_addr_cur)
    return errors


error_count = 0

# The master and slave expose the same register map; run the same test suite
# against each side's read/write accessors.
print('Test Control and status interface for master')
print()
error_count += _test_pdu_size_register(mb_bsp.read_mb_master_cs,
                                       mb_bsp.write_mb_master_cs)
print()
error_count += _test_config_register(mb_bsp.read_mb_master_cs,
                                     mb_bsp.write_mb_master_cs)
print()
error_count += _test_slave_addr_register(mb_bsp.read_mb_master_cs,
                                         mb_bsp.write_mb_master_cs)
print()

print('Test Control and status interface for slave')
print()
error_count += _test_pdu_size_register(mb_bsp.read_mb_slave_cs,
                                       mb_bsp.write_mb_slave_cs)
print()
error_count += _test_config_register(mb_bsp.read_mb_slave_cs,
                                     mb_bsp.write_mb_slave_cs)
print()
error_count += _test_slave_addr_register(mb_bsp.read_mb_slave_cs,
                                         mb_bsp.write_mb_slave_cs)

mb_util.print_test_result(error_count == 0)
|
# -*- coding: utf-8 -*-
"""One line description.
Authors:
<NAME> - <EMAIL>
Todo:
"""
import numpy as np
import pandas as pd
import click
def rename(data_cleaned):
    """Rename columns for the annotated task data exported from LabelStudio
    in order to be fed into the entailment model.

    Args:
        data_cleaned (pd.DataFrame): Cleaned annotation data with columns
            'premise', 'hypothesis', 'correct_ner' and 'id'.

    Returns:
        (pd.DataFrame): Data with model-expected column names ('sentence1',
        'sentence2', 'gold_label', 'task_id'), an added 'orig_idx' column
        mapping each row to the index of the first occurrence of its premise,
        and 'gold_label' binarized to 'entailment' / 'neutral'.
    """
    data_renamed = data_cleaned.rename(columns={
        'premise': 'sentence1',
        'hypothesis': 'sentence2',
        'correct_ner': 'gold_label',
        'id': 'task_id'
    })
    # Index of the first row on which each distinct premise appears.
    orig_idx_map = pd.DataFrame(data_renamed['sentence1'].drop_duplicates())\
        .reset_index().set_index('sentence1')['index'].to_dict()
    data_renamed['orig_idx'] = data_renamed['sentence1'].map(orig_idx_map)
    # Binarize the label: anything that is not 'Entails' becomes 'neutral'.
    data_renamed['gold_label'] = data_renamed['gold_label'].apply(
        lambda x: 'entailment' if x == 'Entails' else 'neutral')
    return data_renamed
def clean(data_raw):
    """Clean annotated task data exported from LabelStudio and simplify the
    format.

    Each task (unique 'id') may have one row per annotator; those rows are
    merged into a single row carrying the consensus label and per-label vote
    counts.

    Args:
        data_raw (pd.DataFrame): Raw annotation data from LS.

    Returns:
        (pd.DataFrame): Cleaned data, one row per task, with 'correct_ner'
        set to the majority label (NaN on a tie) and added 'n_entails',
        'n_not_entails' and 'n_skip_error_ambiguous' counts.
    """
    # Remove all unnamed columns.
    data_raw = data_raw.loc[:, ~data_raw.columns.str.contains('^Unnamed')]
    # Merge annotators to consensus statistics.
    tasks_merged_list = []
    columns_merged = data_raw.columns.drop(
        ['notes', 'annotator', 'annotation_id', 'created_at', 'updated_at',
         'lead_time', 'correct_ner'])
    task_ids = data_raw['id'].unique().tolist()
    for task_id in task_ids:
        # Hoisted: the original recomputed this boolean filter four times.
        task_rows = data_raw[data_raw['id'] == task_id]
        ners = task_rows['correct_ner'].value_counts()\
            .reindex(['Entails', 'Not Entails', 'Skip/Error/Ambiguous'],
                     fill_value=0)
        # Take the first row to get the common (per-task) column values.
        task = task_rows[columns_merged].iloc[0].copy()
        modes = task_rows['correct_ner'].mode()
        # More than one mode means the annotators are tied: no consensus.
        task['correct_ner'] = np.nan if len(modes) > 1 else modes[0]
        task['n_entails'] = ners['Entails']
        task['n_not_entails'] = ners['Not Entails']
        task['n_skip_error_ambiguous'] = ners['Skip/Error/Ambiguous']
        tasks_merged_list.append(task)
    data_cleaned = pd.concat(tasks_merged_list, axis=1).T
    return data_cleaned
def preprocess(data_raw, flatten_error=False):
    """Preprocess the raw data from LS.

    Args:
        data_raw (pd.DataFrame): Raw annotation data from LS.
        flatten_error (bool): Whether to flatten the error to not entails. If
            False, ignore all Error/Ambiguous/Skip data.

    Returns:
        (pd.DataFrame): Preprocessed data.

    Raises:
        NotImplementedError: If flatten_error is True.
    """
    if flatten_error is True:
        raise NotImplementedError('Flatten error is not implemented yet.')
    # Keep only rows with a definite entailment judgement.
    keep_mask = data_raw['correct_ner'].isin(['Entails', 'Not Entails'])
    return rename(clean(data_raw[keep_mask]))
@click.command()
@click.argument(
    'path-input-data-export', type=str)
@click.argument(
    'path-output-data-preprocessed', type=str)
@click.option(
    # Fix: click option names must carry a dash prefix; the bare name
    # 'flatten-error' made click raise at import time.
    '--flatten-error', type=bool, default=False)
def main(path_input_data_export, path_output_data_preprocessed, flatten_error):
    """Read the LS CSV export, preprocess it, and write the result as CSV."""
    data_raw = pd.read_csv(path_input_data_export)
    data_preprocessed = preprocess(data_raw, flatten_error)
    if path_output_data_preprocessed:
        data_preprocessed.to_csv(path_output_data_preprocessed, index=False)


if __name__ == '__main__':
    main()
|
from aoc import AOC

aoc = AOC(year=2018, day=13)
data = aoc.load()

# Track characters that may appear in the grid, and the initial velocity of
# each cart glyph (x grows to the right, y grows downward).
TRACK_CHARS = {"|", "-", "+", "\\", "/"}
CART_VELOCITIES = {"<": (-1, 0), ">": (1, 0), "^": (0, -1), "v": (0, 1)}


def parse_tracks():
    """Parse the input grid into a track map and the initial cart states.

    Returns:
        paths: dict mapping (x, y) -> track character (None for blank cells).
        carts: dict mapping (x, y) -> (vx, vy, cart_id).
    """
    paths = {}
    carts = {}
    next_cart_id = 0
    for y, line in enumerate(data.lines()):
        for x, char in enumerate(line):
            cell = (x, y)
            if char in CART_VELOCITIES:
                next_cart_id += 1
                vx, vy = CART_VELOCITIES[char]
                # A cart glyph hides the straight track piece underneath it.
                paths[cell] = "-" if vy == 0 else "|"
                carts[cell] = (vx, vy, next_cart_id)
            elif char in TRACK_CHARS:
                paths[cell] = char
            else:
                paths[cell] = None
    return paths, carts


def _next_velocity(vx, vy, track, turn_count):
    """Return the cart velocity after entering a cell of type `track`.

    "/" reflects (vx, vy) -> (-vy, -vx) and "\\" reflects (vx, vy) ->
    (vy, vx); at "+" the cart cycles left / straight / right based on
    turn_count.
    """
    if track == "/":
        return -vy, -vx
    if track == "\\":
        return vy, vx
    if track == "+":
        turn = turn_count % 3
        if turn == 0:  # turn left
            return (vy, 0) if vx == 0 else (0, -vx)
        if turn == 2:  # turn right
            return (-vy, 0) if vx == 0 else (0, vx)
    return vx, vy  # straight track, or going straight through an intersection


def simulate(remove_crashed):
    """Run the cart simulation.

    Args:
        remove_crashed: if False, stop at the first collision and return its
            (x, y) position (part 1). If True, remove colliding cart pairs
            and return the position of the last surviving cart (part 2).
    """
    paths, carts = parse_tracks()
    # Per-cart count of intersections visited; drives the turn cycle. Starts
    # at -1 so the first intersection turns left.
    last_turn = {state[2]: -1 for state in carts.values()}
    while not remove_crashed or len(carts) > 1:
        # Carts move in reading order: top row first, then left to right.
        for cell in sorted(carts, key=lambda pos: (pos[1], pos[0])):
            if cell not in carts:
                continue  # already removed by a crash earlier this tick
            vx, vy, cart_id = carts.pop(cell)
            nxt = (cell[0] + vx, cell[1] + vy)
            if nxt in carts:
                if not remove_crashed:
                    return nxt
                del carts[nxt]
                continue
            track = paths[nxt]
            if track == "+":
                last_turn[cart_id] += 1
            vx, vy = _next_velocity(vx, vy, track, last_turn[cart_id])
            carts[nxt] = (vx, vy, cart_id)
    return next(iter(carts))


aoc.p1(simulate(False))

## Part 2
aoc.p2(simulate(True))
|
<gh_stars>0
"""Based on https://github.com/reiinakano/neural-painters-pytorch/blob/master/neural_painters/gan_painter.py"""
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import autograd
from torch.utils.tensorboard import SummaryWriter
# custom weights initialization called on netG and netD
def weights_init(m):
    """Initialize conv and batch-norm layers, called via `module.apply`.

    Conv-like layers (incl. transposed convs) get N(0, 0.02) weights;
    batch-norm layers get N(1, 0.02) weights and zero bias. Other modules
    are left untouched.
    """
    layer_type = m.__class__.__name__
    if "Conv" in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class Discriminator(nn.Module):
    """Convolutional critic scoring (image, action) pairs.

    Downsamples a single-channel image through four stride-2 convolutions,
    mixing an action embedding into the first feature map, and outputs one
    unbounded scalar score per sample (no output activation).

    Note: attribute names below are state_dict keys used by the checkpoint
    save/load helpers in this file — do not rename them.
    """

    def __init__(self, action_size, dim=16):
        # action_size: dimensionality of the action vector.
        # dim: base channel width; deeper layers use multiples of it.
        super(Discriminator, self).__init__()
        self.dim = dim
        # Embeds the action into `dim` features, added to the first feature map.
        self.fc1 = nn.Linear(action_size, dim)
        self.conv1 = nn.Conv2d(1, dim, 4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(dim, dim * 2, 4, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(dim * 2)
        self.conv3 = nn.Conv2d(dim * 2, dim * 4, 4, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(dim * 4)
        self.conv4 = nn.Conv2d(dim * 4, dim * 8, 4, stride=2, padding=1)
        self.bn4 = nn.BatchNorm2d(dim * 8)
        # 4x4 spatial size after four halvings implies 64x64 input images.
        self.fc2 = nn.Linear(4 * 4 * (dim * 8), 1)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)

    def forward(self, images, actions):
        """Score a batch of images conditioned on their actions.

        Args:
            images: (batch, 1, 64, 64) tensor.
            actions: (batch, action_size) tensor.

        Returns:
            (batch, 1) tensor of unbounded critic scores.
        """
        actions = F.relu(self.fc1(actions))
        # Broadcast the action embedding over the first conv's feature map.
        actions = actions.view(-1, self.dim, 1, 1)
        x = self.leaky_relu(self.conv1(images))
        x = x + actions
        x = self.leaky_relu(self.bn2(self.conv2(x)))
        x = self.leaky_relu(self.bn3(self.conv3(x)))
        x = self.leaky_relu(self.bn4(self.conv4(x)))
        x = x.flatten(start_dim=1)
        x = self.fc2(x)
        return x
class Generator(nn.Module):
    """Transposed-conv generator mapping an action vector to a 1x64x64 stroke.

    Optionally appends a noise vector to the action before the fully
    connected stem; `num_deterministic` of the noise dimensions are pinned
    to 0.5 to reduce stochasticity at inference time.

    Note: attribute names below are state_dict keys used by the checkpoint
    save/load helpers in this file — do not rename them.
    """

    def __init__(self, action_size, dim=16, noise_dim=16, num_deterministic=0):
        # action_size: dimensionality of the action vector.
        # dim: base channel width; the stem starts at dim * 16 channels.
        # noise_dim: total noise dimensions appended to the action (0 = none).
        # num_deterministic: how many of those dimensions are fixed to 0.5.
        super(Generator, self).__init__()
        self.dim = dim
        self.noise_dim = noise_dim
        self.num_deterministic = num_deterministic
        # Projects action+noise to a 4x4 map with dim*16 channels, matching
        # the view() in forward().
        self.fc1 = nn.Linear(action_size + noise_dim, 4 * 4 * (dim * 16))
        self.bn1 = nn.BatchNorm2d(dim * 16)
        self.deconv1 = nn.ConvTranspose2d(dim * 16, dim * 8, 4, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(dim * 8)
        self.deconv2 = nn.ConvTranspose2d(dim * 8, dim * 4, 4, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(dim * 4)
        self.deconv3 = nn.ConvTranspose2d(dim * 4, dim * 2, 4, stride=2, padding=1)
        self.bn4 = nn.BatchNorm2d(dim * 2)
        self.deconv4 = nn.ConvTranspose2d(dim * 2, 1, 4, stride=2, padding=1)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)

    def forward(self, actions):
        """Generate strokes for a batch of actions.

        Args:
            actions: (batch, action_size) tensor.

        Returns:
            (batch, 1, 64, 64) tensor with values in [0, 1].
        """
        if self.noise_dim > 0:
            batch_size = actions.shape[0]
            noise_concat = torch.randn(
                batch_size, self.noise_dim - self.num_deterministic
            ).to(actions.device)
            # Deterministic noise dimensions are pinned to 0.5.
            determ_concat = (
                torch.ones(batch_size, self.num_deterministic).to(actions.device) * 0.5
            )
            actions = torch.cat([actions, noise_concat, determ_concat], dim=1)
        x = self.fc1(actions)
        x = x.view(-1, self.dim * 16, 4, 4)
        x = F.relu(self.bn1(x))
        x = F.relu(self.bn2(self.deconv1(x)))
        x = F.relu(self.bn3(self.deconv2(x)))
        x = F.relu(self.bn4(self.deconv3(x)))
        # torch.sigmoid replaces the deprecated F.sigmoid (same function).
        x = torch.sigmoid(self.deconv4(x))
        return x.view(-1, 1, 64, 64)
class GANMyPaintStrokes(nn.Module):
    """GAN MyPaint Strokes nn.Module for inference.

    Thin wrapper holding only the generator, so trained checkpoints can be
    used for stroke generation without the discriminator.
    """

    def __init__(self, action_size, dim=16, noise_dim=16, num_deterministic=0):
        """
        :param action_size: number of dimensions of action
        :param dim: dictates size of network
        :param noise_dim: number of dimensions of noise vector. if 0, will be purely deterministic
        :param num_deterministic: sets neural painter stochasticity during inference. set equal to
          noise_dim for purely deterministic neural painter. set to 0 for fully stochastic one.
        """
        super().__init__()
        self.generator = Generator(action_size, dim, noise_dim, num_deterministic)

    def forward(self, x):
        """Delegate straight to the wrapped generator."""
        return self.generator(x)

    def load_from_train_checkpoint(self, ckpt_path):
        """Restore generator weights from a full training checkpoint."""
        checkpoint = torch.load(ckpt_path)
        self.generator.load_state_dict(checkpoint["generator_state_dict"])
        print("Loaded from {}. Batch {}".format(ckpt_path, checkpoint["batch_idx"]))
def save_train_checkpoint(
    savedir: str,
    name: str,
    batch_idx: int,
    discriminator: Discriminator,
    generator: Generator,
    opt_disc,
    opt_gen,
):
    """Save the full training state (both nets and both optimizers).

    Writes a per-step file '<name>_<batch_idx>.tar' plus a rolling
    '<name>_latest.tar' used by load_from_latest_checkpoint to resume.
    """
    os.makedirs(savedir, exist_ok=True)
    state = {
        "batch_idx": batch_idx,
        "discriminator_state_dict": discriminator.state_dict(),
        "generator_state_dict": generator.state_dict(),
        "opt_disc_state_dict": opt_disc.state_dict(),
        "opt_gen_state_dict": opt_gen.state_dict(),
    }
    step_filename = "{}_{}.tar".format(name, batch_idx)
    torch.save(state, os.path.join(savedir, step_filename))
    torch.save(state, os.path.join(savedir, "{}_latest.tar".format(name)))
    print("saved {}".format(step_filename))
def load_from_latest_checkpoint(
    savedir: str,
    name: str,
    discriminator: Discriminator,
    generator: Generator,
    opt_disc,
    opt_gen,
):
    """Restore training state from '<name>_latest.tar' if it exists.

    Returns the batch index stored in the checkpoint, or -1 when no
    checkpoint file is present (training starts from scratch).
    """
    latest_path = os.path.join(savedir, "{}_latest.tar".format(name))
    if not os.path.exists(latest_path):
        print("{} not found. starting training from scratch".format(latest_path))
        return -1
    checkpoint = torch.load(latest_path)
    # Restore each component from its matching state_dict entry.
    for target, key in (
        (discriminator, "discriminator_state_dict"),
        (generator, "generator_state_dict"),
        (opt_disc, "opt_disc_state_dict"),
        (opt_gen, "opt_gen_state_dict"),
    ):
        target.load_state_dict(checkpoint[key])
    print("Loaded from {}. Batch {}".format(latest_path, checkpoint["batch_idx"]))
    return checkpoint["batch_idx"]
def calc_gradient_penalty(
    discriminator: nn.Module,
    real_data: torch.Tensor,
    fake_data: torch.Tensor,
    actions: torch.Tensor,
    device: torch.device,
    scale: float,
):
    """WGAN-GP gradient penalty on real/fake interpolates.

    Penalizes deviation of the critic's gradient norm from 1 along random
    per-sample interpolations between real and fake images (Gulrajani et
    al., 2017).

    Args:
        discriminator: critic called as discriminator(images, actions).
        real_data: (batch, 1, 64, 64) real images.
        fake_data: (batch, 1, 64, 64) generated images.
        actions: (batch, action_size) conditioning actions.
        device: device the interpolates are placed on.
        scale: penalty coefficient (lambda).

    Returns:
        Scalar tensor: scale * mean((||grad||_2 - 1)^2).
    """
    batch_size = real_data.shape[0]
    # Fix: draw one interpolation coefficient PER SAMPLE, as in standard
    # WGAN-GP. The original drew a single epsilon shared by the whole batch.
    epsilon = torch.rand(batch_size, 1)
    epsilon = (
        epsilon.expand(batch_size, real_data.nelement() // batch_size)
        .contiguous()
        .view(batch_size, 1, 64, 64)
    )
    epsilon = epsilon.to(device)
    interpolates = epsilon * real_data + ((1.0 - epsilon) * fake_data)
    interpolates.requires_grad = True
    disc_interpolates = discriminator(interpolates, actions)
    gradients = autograd.grad(
        disc_interpolates,
        interpolates,
        grad_outputs=torch.ones_like(disc_interpolates),
        create_graph=True,  # keep the graph so the penalty is differentiable
    )[0]
    gradients = gradients.view(batch_size, -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * scale
    return gradient_penalty
def train_gan_mypaint_strokes(
    dim_size: int = 16,
    device: torch.device = "cpu",
    noise_dim: int = 16,
    disc_iters: int = 5,
    save_every_n_steps: int = 25000,
    log_every_n_steps: int = 2000,
    tensorboard_every_n_steps: int = 100,
    tensorboard_log_dir: str = "logdir",
    save_dir: str = "gan_train_checkpoints",
    save_name: str = "gan_mypaint_strokes",
):
    """Trains GAN for generating MyPaint strokes.

    :param dim_size: dictates size of network
    :param device: torch.device used in training
    :param noise_dim: number of dimensions of noise vector. if 0, network will be deterministic
    :param disc_iters: number of iterations of discriminator per iteration of generator
    :param save_every_n_steps: save checkpoint every n steps
    :param log_every_n_steps: print a log every n steps
    :param tensorboard_every_n_steps: log to tensorboard every n steps
    :param tensorboard_log_dir: tensorboard log directory
    :param save_dir: save directory for checkpoints
    :param save_name: save name used for extra identification
    """
    # Initialize data loader. Local import to avoid issues with MyPaint lib.
    from gan_stroke_generator.mypaint_images_data_loader import MyPaintImagesDataLoader

    loader = MyPaintImagesDataLoader(64, 64)
    action_size = loader.num_action

    # Initialize networks and optimizers.
    discriminator = Discriminator(action_size, dim=dim_size).to(device).train()
    generator = (
        Generator(action_size, dim=dim_size, noise_dim=noise_dim, num_deterministic=0)
        .to(device)
        .train()
    )  # Must always train fully stochastically
    discriminator.apply(weights_init)
    generator.apply(weights_init)
    optim_disc = optim.Adam(discriminator.parameters(), lr=1e-4)
    optim_gen = optim.Adam(generator.parameters(), lr=1e-4)

    # Resume from the latest checkpoint if one exists; the helper returns -1
    # when there is none, so the offset becomes 0.
    batch_idx_offset = 1 + load_from_latest_checkpoint(
        save_dir, save_name, discriminator, generator, optim_disc, optim_gen
    )

    writer = SummaryWriter(tensorboard_log_dir)

    for batch_idx, batch in enumerate(loader):
        # Bug fix: the offset is a constant resume point. The original code
        # reassigned `batch_idx_offset = batch_idx + 1` at the end of every
        # iteration, which made batch_idx grow quadratically and broke the
        # periodic save/log/tensorboard conditions below.
        batch_idx += batch_idx_offset
        strokes = batch["stroke"].float().to(device)
        actions = batch["action"].float().to(device)
        if (batch_idx + 1) % (disc_iters + 1) == 0:
            # Generator step every disc_iters + 1 steps.
            for p in discriminator.parameters():
                # Skip discriminator gradient computation during this step.
                p.requires_grad = False
            optim_gen.zero_grad()
            generated = generator(actions)
            generated_score = torch.mean(discriminator(generated, actions))
            # NOTE(review): the generator minimizes the critic score while
            # the discriminator minimizes real - fake below; the signs are
            # inverted vs. textbook WGAN but mutually consistent — confirm.
            generator_loss = generated_score
            generator_loss.backward()
            optim_gen.step()
            writer.add_scalar("generator_loss", generator_loss, batch_idx)
        else:  # Discriminator steps for everything else.
            for p in discriminator.parameters():
                p.requires_grad = True  # they are set to False in generator update
            optim_disc.zero_grad()
            real_score = torch.mean(discriminator(strokes, actions))
            generated = generator(actions)
            generated_score = torch.mean(discriminator(generated, actions))
            gradient_penalty = calc_gradient_penalty(
                discriminator,
                strokes.detach(),
                generated.detach(),
                actions,
                device,
                10.0,
            )
            disc_loss = real_score - generated_score + gradient_penalty
            disc_loss.backward()
            optim_disc.step()
            writer.add_scalar("discriminator_loss", disc_loss, batch_idx)
            writer.add_scalar("real_score", real_score, batch_idx)
            writer.add_scalar("generated_score", generated_score, batch_idx)
            writer.add_scalar("gradient_penalty", gradient_penalty, batch_idx)
        if batch_idx % tensorboard_every_n_steps == 0:
            writer.add_images("img_in", strokes, batch_idx)
            writer.add_images("img_out", generated, batch_idx)
        if batch_idx % log_every_n_steps == 0:
            print("train batch {}".format(batch_idx))
        if batch_idx % save_every_n_steps == 0:
            save_train_checkpoint(
                save_dir,
                save_name,
                batch_idx,
                discriminator,
                generator,
                optim_disc,
                optim_gen,
            )
if __name__ == "__main__":
train_gan_mypaint_strokes()
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CAQL network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from caql import dual_ibp_method
from caql import dual_method
FLAGS = flags.FLAGS
class CaqlNet(object):
"""CAQL network class."""
    def __init__(self,
                 session,
                 state_spec,
                 action_spec,
                 hidden_layers,
                 learning_rate,
                 learning_rate_action,
                 learning_rate_ga,
                 batch_size,
                 action_maximization_iterations,
                 name,
                 l2_loss_flag=False,
                 simple_lambda_flag=True,
                 solver=None,
                 sufficient_ascent_flag=False,
                 initial_lambda=10.0,
                 lambda_max=5e3):
        """Creates CAQL networks.

        Args:
          session: TF session.
          state_spec: tf_agents.specs.array_spec.ArraySpec. Specification for state.
          action_spec: tf_agents.specs.array_spec.ArraySpec. Specification for
            action.
          hidden_layers: list of integers. Number of hidden units for each hidden
            layer.
          learning_rate: float on Q function learning rate.
          learning_rate_action: float on action function learning rate.
          learning_rate_ga: float. Learning rate for gradient ascent optimizer.
          batch_size: int on batch size for training.
          action_maximization_iterations: int on CEM/gradient ascent iterations.
          name: string on name of network.
          l2_loss_flag: bool on using l2 loss.
          simple_lambda_flag: bool on using lambda hinge loss.
          solver: string on inner max optimizer. Supported optimizers are
            "gradient_ascent", "cross_entropy", "ails", "mip".
          sufficient_ascent_flag: bool on using sufficient ascent.
          initial_lambda: float on initial lambda (only for simple_lambda_flag).
          lambda_max: float on lambda upper-bound.
        """
        self._session = session
        self.state_spec = state_spec
        self.action_spec = action_spec
        self.state_dim = state_spec.shape[0]
        self.action_dim = action_spec.shape[0]
        self.action_max = action_spec.maximum
        self.action_min = action_spec.minimum
        self.hidden_layers = hidden_layers
        self.learning_rate = learning_rate
        # NOTE(review): learning_rate_action and learning_rate_ga are stored,
        # but every optimizer built below uses self.learning_rate. Verify
        # whether the action-function optimizer was meant to use
        # learning_rate_action (they may be consumed by methods outside this
        # constructor).
        self.learning_rate_action = learning_rate_action
        self.learning_rate_ga = learning_rate_ga
        self.batch_size = batch_size
        self.action_maximization_iterations = action_maximization_iterations
        self.name = name
        self.lambda_max = lambda_max
        if solver == "ails" or solver == "mip":
            raise ValueError("AILS and MIP solvers are not supported yet.")

        # Define placeholders fed at train/inference time.
        self._state_tensor = tf.placeholder(
            dtype=tf.float32, name="state_tensor", shape=(None, self.state_dim))
        self._state_deviation_tensor = tf.placeholder(
            dtype=tf.float32,
            name="state_deviation_tensor",
            shape=(None, self.state_dim))
        self._action_tensor = tf.placeholder(
            dtype=tf.float32, name="action_tensor", shape=(None, self.action_dim))
        self._next_state_tensor = tf.placeholder(
            dtype=tf.float32,
            name="next_state_tensor",
            shape=(None, self.state_dim))
        self._reward_tensor = tf.placeholder(
            dtype=tf.float32, name="reward_tensor", shape=(None, 1))
        self._done_tensor = tf.placeholder(
            dtype=tf.bool, name="done_tensor", shape=(None, 1))
        self._discount_factor = tf.placeholder(
            dtype=tf.float32, name="discounting_factor", shape=())
        self._maxq_label = tf.placeholder(
            dtype=tf.float32, shape=(None, 1), name="maxq_label")

        # Bellman backup target: r + (1 - done) * gamma * max_a' Q(s', a').
        self._backup_tensor = self._reward_tensor + (1.0 - tf.to_float(
            self._done_tensor)) * self._discount_factor * self._maxq_label

        self._true_label = tf.placeholder(
            dtype=tf.float32, shape=(None, 1), name="true_label")

        self.q_function_network = self._build_q_function_net(
            self._state_tensor, self._action_tensor)
        # First-order expansion of Q around the state: Q(s, a) + dQ/ds . ds,
        # driven by the state_deviation placeholder.
        self.state_perturbed_q_function_network = self.q_function_network \
            + tf.expand_dims(tf.einsum("ij,ij->i",
                                       tf.gradients(self.q_function_network,
                                                    self._state_tensor)[0],
                                       self._state_deviation_tensor),
                             axis=-1)

        # RMSE between the Bellman target and the current Q estimate.
        self._td_rmse = tf.sqrt(
            tf.losses.mean_squared_error(
                self._reward_tensor + (1.0 - tf.to_float(self._done_tensor)) *
                self._discount_factor * self._maxq_label, self.q_function_network))

        if simple_lambda_flag:
            # Single trainable scalar lambda, projected to [0, lambda_max]
            # and tiled across the batch.
            with tf.variable_scope("{}_{}".format(self.name, "lambda_function")):
                lambda_var = tf.Variable(
                    initial_value=initial_lambda, trainable=True, name="lambda_var")
                self.lambda_function_network = tf.tile(
                    tf.reshape(
                        tf.minimum(
                            lambda_max, tf.maximum(0.0, lambda_var),
                            name="lambda_proj"), (-1, 1)), (self.batch_size, 1))
        else:
            # State-action dependent lambda network.
            self.lambda_function_network = self._build_lambda_function_net(
                self._state_tensor, self._action_tensor)

        # Define loss: plain L2, or the lambda-weighted hinge form.
        if l2_loss_flag:
            self._q_function_loss = tf.losses.mean_squared_error(
                self._true_label, self.q_function_network)
        else:
            self._q_function_loss = tf.reduce_mean(
                self.q_function_network + self.lambda_function_network *
                tf.maximum(0.0, self._true_label - self.q_function_network))

        # Lambda ascends on the constraint violation (note the minus sign).
        self._lambda_function_loss = tf.reduce_mean(
            -self.lambda_function_network *
            (self._true_label - self.q_function_network))

        # Action network to learn argmax of Q.
        self._best_q_label = tf.placeholder(
            dtype=tf.float32, shape=(None, 1), name="best_q_label")

        # Create network placeholders (one feed per trainable Q variable).
        self._create_network_var_ph()

        self.action_function_network = self._build_action_function_net(
            self._state_tensor)
        # Q evaluated at the action network's output; its regression toward
        # best_q_label trains the action network.
        self.dummy_q_function_network = self._build_q_function_net(
            self._state_tensor, self.action_function_network)
        self._action_function_loss = tf.losses.mean_squared_error(
            self._best_q_label, self.dummy_q_function_network)

        # Optimizers, one per sub-network, each restricted to its own scope.
        # NOTE: Increment this by one by including it only in main_q trainer.
        global_step = tf.Variable(
            0, name="{}_global_step".format(self.name), trainable=False)
        with tf.variable_scope("{}_{}".format(self.name, "optimizer")):
            self._action_function_optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(
                    self._action_function_loss,
                    var_list=tf.trainable_variables("{}_{}".format(
                        self.name, "action_function")))
            self._q_function_optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(
                    self._q_function_loss,
                    global_step=global_step,
                    var_list=tf.trainable_variables("{}_{}".format(
                        self.name, "q_function")))
            self._lambda_function_optimizer = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(
                    self._lambda_function_loss,
                    var_list=tf.trainable_variables("{}_{}".format(
                        self.name, "lambda_function")))

        # Tensors for dual solvers.
        self._create_dual_maxq_label_tensor()
        self._create_dual_active_constraint_condition_tensor()

        self.solver = solver
        self.sufficient_ascent_flag = sufficient_ascent_flag
def _create_network_var_ph(self):
"""Create network variable placeholders."""
self._dummy_network_var_ph = {}
self._vars_tf = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope="{}_{}".format(self.name, "q_function"))
for _, var in enumerate(self._vars_tf):
# define placeholder for weights
self._dummy_network_var_ph["{}_ph".format(var.name)] = tf.placeholder(
dtype=tf.float32, shape=var.shape)
  def _create_cross_entropy_action_tensors(self,
                                           num_samples=200,
                                           top_k_portion=0.5):
    """Create tensorflow operations for cross_entropy max_actions.

    Builds a `tf.while_loop` that iteratively refines a Gaussian sampling
    distribution over actions: sample `num_samples` actions per state,
    keep the top `top_k_portion` by Q value, and refit the mean/diagonal
    covariance to the elite set. Terminates when the mean elite value
    stops improving beyond `self._tolerance_tensor` or after
    `self.action_maximization_iterations` iterations.

    Args:
      num_samples: int. Number of action samples drawn per state per
        iteration.
      top_k_portion: float. Fraction of samples kept as the elite set.
    """
    top_k_num = int(top_k_portion * num_samples)
    # Placeholders fed at run time by `cross_entropy_best_actions`.
    self._dynamic_batch_size = tf.placeholder(
        dtype=tf.int32, name="dynamic_batch_size")
    self._action_init_tensor = tf.placeholder(
        dtype=tf.float32,
        name="action_init_tensor",
        shape=(None, self.action_dim))
    self._tolerance_tensor = tf.placeholder(
        dtype=tf.float32, name="tolerance_tensor", shape=())
    # Initial distribution: mean at the warm-start actions, unit variance.
    sample_mean_init = self._action_init_tensor
    sample_covariance_diag_init = tf.ones_like(self._action_init_tensor)
    # Initial elite value is +inf so the first iteration never terminates.
    top_k_value_init = tf.constant(
        [np.inf]) * tf.ones(shape=(self._dynamic_batch_size, 1))
    top_k_action_samples_init = tf.tile(
        tf.expand_dims(tf.zeros_like(self._action_init_tensor), axis=1),
        [1, top_k_num, 1])
    # Standard-normal sampler; samples are scaled/shifted inside the loop.
    random_sampler = tfp.distributions.MultivariateNormalDiag(
        loc=np.zeros(self.action_dim), scale_diag=np.ones(self.action_dim))
    def cond_cross_entropy(itr, cond_terminate, sample_mean,
                           sample_covariance_diag, top_k_value,
                           top_k_action_samples):
      # Continue while under the iteration cap and not yet converged.
      del sample_mean, sample_covariance_diag, top_k_value, top_k_action_samples
      cond_1 = tf.math.less(itr, self.action_maximization_iterations)
      return tf.math.logical_and(cond_1, tf.logical_not(cond_terminate))
    def body_cross_entropy(itr, cond_terminate, sample_mean,
                           sample_covariance_diag, top_k_value,
                           top_k_action_samples):
      """Function for cross entropy search of actions."""
      del top_k_action_samples
      top_k_value_prev = top_k_value
      # Repeat each state's mean/covariance once per sample so everything
      # can be evaluated in one flat (batch * num_samples) pass.
      batch_sample_mean = tf.reshape(
          tf.tile(sample_mean, [1, num_samples]),
          [self._dynamic_batch_size * num_samples, self.action_dim])
      batch_sample_covariance_diag = tf.reshape(
          tf.tile(sample_covariance_diag, [1, num_samples]),
          [self._dynamic_batch_size * num_samples, self.action_dim])
      # Draw samples and clip them into the valid action box.
      action_samples = self._action_projection(
          batch_sample_mean + batch_sample_covariance_diag * tf.cast(
              random_sampler.sample(
                  sample_shape=[self._dynamic_batch_size * num_samples]),
              dtype=tf.float32))
      state_samples = tf.reshape(
          tf.tile(self._state_tensor, [1, num_samples]),
          [self._dynamic_batch_size * num_samples, self.state_dim])
      action_samples = tf.reshape(
          action_samples,
          [self._dynamic_batch_size * num_samples, self.action_dim])
      values = tf.reshape(
          self._build_q_function_net(state_samples, action_samples),
          [self._dynamic_batch_size, num_samples])
      # everything is in batch mode
      # Indices of the top-k samples per state, best first.
      top_k_index = tf.argsort(
          values, axis=1, direction="DESCENDING")[:, 0:top_k_num]
      top_k_index_1d = tf.reshape(top_k_index,
                                  [self._dynamic_batch_size * top_k_num, 1])
      # Pair each sample index with its batch row for gather_nd.
      counter_tensor_1d = tf.reshape(
          tf.tile(
              tf.reshape(
                  tf.range(self._dynamic_batch_size),
                  [self._dynamic_batch_size, 1]), [1, top_k_num]),
          [self._dynamic_batch_size * top_k_num, 1])
      top_k_index_2d = tf.concat([counter_tensor_1d, top_k_index_1d], axis=1)
      action_samples = tf.reshape(
          action_samples,
          [self._dynamic_batch_size, num_samples, self.action_dim])
      top_k_action_samples = tf.gather_nd(action_samples, top_k_index_2d)
      top_k_action_samples = tf.reshape(
          top_k_action_samples,
          [self._dynamic_batch_size, top_k_num, self.action_dim])
      top_k_values = tf.gather_nd(values, top_k_index_2d)
      top_k_values = tf.reshape(top_k_values,
                                [self._dynamic_batch_size, top_k_num])
      # it's a batch_size x 1 tensor
      top_k_value = tf.reshape(
          tf.reduce_mean(top_k_values, axis=1), [self._dynamic_batch_size, 1])
      # Refit the Gaussian to the elite set.
      sample_mean = tf.reduce_mean(top_k_action_samples, axis=1)
      sample_covariance_diag = tf.math.reduce_variance(
          top_k_action_samples, axis=1)
      itr = itr + 1
      # Converged when the mean elite value stops improving.
      cond_terminate = tf.less_equal(
          tf.reduce_mean(tf.math.abs(top_k_value - top_k_value_prev)),
          self._tolerance_tensor)
      return itr, cond_terminate, sample_mean, sample_covariance_diag, \
          top_k_value, top_k_action_samples
    self.cost_optimizer = tf.while_loop(
        cond_cross_entropy, body_cross_entropy, [
            tf.constant(0),
            tf.constant(False), sample_mean_init, sample_covariance_diag_init,
            top_k_value_init, top_k_action_samples_init
        ])
  def _create_gradient_ascent_action_tensors(self, eps=1e-6):
    """Create tensorflow operations for gradient ascent max_actions.

    Builds a `tf.while_loop` that maximizes Q(s, a) over the action by
    gradient ascent on a trainable action variable. When
    `self.sufficient_ascent_flag` is set, a nested while-loop performs a
    backtracking line search (Armijo + Goldstein conditions) to pick the
    step size; otherwise a fixed `self.learning_rate_ga` step is used.

    Args:
      eps: float. Small constant added to the gradient norm to avoid
        division by zero when normalizing.
    """
    self._action_init_tensor = tf.placeholder(
        dtype=tf.float32,
        name="action_init_tensor",
        shape=(None, self.action_dim))
    self._tolerance_tensor = tf.placeholder(
        dtype=tf.float32, name="tolerance_tensor", shape=())
    with tf.variable_scope("{}_{}".format(self.name, "action_variable")):
      # Trainable action variable, (re)initialized from the placeholder
      # via `self.action_init_op` before each maximization.
      self._action_variable_tensor = tf.Variable(
          initial_value=self._action_init_tensor,
          trainable=True,
          name="action_var")
    # gradient ascent
    # Cost is -mean Q, so minimizing it maximizes Q.
    self.cost_now = -tf.reduce_mean(
        self._build_q_function_net(self._state_tensor,
                                   self._action_variable_tensor))
    self.action_gradient = tf.gradients(self.cost_now,
                                        self._action_variable_tensor)[0]
    # normalize the gradient
    self.normalized_action_gradient = self.action_gradient / (
        eps + tf.linalg.norm(self.action_gradient))
    if self.sufficient_ascent_flag:
      def cond_sufficient_descent(learning_rate_action,
                                  cond_sufficient_descent, cost_perturbed):
        # Keep shrinking the step while it is above the floor
        # `self.learning_rate_action` and the conditions are unmet.
        del cost_perturbed
        cond_1 = tf.math.greater(learning_rate_action,
                                 self.learning_rate_action)
        return tf.math.logical_and(cond_1,
                                   tf.logical_not(cond_sufficient_descent))
      def body_sufficient_descent(learning_rate_action,
                                  cond_sufficient_descent,
                                  cost_perturbed,
                                  c_armijo=0.01,
                                  c_goldstein=0.25,
                                  lr_decay=0.1):
        """Function for sufficient descent."""
        del cond_sufficient_descent, cost_perturbed
        # Trial step along the (negative) normalized gradient.
        action_variable_perturbed_tensor = self._action_variable_tensor - \
            learning_rate_action * self.normalized_action_gradient
        cost_perturbed = -tf.reduce_mean(
            self._build_q_function_net(self._state_tensor,
                                       action_variable_perturbed_tensor))
        # Here the negative gradient corresponds to maximization of Q fun.
        sufficient_descent = tf.reduce_sum(self.action_gradient *
                                           -self.normalized_action_gradient)
        # Goldstein: step not too small; Armijo: decrease is sufficient.
        goldstein_condition = tf.greater_equal(
            cost_perturbed, self.cost_now +
            c_goldstein * learning_rate_action * sufficient_descent)
        armijo_condition = tf.less_equal(
            cost_perturbed, self.cost_now +
            c_armijo * learning_rate_action * sufficient_descent)
        cond_sufficient_descent = tf.logical_and(goldstein_condition,
                                                 armijo_condition)
        with tf.control_dependencies([cond_sufficient_descent]):
          learning_rate_action = learning_rate_action * lr_decay
        return learning_rate_action, cond_sufficient_descent, cost_perturbed
    # Construct the while loop.
    def cond_gradient_ascent(itr, cond_terminate):
      # Continue while under the iteration cap and not yet converged.
      cond_1 = tf.math.less(itr, self.action_maximization_iterations)
      return tf.math.logical_and(cond_1, tf.logical_not(cond_terminate))
    def body_gradient_ascent(itr, cond_terminate, lr_init=100.0):
      """One gradient step on the action variable."""
      del cond_terminate
      if self.sufficient_ascent_flag:
        # first calculate sufficient descent (backtracking line search)
        result_sufficient_descent = tf.while_loop(
            cond_sufficient_descent, body_sufficient_descent,
            [tf.constant(lr_init),
             tf.constant(False),
             tf.constant(np.inf)])
        lr_action = result_sufficient_descent[0]
        cost_perturbed = result_sufficient_descent[2]
        cond_terminate = tf.less_equal(
            tf.math.abs(cost_perturbed - self.cost_now),
            self._tolerance_tensor)
      else:
        # no sufficient descent step: fixed learning rate
        lr_action = self.learning_rate_ga
        action_variable_perturbed_tensor = self._action_variable_tensor - \
            lr_action * self.normalized_action_gradient
        cost_perturbed = -tf.reduce_mean(
            self._build_q_function_net(self._state_tensor,
                                       action_variable_perturbed_tensor))
        cond_terminate = tf.less_equal(
            tf.math.abs(cost_perturbed - self.cost_now),
            self._tolerance_tensor)
      # Apply the (normalized) gradient to the action variable.
      train_op = tf.train.GradientDescentOptimizer(
          learning_rate=lr_action).apply_gradients(
              grads_and_vars=[(self.normalized_action_gradient,
                               self._action_variable_tensor)])
      # Ensure that the update is applied before continuing.
      with tf.control_dependencies([train_op]):
        itr = itr + 1
      return itr, cond_terminate
    self.cost_optimizer = tf.while_loop(
        cond_gradient_ascent, body_gradient_ascent,
        [tf.constant(0), tf.constant(False)])
    # Re-initializes the action variable from `_action_init_tensor`.
    self.action_init_op = tf.initializers.variables(
        tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES,
            scope="{}_{}".format(self.name, "action_variable")))
  def _create_dual_maxq_label_tensor(self, method="duality_based"):
    """Approximate the maxq label with dual.

    Splits the Q-network placeholders into weight and bias lists (even
    indices in `self._vars_tf` are kernels, odd indices are biases —
    assumes strictly alternating kernel/bias variable order; TODO confirm
    against the network builder). The first kernel's state rows are folded
    into the first bias via `W_x^T s'`, so the dual solver only reasons
    over the action input. Then builds an upper bound on max_a Q(s', a)
    with the chosen dual solver.

    Args:
      method: str. One of "duality_based", "ibp", or anything else for
        the elementwise minimum of both bounds.
    """
    w_transpose_list = []
    b_transpose_list = []
    num_layers = 1
    for itr, var in enumerate(self._vars_tf):
      if itr % 2 == 0:
        # even itr, multiplicative weights
        if itr == 0:
          # First layer consumes [state, action]: split the kernel into
          # the state part (folded into the bias below) and action part.
          wx_transpose = self._dummy_network_var_ph["{}_ph".format(
              var.name)][:self.state_dim, :]
          w_transpose_list.append(self._dummy_network_var_ph["{}_ph".format(
              var.name)][self.state_dim:, :])
        else:
          w_transpose_list.append(self._dummy_network_var_ph["{}_ph".format(
              var.name)])
        num_layers += 1
      else:
        # odd itr, additive weights
        if itr == 1:
          # First bias absorbs the state contribution W_x^T s'.
          b_transpose_list.append(
              tf.tile(
                  tf.expand_dims(
                      self._dummy_network_var_ph["{}_ph".format(var.name)],
                      axis=0), [self.batch_size, 1]) +
              tf.matmul(self._next_state_tensor, wx_transpose))
        else:
          b_transpose_list.append(
              tf.tile(
                  tf.expand_dims(
                      self._dummy_network_var_ph["{}_ph".format(var.name)],
                      axis=0), [self.batch_size, 1]))
    # Actions are bounded in an l-infinity ball around zero.
    action_tensor_center = tf.zeros(shape=[self.batch_size, self.action_dim])
    l_infty_norm_bound = np.max(self.action_max)
    if method == "duality_based":
      self.dual_maxq_tensor = dual_method.create_dual_approx(
          num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
          b_transpose_list, action_tensor_center)
    elif method == "ibp":
      # ibp dual solver (interval bound propagation)
      self.dual_maxq_tensor = dual_ibp_method.create_dual_ibp_approx(
          num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
          b_transpose_list, action_tensor_center)
    else:
      # mix method: take the tighter of the two upper bounds
      dual_maxq_tensor = dual_method.create_dual_approx(
          num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
          b_transpose_list, action_tensor_center)
      dual_ibp_maxq_tensor = dual_ibp_method.create_dual_ibp_approx(
          num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
          b_transpose_list, action_tensor_center)
      # minimum of the upper-bound
      self.dual_maxq_tensor = tf.minimum(dual_maxq_tensor, dual_ibp_maxq_tensor)
def _create_dual_active_constraint_condition_tensor(self):
"""Create active constraint condition."""
# It's a 1D boolean tensor with length=batch_size
self.dual_active_constraint_condition_tensor = tf.reshape(
tf.math.greater(self._backup_tensor, self.q_function_network), [-1])
def _action_projection(self, action):
"""Action projection."""
if isinstance(action, np.ndarray):
return np.minimum(self.action_spec.maximum,
np.maximum(self.action_spec.minimum, action))
else:
# tf version
return tf.minimum(
self.action_spec.maximum,
tf.maximum(self.action_spec.minimum, tf.cast(action, tf.float32)))
  def _build_action_function_net(self, state):
    """Build the action network.

    Maps states to actions clipped into the valid action bounds. Layer
    names ("dense_%d", "action_output") and the variable scope are reused
    via AUTO_REUSE, so repeated calls share weights.

    Args:
      state: tensor batch of states.

    Returns:
      Tensor of projected (bounded) actions.
    """
    # define network
    with tf.variable_scope(
        "{}_{}".format(self.name, "action_function"),
        reuse=tf.compat.v1.AUTO_REUSE):
      net = tf.layers.flatten(state, name="flatten_0")
      for i, hidden_units in enumerate(self.hidden_layers):
        net = tf.layers.dense(net, hidden_units, name="dense_%d" % i)
        # NOTE(review): batch_normalization is called with default
        # arguments (training=False) — confirm this is intended.
        net = tf.layers.batch_normalization(net)
        net = tf.nn.relu(net)
      net = tf.layers.dense(net, self.action_dim, name="action_output")
      # make sure actions are bounded
      net = self._action_projection(net)
    return net
  def _build_q_function_net(self, state, action):
    """Build q_function network.

    Concatenates the flattened state with the action and applies a stack
    of ReLU dense layers followed by a linear scalar output. Weights are
    shared across calls via AUTO_REUSE on the "<name>_q_function" scope.

    Args:
      state: tensor batch of states.
      action: tensor batch of actions.

    Returns:
      Tensor of shape (batch, 1) with Q(state, action) estimates.
    """
    # define network
    with tf.variable_scope(
        "{}_{}".format(self.name, "q_function"), reuse=tf.compat.v1.AUTO_REUSE):
      net = tf.layers.flatten(state, name="q_flatten_0")
      net = tf.concat([net, action], axis=-1)
      for i, hidden_units in enumerate(self.hidden_layers):
        net = tf.layers.dense(
            net, hidden_units, activation=tf.nn.relu, name="q_dense_%d" % i)
      net = tf.layers.dense(net, 1, name="q_output")
    return net
  def _build_lambda_function_net(self, state, action):
    """Build lambda_function network.

    Same topology as the Q network, but the scalar output is clamped into
    [0, self.lambda_max], as required for a Lagrange multiplier. Weights
    are shared across calls via AUTO_REUSE.

    Args:
      state: tensor batch of states.
      action: tensor batch of actions.

    Returns:
      Tensor of shape (batch, 1) with values in [0, lambda_max].
    """
    # define network
    with tf.variable_scope(
        "{}_{}".format(self.name, "lambda_function"),
        reuse=tf.compat.v1.AUTO_REUSE):
      net = tf.layers.flatten(state, name="lambda_flatten_0")
      net = tf.concat([net, action], axis=-1)
      for i, hidden_units in enumerate(self.hidden_layers):
        net = tf.layers.dense(
            net,
            hidden_units,
            activation=tf.nn.relu,
            name="lambda_dense_%d" % i)
      net = tf.layers.dense(net, 1, name="lambda_output")
      # Clamp into [0, lambda_max]: multipliers must be non-negative.
      net = tf.minimum(
          self.lambda_max,
          tf.maximum(0.0, tf.cast(net, tf.float32)),
          name="lambda_proj")
    return net
def predict_action_function(self, state):
"""Predict action function.
Predict the best action for the given state using action function.
Args:
state: np.ndarray for state.
Returns:
Tensor for the predicted best action for the given `state`.
"""
state_tensor = np.reshape(state, [-1, self.state_dim])
return self._session.run(
self.action_function_network,
feed_dict={
self._state_tensor: state_tensor,
})
def predict_q_function(self, state, action):
"""Predict Q function.
Args:
state: np.ndarray for state.
action: np.ndarray for action.
Returns:
Tensorfor the predicted Q value for the given `state` and `action` pair.
"""
state_tensor = np.reshape(state, [-1, self.state_dim])
action_tensor = np.reshape(action, [-1, self.action_dim])
return self._session.run(
self.q_function_network,
feed_dict={
self._state_tensor: state_tensor,
self._action_tensor: action_tensor
})
def predict_state_perturbed_q_function(self, centroid_states,
centroid_actions, state_deviation):
"""Predict state perturbed Q function.
Args:
centroid_states: np.ndarray for centroid states.
centroid_actions: np.ndarray for the actions of the centroid states.
state_deviation: np.ndarray for the vector distance between non-centroid
states and their centroids.
Returns:
Tensor for the predicted Q values for the non-centroid states.
"""
centroid_states = np.reshape(centroid_states, [-1, self.state_dim])
centroid_actions = np.reshape(centroid_actions, [-1, self.action_dim])
state_deviation = np.reshape(state_deviation, [-1, self.state_dim])
return self._session.run(
self.state_perturbed_q_function_network,
feed_dict={
self._state_tensor: centroid_states,
self._action_tensor: centroid_actions,
self._state_deviation_tensor: state_deviation
})
def predict_lambda_function(self, state, action):
"""Predict lambda function.
Args:
state: np.ndarray for state.
action: np.ndarray for action.
Returns:
Tensor for the predicted lambda for the given `state` and `action` pair.
"""
state_tensor = np.reshape(state, [-1, self.state_dim])
action_tensor = np.reshape(action, [-1, self.action_dim])
return self._session.run(
self.lambda_function_network,
feed_dict={
self._state_tensor: state_tensor,
self._action_tensor: action_tensor
})
def compute_backup(self, maxq_labels, rewards, dones, discount_factor):
"""Compute Bellman backup.
Args:
maxq_labels: np.ndarray for max-Q labels.
rewards: np.ndarray for immediate rewards.
dones: np.ndarray for done flags. True if a state is a terminating state,
False otherwise.
discount_factor: float. Discount factor gamma.
Returns:
Tensor for TD targets.
"""
maxq_label = np.reshape(maxq_labels, [-1, 1])
reward_tensor = np.reshape(rewards, [-1, 1])
done_tensor = np.reshape(dones, [-1, 1])
feed = {
self._maxq_label: maxq_label,
self._reward_tensor: reward_tensor,
self._done_tensor: done_tensor,
self._discount_factor: discount_factor
}
return self._session.run(self._backup_tensor, feed_dict=feed)
def compute_td_rmse(self, states, actions, maxq_labels, rewards, dones,
discount_factor):
"""Compute TD rmse.
Args:
states: np.ndarray for states.
actions: np.ndarray for actions.
maxq_labels: np.ndarray for max-Q labels.
rewards: np.ndarray for immediate rewards.
dones: np.ndarray for done flags. True if a state is a terminating state,
False otherwise.
discount_factor: float. Discount factor gamma.
Returns:
Tensor for TD RMSE.
"""
state_tensor = np.reshape(states, [-1, self.state_spec.shape[0]])
action_tensor = np.reshape(actions, [-1, self.action_spec.shape[0]])
maxq_label = np.reshape(maxq_labels, [-1, 1])
reward_tensor = np.reshape(rewards, [-1, 1])
done_tensor = np.reshape(dones, [-1, 1])
feed = {
self._state_tensor: state_tensor,
self._action_tensor: action_tensor,
self._maxq_label: maxq_label,
self._reward_tensor: reward_tensor,
self._done_tensor: done_tensor,
self._discount_factor: discount_factor
}
return self._session.run(self._td_rmse, feed_dict=feed)
def compute_dual_active_constraint_condition(self, states, actions,
dual_maxq_labels, rewards, dones,
discount_factor):
"""Compute dual active constraint condition.
Args:
states: np.ndarray for states.
actions: np.ndarray for actions.
dual_maxq_labels: np.ndarray for max-Q labels computed by dual method.
rewards: np.ndarray for immediate rewards.
dones: np.ndarray for done flags. True if a state is a terminating state,
False otherwise.
discount_factor: float. Discount factor gamma.
Returns:
Tensor for bool flags. True if a TD target is larger than a predicted
Q value for a pair of state and action.
"""
state_tensor = np.reshape(states, [-1, self.state_dim])
action_tensor = np.reshape(actions, [-1, self.action_dim])
dual_maxq_label = np.reshape(dual_maxq_labels, [-1, 1])
reward_tensor = np.reshape(rewards, [-1, 1])
done_tensor = np.reshape(dones, [-1, 1])
feed = {
self._state_tensor: state_tensor,
self._action_tensor: action_tensor,
self._maxq_label: dual_maxq_label,
self._reward_tensor: reward_tensor,
self._done_tensor: done_tensor,
self._discount_factor: discount_factor
}
return self._session.run(
self.dual_active_constraint_condition_tensor, feed_dict=feed)
def compute_best_actions(self, states, tolerance, warmstart=True,
tf_summary_vals=None):
"""Compute best actions.
Args:
states: np.ndarray for states.
tolerance: float. Optimizer tolerance. This is used as a stopping
condition for the optimizer.
warmstart: bool on warmstarting flag.
tf_summary_vals: list to store tf.Summary.Value objects.
Returns:
Tensor for the best actions for the given `states`.
"""
state_tensor = np.reshape(states, [-1, self.state_dim])
assert len(state_tensor) > 0
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(tag="tolerance", simple_value=tolerance))
# profiling the batch action maximization.
ts_begin = time.time()
if self.solver == "gradient_ascent":
if not hasattr(self, "_action_init_tensor"):
print("Create action variables for gradient ascent.")
self._create_gradient_ascent_action_tensors()
best_actions = self.gradient_ascent_best_actions(state_tensor, tolerance,
warmstart,
tf_summary_vals)
elif self.solver == "cross_entropy":
if not hasattr(self, "_action_init_tensor"):
print("Create action variables for cross entropy.")
self._create_cross_entropy_action_tensors()
best_actions = self.cross_entropy_best_actions(state_tensor, tolerance,
warmstart, tf_summary_vals)
elif self.solver == "ails" or self.solver == "mip":
raise ValueError("AILS and MIP solvers are not supported yet.")
else:
raise ValueError("Solver is not implemented!")
elapsed_in_msecs = int((time.time() - ts_begin) * 1000)
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(
tag="batch_maxq/elapsed_msec", simple_value=elapsed_in_msecs))
return best_actions
def cross_entropy_best_actions(self, state_tensor, tolerance_tensor,
warmstart, tf_summary_vals=None):
"""Get best action with cross entropy for train network."""
dynamic_batch_size = len(state_tensor)
if warmstart:
action_init_tensor = self.predict_action_function(state_tensor)
else:
# randomly sample actions
action_init_tensor = self.action_min + np.random.rand(
dynamic_batch_size, self.action_dim) * (
self.action_max - self.action_min)
feed = {
self._state_tensor: state_tensor,
self._tolerance_tensor: tolerance_tensor,
self._action_init_tensor: action_init_tensor,
self._dynamic_batch_size: dynamic_batch_size
}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
# 1) maximize actions through cross entropy
result = self._session.run(self.cost_optimizer, feed_dict=feed)
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(tag="batch_maxq/iterations", simple_value=result[0]))
# itr, cond_terminate, sample_mean, sample_covariance_diag,
# top_k_value, top_k_actions
top_k_actions = result[-1]
return top_k_actions[:, 0, :]
def gradient_ascent_best_actions(self, state_tensor, tolerance_tensor,
warmstart, tf_summary_vals=None):
"""Get best action with gradient ascent for train network."""
dynamic_batch_size = len(state_tensor)
if warmstart:
action_init_tensor = self.predict_action_function(state_tensor)
else:
# randomly sample actions
action_init_tensor = self.action_min + np.random.rand(
dynamic_batch_size, self.action_dim) * (
self.action_max - self.action_min)
# 1) initialize tensors in feed_dict
feed = {
self._state_tensor: state_tensor,
self._tolerance_tensor: tolerance_tensor,
self._action_init_tensor: action_init_tensor
}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
# 2) initialize action variable in dummy q_network
self._session.run(self.action_init_op, feed_dict=feed)
# 3) maximize actions through gradient ascent
result = self._session.run(self.cost_optimizer, feed_dict=feed)
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(tag="batch_maxq/iterations", simple_value=result[0]))
# 4) get max action solutions
return self._action_projection(
self._session.run(self._action_variable_tensor))
def compute_dual_maxq_label(self, next_states):
"""Compute max Q label via the dual method.
Args:
next_states: np.ndarray for states.
Returns:
Tensor for the best action for the given `next_states` computed by the
duality.
"""
feed = {self._next_state_tensor: next_states}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
return self._session.run(self.dual_maxq_tensor, feed_dict=feed)
def batch_train_action_function(self, state_tensor_stack, best_q_stack):
"""Train action function.
Args:
state_tensor_stack: np.ndarray for states.
best_q_stack: np.ndarray for the max-Q labels.
Returns:
TF op for the action function loss.
"""
feed = {
self._state_tensor: state_tensor_stack,
self._best_q_label: best_q_stack,
}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
action_function_loss, _ = self._session.run(
[self._action_function_loss, self._action_function_optimizer],
feed_dict=feed)
return action_function_loss
def batch_train_q_function(self, state_tensor_stack, action_tensor_stack,
true_label_stack):
"""Train Q function function.
Args:
state_tensor_stack: np.ndarray for states.
action_tensor_stack: np.ndarray for actions.
true_label_stack: np.ndarray for the TD targets.
Returns:
TF op for the Q function loss.
"""
feed = {
self._state_tensor: state_tensor_stack,
self._action_tensor: action_tensor_stack,
self._true_label: true_label_stack,
}
q_function_loss, _ = self._session.run(
[self._q_function_loss, self._q_function_optimizer], feed_dict=feed)
return q_function_loss
def batch_train_lambda_function(self, state_tensor_stack, action_tensor_stack,
true_label_stack):
"""Train lambda function.
Args:
state_tensor_stack: np.ndarray for states.
action_tensor_stack: np.ndarray for actions.
true_label_stack: np.ndarray for the TD targets.
Returns:
TF op for the lambda function loss.
"""
feed = {
self._state_tensor: state_tensor_stack,
self._action_tensor: action_tensor_stack,
self._true_label: true_label_stack,
}
lambda_function_loss, _ = self._session.run(
[self._lambda_function_loss, self._lambda_function_optimizer],
feed_dict=feed)
return lambda_function_loss
|
# -*- coding: utf-8 -*-
# Design pattern: procedural programming
# Form implementation generated from reading ui file 'test3.ui'
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import time
import sys
from PyQt5.QtCore import QTimer, QDateTime
from PyQt5.QtCore import QTimer
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import QGraphicsDropShadowEffect
import multiprocessing
import threading
from duoji import duoji
from face_detection import face_detection
class Ui_MainWindow(object):
    def __init__(self):
        """Initialize the picture lookup tables used by the UI.

        `pictures_dict` holds style sheets for the rotating main-screen
        posters; `relaxingScreen_pictures_dict` holds one style sheet per
        frame of the 13-frame screen-saver ("face changing") animation.
        """
        # global main_screen_picture_number
        # Number of rotating main-screen poster images.
        self.main_screen_picture_number = 4
        self.pictures_dict = {
            1: "background-image:url(E:/python learing/GUI/垃圾分类污染图1.png)",
            2: "background-image:url(E:/python learing/GUI/垃圾分类污染图2.png)",
            3: "background-image:url(E:/python learing/GUI/垃圾分类污染图3.png)",
            4: "background-image:url(E:/python learing/GUI/垃圾分类污染图4.png)"}
        # NOTE(review): named "total" but initialized to 1 while the
        # animation has 13 frames — presumably used as the current frame
        # index; confirm against the animation timer code.
        self.relaxingScreen_pictures_total = 1  # the animation has 13 frames
        self.relaxingScreen_pictures_dict = {
            1: "background-image:url(E:/python learing/GUI/faceChanging 1.jpg)",
            2: "background-image:url(E:/python learing/GUI/faceChanging 2.jpg)",
            3: "background-image:url(E:/python learing/GUI/faceChanging 3.jpg)",
            4: "background-image:url(E:/python learing/GUI/faceChanging 4.png)",
            5: "background-image:url(E:/python learing/GUI/faceChanging 5.png)",
            6: "background-image:url(E:/python learing/GUI/faceChanging 5.png)",
            7: "background-image:url(E:/python learing/GUI/faceChanging 5.png)",
            8: "background-image:url(E:/python learing/GUI/faceChanging 5.png)",
            9: "background-image:url(E:/python learing/GUI/faceChanging 5.png)",
            10: "background-image:url(E:/python learing/GUI/faceChanging 4.png)",
            11: "background-image:url(E:/python learing/GUI/faceChanging 3.jpg)",
            12: "background-image:url(E:/python learing/GUI/faceChanging 2.jpg)",
            13: "background-image:url(E:/python learing/GUI/faceChanging 1.jpg)",
        }
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(1280, 800)
MainWindow.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
MainWindow.setWindowOpacity(1.0) # set the opacity of the window. When set to 0, the window is invisible
MainWindow.setStyleSheet("background-image:url(E:/python learing/GUI/新背景图片.png)")
# self.centralwidget = QtWidgets.QWidget(MainWindow)
# self.centralwidget.setObjectName("centralwidget")
# 空白处按钮
self.pushButton_4 = QtWidgets.QPushButton(MainWindow)
self.pushButton_4.setGeometry(QtCore.QRect(-10, -10, 1380, 900))
self.pushButton_4.setAutoFillBackground(False)
self.pushButton_4.setObjectName("pushButton4")
self.pushButton_4.setIconSize(QtCore.QSize(1380, 900))
self.pushButton_4.clicked.connect(self.clickButton4)
# 可回收按钮
self.pushButton = QtWidgets.QPushButton(MainWindow)
# self.pushButton.keyPressEvent()
# print('Is recycle button been pushed? ',self.pushButton.clicked())
self.pushButton.setGeometry(QtCore.QRect(150, 100, 120, 120))
self.pushButton.setAutoFillBackground(False)
self.pushButton.setStyleSheet("background-color:rgb(170,170,225);\n"
"\n"
"border-radius:60px\n"
"")
self.pushButton.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("E:/python learing/GUI/新可回收标志.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.pushButton.setIcon(icon)
self.pushButton.setIconSize(QtCore.QSize(120, 120))
self.pushButton.setObjectName("pushButton")
# 绑定点击事件
self.pushButton.clicked.connect(self.clickButton1)
# 不可回收按钮
self.pushButton_2 = QtWidgets.QPushButton(MainWindow)
self.pushButton_2.setGeometry(QtCore.QRect(150, 300, 120, 120))
self.pushButton_2.setAutoFillBackground(False)
self.pushButton_2.setStyleSheet("border-radius:60px\n"
"\n"
"\n"
"")
self.pushButton_2.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("E:/python learing/GUI/新不可回收标志.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.pushButton_2.setIcon(icon1)
self.pushButton_2.setIconSize(QtCore.QSize(120, 120))
self.pushButton_2.setObjectName("pushButton2")
# 绑定点击事件
self.pushButton_2.clicked.connect(self.clickButton2)
# 清洁模式按钮
self.pushButton_3 = QtWidgets.QPushButton(MainWindow)
self.pushButton_3.setGeometry(QtCore.QRect(150, 500, 120, 120))
self.pushButton_3.setAutoFillBackground(False)
self.pushButton_3.setStyleSheet("border-radius:60px\n"
"\n"
"\n"
"")
self.pushButton_3.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("E:/python learing/GUI/清洁模式标志.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.pushButton_3.setIcon(icon2)
self.pushButton_3.setIconSize(QtCore.QSize(120, 120))
self.pushButton_3.setObjectName("pushButton3")
# 绑定点击事件
self.pushButton_3.clicked.connect(self.clickButton3)
# 创建label
# 核对垃圾信息窗口
self.label_4 = QtWidgets.QLabel(MainWindow)
self.label_4.setGeometry(QtCore.QRect(305, 100, 855, 530))
self.label_4.setStyleSheet("background-image:url(E:/python learing/GUI/可回收垃圾核对.png)")
self.label_4.setObjectName("label4")
self.label_4.setVisible(False)
self.label_5 = QtWidgets.QLabel(MainWindow)
self.label_5.setGeometry(QtCore.QRect(305, 100, 855, 530))
self.label_5.setStyleSheet("background-image:url(E:/python learing/GUI/不可回收垃圾核对.png)")
self.label_5.setObjectName("label5")
self.label_5.setVisible(False)
# 再次点击提示
self.label = QtWidgets.QLabel(MainWindow)
self.label.setGeometry(QtCore.QRect(0, 0, 1280, 43))
self.label.setStyleSheet("background-image:url(E:/python learing/GUI/请确认您的垃圾受否属于该种类,再次点击将开盖.png)")
self.label.setObjectName("label1")
self.label.setVisible(False)
# 正在开盖提示
self.label_2 = QtWidgets.QLabel(MainWindow)
self.label_2.setGeometry(QtCore.QRect(0, 0, 1280, 800))
self.label_2.setStyleSheet("background-image:url(E:/python learing/GUI/正在开盖,请稍等…….png)")
self.label_2.setObjectName("label2")
self.label_2.setVisible(False)
# 正在关闭提示
self.label_3 = QtWidgets.QLabel(MainWindow)
self.label_3.setGeometry(QtCore.QRect(0, 0, 1280, 800))
self.label_3.setStyleSheet("background-image:url(E:/python learing/GUI/正在关闭,请稍等…….png)")
self.label_3.setObjectName("label3")
self.label_3.setVisible(False)
# 垃圾分类宣传图片
self.label_6 = QtWidgets.QLabel(MainWindow)
self.label_6.setGeometry(QtCore.QRect(305, 100, 855, 530))
self.label_6.setStyleSheet("background-image:url(E:/python learing/GUI/垃圾分类污染图1.png)\n")
self.label_6.setObjectName("AD1")
self.label_6.setVisible(True)
# 关闭盖子按钮
self.pushButton_5 = QtWidgets.QPushButton(MainWindow)
self.pushButton_5.setGeometry(QtCore.QRect(554, 617, 153, 88))
self.pushButton_5.setAutoFillBackground(False)
self.pushButton_5.setStyleSheet("border-radius:10px\n"
"\n"
"\n"
"")
self.pushButton_5.setText("")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("E:/python learing/GUI/关闭按钮.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.pushButton_5.setIcon(icon5)
self.pushButton_5.setIconSize(QtCore.QSize(153, 88))
self.pushButton_5.setObjectName("pushButton5")
self.pushButton_5.setVisible(False)
self.pushButton_5.clicked.connect(self.closeCover)
# 息屏按钮
self.pushButton_screenRelax = QtWidgets.QPushButton(MainWindow)
self.pushButton_screenRelax.setGeometry(QtCore.QRect(0, 0, 1280, 800))
self.pushButton_screenRelax.setAutoFillBackground(False)
self.pushButton_screenRelax.setObjectName("pushButton_screenRelax")
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("background-image:url(E:/python learing/GUI/faceChanging 1.jpg)"), QtGui.QIcon.Normal,
QtGui.QIcon.On)
self.pushButton_screenRelax.setIcon(icon6)
self.pushButton_screenRelax.setIconSize(QtCore.QSize(1280, 800))
self.pushButton_screenRelax.clicked.connect(self.clickButtonScreenRelax)
# 按钮添加阴影
self.add_shadow(self.pushButton)
self.add_shadow(self.pushButton_2)
self.add_shadow(self.pushButton_3)
self.add_shadow(self.pushButton_4)
self.add_shadow(self.pushButton_5)
self.add_shadow2(self.label)
self.add_shadow2(self.label_2)
self.add_shadow2(self.label_3)
self.add_shadow2(self.label_4)
self.add_shadow2(self.label_5)
self.add_shadow2(self.label_6)
# MainWindow.setCentralWidget(self.centralwidget)
# self.menubar = QtWidgets.QMenuBar(MainWindow)
# self.menubar.setGeometry(QtCore.QRect(0, 0, 1280, 26))
# self.menubar.setObjectName("menubar")
# MainWindow.setMenuBar(self.menubar)
# self.statusbar = QtWidgets.QStatusBar(MainWindow)
# self.statusbar.setObjectName("statusbar")
# MainWindow.setStatusBar(self.statusbar)
# 下面的功能是创建了一个定时线程,当该线程的回调函数被执行时,宣传图片会刷新
# self.timer_create()
# 定义按钮点击事件
def clickButton1(self):
    """Handle clicks on button 1 (the recyclable-waste mode button).

    The widget's objectName is (ab)used as a tiny state machine:
    "pushButton" = idle, "pushButton_beforeOpen" = armed (confirm icon shown),
    "pushButton_afterOpen" = lid opened.
    """
    # Second click while armed: actually open the lid.
    if self.pushButton.objectName() == "pushButton_beforeOpen":
        print("1111")
        self.pushButton.setObjectName("pushButton_afterOpen")
        # self.label_4.setVisible(False)
        self.openCover()
        self.timer_for_auto_relax()
    # First click while all three buttons are idle: arm this button and
    # show the confirmation icon and hint labels.
    if self.pushButton_2.objectName() == "pushButton2" and self.pushButton_3.objectName() == "pushButton3" and self.pushButton.objectName() == "pushButton":
        print("111")
        self.pushButton.setObjectName("pushButton_beforeOpen")
        self.pushButton.setIcon(QIcon(QPixmap("E:/python learing/GUI/确认标志.png")))  # switch to the confirm icon after click
        self.label.setVisible(True)
        self.label_4.setVisible(True)
        self.label_6.setVisible(False)
        self.timer_for_auto_relax()
def clickButton2(self):
    """Handle clicks on button 2 (the non-recyclable-waste mode button).

    Same objectName-based state machine as clickButton1.
    """
    # Second click while armed: open the lid.
    if self.pushButton_2.objectName() == "pushButton2_beforeOpen":
        print("1111")
        self.pushButton_2.setObjectName("pushButton2_afterOpen")
        self.openCover()
        self.timer_for_auto_relax()
    # First click while all three buttons are idle: arm button 2.
    if self.pushButton.objectName() == "pushButton" and self.pushButton_3.objectName() == "pushButton3" and self.pushButton_2.objectName() == "pushButton2":
        print("222")
        self.pushButton_2.setIcon(QIcon(QPixmap("E:/python learing/GUI/确认标志.png")))  # switch to the confirm icon after click
        self.pushButton_2.setObjectName("pushButton2_beforeOpen")
        self.label.setVisible(True)
        self.label_5.setVisible(True)
        self.label_6.setVisible(False)
        self.timer_for_auto_relax()
def clickButton3(self):
    """Handle clicks on button 3 (the cleaning-mode button).

    Same objectName state machine as clickButton1/2; note that the
    lid-opening call is commented out for this mode.
    """
    if self.pushButton_3.objectName() == "pushButton3_beforeOpen":
        print("1111")
        self.pushButton_3.setObjectName("pushButton3_afterOpen")
        # self.openCover()
        self.timer_for_auto_relax()
    if self.pushButton.objectName() == "pushButton" and self.pushButton_2.objectName() == "pushButton2" and self.pushButton_3.objectName() == "pushButton3":
        print("333")
        self.pushButton_3.setIcon(QIcon(QPixmap("E:/python learing/GUI/确认标志.png")))  # switch to the confirm icon after click
        self.pushButton_3.setObjectName("pushButton3_beforeOpen")
        self.label.setVisible(True)
        self.label_6.setVisible(False)
        self.timer_for_auto_relax()
def clickButton4(self):
    """Reset all three mode buttons to idle: restore their icons,
    reset the objectName state flags and hide the hint labels."""
    print("444")
    self.pushButton.setIcon(QIcon(QPixmap("E:/python learing/GUI/新可回收标志.png")))  # restore the recyclable icon
    self.pushButton_2.setIcon(QIcon(QPixmap("E:/python learing/GUI/新不可回收标志.png")))  # restore the non-recyclable icon
    self.pushButton_3.setIcon(QIcon(QPixmap("E:/python learing/GUI/清洁模式标志.png")))  # restore the cleaning-mode icon
    self.pushButton.setObjectName("pushButton")
    self.pushButton_2.setObjectName("pushButton2")
    self.pushButton_3.setObjectName("pushButton3")
    self.label.setVisible(False)
    self.label_4.setVisible(False)
    self.label_5.setVisible(False)
    self.label_6.setVisible(True)
    self.timer_for_auto_relax()
    # self.timer_for_pictures_changing()
def clickButtonScreenRelax(self):
    """Dismiss the full-screen screensaver button and return to the
    main screen; the changed objectName marks the screensaver as off."""
    print("working lo")
    self.pushButton_screenRelax.setVisible(False)
    self.pushButton_screenRelax.setObjectName("screenRelax_clicked")  # state flag: screensaver dismissed
    self.timer_for_pictures_changing()
    self.timer_for_auto_relax()
# lid-opening routine (drop the trash in)
def openCover(self):
    """Open the bin lid: update the labels, then run the light strip and
    the servo motor concurrently while waiting for the lid to open.

    NOTE(review): time.sleep(4) blocks the Qt event loop for the whole
    opening phase - confirm this freeze is intended.
    """
    # global MainWindow
    print("openning")
    self.label.setVisible(False)
    self.label_5.setVisible(False)
    self.label_4.setVisible(False)
    self.label.repaint()
    self.label_4.repaint()
    print("opening 2")
    self.label_2.setVisible(True)
    self.label_2.repaint()  # force an immediate redraw before the blocking wait below
    # Run two tasks concurrently: task 1 blinks the light strip,
    # task 2 drives the servo that opens the lid.
    p1 = threading.Thread(target=face_detection().light_shinning)
    p2 = threading.Thread(target=duoji().motor_control)
    p1.start()
    p2.start()
    time.sleep(4)  # wait for the lid to open
    self.pushButton_5.setVisible(True)
# lid-closing routine
def closeCover(self):
    """Close the bin lid and reset the UI back to the idle state.

    NOTE(review): time.sleep(4) blocks the Qt event loop for the whole
    closing phase - confirm this freeze is intended.
    """
    print("close")
    self.label_2.setVisible(False)
    self.pushButton_5.setVisible(False)
    self.label_3.setVisible(True)
    self.label_3.repaint()
    time.sleep(4)  # wait for the lid to close
    self.label_3.setVisible(False)
    self.clickButton4()
# button drop-shadow effect
def add_shadow(self, button):
    """Attach a drop-shadow effect to *button*.

    FIX: the effect object is now a local variable. The original stored
    each new QGraphicsDropShadowEffect in the same instance attribute
    (self.effect_shadow), overwriting it on every call - misleading,
    since each widget needs (and keeps) its own effect object.
    """
    effect = QGraphicsDropShadowEffect(self.centralwidget)
    effect.setOffset(0, 0)      # no offset: shadow spreads evenly around the widget
    effect.setBlurRadius(10)    # shadow radius
    effect.setColor(QtCore.Qt.black)  # shadow colour
    button.setGraphicsEffect(effect)  # install the effect on the button
def add_shadow2(self, label):
    """Attach a drop-shadow effect to *label*.

    FIX: uses a local variable instead of repeatedly clobbering
    self.effect_shadow (see add_shadow).
    """
    effect = QGraphicsDropShadowEffect(self.centralwidget)
    effect.setOffset(0, 0)      # no offset: shadow spreads evenly around the widget
    effect.setBlurRadius(10)    # shadow radius
    effect.setColor(QtCore.Qt.black)  # shadow colour
    label.setGraphicsEffect(effect)   # install the effect on the label
def change_mainwindow_pictures(self):
    """Advance the home-screen banner to the next picture.

    Cycles self.main_screen_picture_number through 1..4 and returns the
    matching entry of self.pictures_dict (presumably a stylesheet string;
    confirm against the dict's construction).
    """
    nxt = 1 if self.main_screen_picture_number == 4 else self.main_screen_picture_number + 1
    self.main_screen_picture_number = nxt
    return self.pictures_dict.get(nxt)  # N-th picture
def change_faceChaing_pictures(self):
    """Advance the screensaver ("face changing") animation frame.

    Cycles self.relaxingScreen_pictures_total through 1..13 and returns
    the matching entry of self.relaxingScreen_pictures_dict.
    """
    frame = self.relaxingScreen_pictures_total
    frame = 1 if frame == 13 else frame + 1
    self.relaxingScreen_pictures_total = frame
    return self.relaxingScreen_pictures_dict.get(frame)
def back_to_relax(self):
    """Bring back the full-screen screensaver and restart its animation."""
    self.pushButton_screenRelax.setVisible(True)
    #self.pushButton_screenRelax.repaint()
    #self.label_6.repaint()
    self.pushButton_screenRelax.setObjectName("pushButton_screenRelax")  # state flag: screensaver active
    self.timer_for_faceChaing()
def timer_for_pictures_changing(self):
    """Periodically rotate the home-screen banner while the screensaver
    is dismissed.

    Re-schedules itself every 3 seconds via threading.Timer as long as
    the screensaver button's objectName says it has been clicked away.
    NOTE(review): setStyleSheet is invoked from a timer thread, not the
    Qt GUI thread - confirm this is safe on the target platform.
    """
    timer = threading.Timer(3, self.timer_for_pictures_changing)  # swap the picture every 3 s
    timer.setDaemon(True)  # daemon thread: don't block interpreter exit
    if self.pushButton_screenRelax.objectName() != "pushButton_screenRelax":
        timer.start()
        print('杰哥不要啊!!!')
        self.label_6.setStyleSheet(self.change_mainwindow_pictures())
        #self.label_6.repaint()
def timer_for_faceChaing(self):
    """Animate the screensaver ("face changing") background.

    Re-schedules itself every 0.3 s while the screensaver is active,
    i.e. while the full-screen button still has its original objectName.
    """
    timer3 = threading.Timer(0.3, self.timer_for_faceChaing)
    timer3.setDaemon(True)  # daemon thread: don't block interpreter exit
    # if self.pushButton_screenRelax.isVisible():
    # objectName is used as the state flag (per the original author,
    # "isVisible" did not work here - reason unknown)
    if self.pushButton_screenRelax.objectName() == "pushButton_screenRelax":
        timer3.start()
        print("change")
        #self.pushButton_screenRelax.update()
        self.pushButton_screenRelax.setStyleSheet(self.change_faceChaing_pictures())
        #self.pushButton_screenRelax.repaint()
    # self.pushButton_screenRelax.setStyleSheet(self.change_faceChaing_pictures())
    # self.pushButton_screenRelax.repaint()
def timer_for_auto_relax(self):
    """(Currently disabled) schedule a fall-back to the screensaver after 10 s."""
    time4 = threading.Timer(10, self.back_to_relax)
    time4.setDaemon(True)
    print("waiting for relax5555")
    #time4.start()
    # NOTE(review): the timer is created but never started (start() is
    # commented out), so this method currently only prints.
def timer_create(self):  # example of a self-re-scheduling timer
    """Start a self-re-scheduling 6-second daemon timer (demo/example).

    BUG FIX: the original was declared without `self`, so calling it as
    `self.timer_create()` raised TypeError, and the bare recursive
    reference `timer_create` inside the class body raised NameError at
    call time. Both references now go through `self`.
    """
    time_calculate = threading.Timer(6, self.timer_create)
    time_calculate.setDaemon(True)  # daemon thread: don't block interpreter exit
    time_calculate.start()
    print('杰哥不要啊!!!')
    # self.label_6.setStyleSheet(self.change_mainwindow_pictures())
if __name__ == '__main__':
    multiprocessing.freeze_support()  # required for frozen Windows executables
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QWidget()
    ui = Ui_MainWindow()  # instantiate the generated UI class
    ui.setupUi(MainWindow)  # build and initialise all widgets
    #ui.timer_for_pictures_changing()
    # ui.timer_for_faceChaing()
    MainWindow.show()  # show the main window
    sys.exit(app.exec_())  # exit only when the window is closed
|
"""
author: <NAME> & <NAME>
code to generate synthetic data from stock-model SDEs
"""
# ==============================================================================
from math import sqrt, exp
import numpy as np
import matplotlib.pyplot as plt
import copy, os
# ==============================================================================
class StockModel:
    """
    Mother class for all stock models, defining the variables and methods
    shared amongst all of them; `generate_paths` and `next_cond_exp` must
    be implemented by each subclass.
    """

    def __init__(self, drift, volatility, S0, nb_paths, nb_steps,
                 maturity, sine_coeff, **kwargs):
        """
        :param drift: drift coefficient of the SDE
        :param volatility: diffusion coefficient of the SDE
        :param S0: scalar or array, starting value(s) of the process
        :param nb_paths: int, number of paths to simulate
        :param nb_steps: int, number of time steps per path
        :param maturity: float, time horizon T
        :param sine_coeff: None or float; if given, the drift is modulated
            by (1 + sin(sine_coeff * t))
        """
        self.drift = drift
        self.volatility = volatility
        self.S0 = S0
        self.nb_paths = nb_paths
        self.nb_steps = nb_steps
        self.maturity = maturity
        self.dimensions = np.size(S0)
        if sine_coeff is None:
            self.periodic_coeff = lambda t: 1
        else:
            self.periodic_coeff = lambda t: (1 + np.sin(sine_coeff * t))

    def generate_paths(self, **options):
        """
        generate random paths according to the model hyperparams
        :return: stock paths as np.array, dim: [nb_paths, data_dim, nb_steps]
        """
        raise ValueError("not implemented yet")

    def next_cond_exp(self, *args, **kwargs):
        """
        compute the next point of the conditional expectation starting from
        given point for given time_delta
        :return: cond. exp. at next time_point (= current_time + time_delta)
        """
        raise ValueError("not implemented yet")

    def compute_cond_exp(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, return_path=True, get_loss=False,
                         weight=0.5,
                         start_time=None,
                         **kwargs):
        """
        compute conditional expectation similar to computing the prediction in
        the model.NJODE.forward
        :param times: see model.NJODE.forward
        :param time_ptr: see model.NJODE.forward
        :param X: see model.NJODE.forward, as np.array
        :param obs_idx: see model.NJODE.forward, as np.array
        :param delta_t: see model.NJODE.forward, as np.array
        :param T: see model.NJODE.forward
        :param start_X: see model.NJODE.forward, as np.array
        :param n_obs_ot: see model.NJODE.forward, as np.array
        :param return_path: see model.NJODE.forward
        :param get_loss: see model.NJODE.forward
        :param weight: see model.NJODE.forward
        :param start_time: None or float, if float, this is first time point
        :param kwargs: unused, to allow for additional unused inputs
        :return: float (loss), if wanted paths of t and y (np.arrays)
        """
        y = start_X
        batch_size = start_X.shape[0]
        current_time = 0.0
        # NOTE(review): truthiness check means an explicit start_time of 0.0
        # behaves like None; both start the propagation at t = 0
        if start_time:
            current_time = start_time
        loss = 0
        if return_path:
            if start_time:
                # continuing a previous segment: don't duplicate its endpoint
                path_t = []
                path_y = []
            else:
                path_t = [0.]
                path_y = [y]
        for i, obs_time in enumerate(times):
            # only observation times within the horizon are relevant
            if obs_time > T + 1e-10:
                break
            if obs_time <= current_time:
                continue
            # Propagation of the ODE until next observation
            while current_time < (
                    obs_time - 1e-10 * delta_t):  # 1e-10*delta_t used for numerical consistency.
                if current_time < obs_time - delta_t:
                    delta_t_ = delta_t
                else:
                    delta_t_ = obs_time - current_time
                y = self.next_cond_exp(y, delta_t_, current_time)
                current_time = current_time + delta_t_
                # Storing the predictions.
                if return_path:
                    path_t.append(current_time)
                    path_y.append(y)
            # Reached an observation - only update those elements of the batch,
            # for which an observation is made
            start = time_ptr[i]
            end = time_ptr[i + 1]
            X_obs = X[start:end]
            i_obs = obs_idx[start:end]
            # jump the conditional expectation to the observed values
            Y_bj = y
            temp = copy.copy(y)
            temp[i_obs] = X_obs
            y = temp
            Y = y
            if get_loss:
                loss = loss + compute_loss(X_obs=X_obs, Y_obs=Y[i_obs],
                                           Y_obs_bj=Y_bj[i_obs],
                                           n_obs_ot=n_obs_ot[i_obs],
                                           batch_size=batch_size, weight=weight)
            if return_path:
                path_t.append(obs_time)
                path_y.append(y)
        # after every observation has been processed, propagating until T
        while current_time < T - 1e-10 * delta_t:
            if current_time < T - delta_t:
                delta_t_ = delta_t
            else:
                delta_t_ = T - current_time
            # BUG FIX: the original called next_cond_exp(y, delta_t_) without
            # current_time, which raises TypeError for every subclass whose
            # next_cond_exp signature is (y, delta_t, current_t)
            y = self.next_cond_exp(y, delta_t_, current_time)
            current_time = current_time + delta_t_
            # Storing the predictions.
            if return_path:
                path_t.append(current_time)
                path_y.append(y)
        if return_path:
            # path dimension: [time_steps, batch_size, output_size]
            return loss, np.array(path_t), np.array(path_y)
        else:
            return loss

    def get_optimal_loss(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, weight=0.5):
        """Loss of the true conditional expectation (see compute_cond_exp)."""
        loss = self.compute_cond_exp(
            times, time_ptr, X, obs_idx, delta_t, T, start_X, n_obs_ot,
            return_path=False, get_loss=True, weight=weight)
        return loss
class Heston(StockModel):
    """
    the Heston model, see: https://en.wikipedia.org/wiki/Heston_model
    a basic stochastic volatility stock price model
    """

    def __init__(self, drift, volatility, mean, speed, correlation, nb_paths,
                 nb_steps, S0, maturity, sine_coeff=None, **kwargs):
        super(Heston, self).__init__(
            drift=drift, volatility=volatility, nb_paths=nb_paths,
            nb_steps=nb_steps,
            S0=S0, maturity=maturity,
            sine_coeff=sine_coeff
        )
        self.mean = mean            # long-run variance level (v_inf)
        self.speed = speed          # mean-reversion speed (k)
        self.correlation = correlation  # correlation between the two noises
        # variance paths of the most recent generate_paths() call, kept so
        # draw_path_heston can plot them (generate_paths returns spot only)
        self._last_var_paths = None

    def next_cond_exp(self, y, delta_t, current_t):
        """Conditional expectation of the spot: pure drift growth."""
        return y * np.exp(self.drift*self.periodic_coeff(current_t)*delta_t)

    def generate_paths(self, start_X=None):
        """Euler-Maruyama simulation of spot and variance.

        :return: (spot paths [nb_paths, dimension, nb_steps+1], dt)
        """
        # Diffusion of the spot: dS = mu*S*dt + sqrt(v)*S*dW
        spot_drift = lambda x, t: self.drift*self.periodic_coeff(t)*x
        spot_diffusion = lambda x, v, t: np.sqrt(v) * x
        # Diffusion of the variance: dv = -k(v-vinf)*dt + sqrt(v)*v*dW
        var_drift = lambda v, t: - self.speed * (v - self.mean)
        var_diffusion = lambda v, t: self.volatility * np.sqrt(v)
        spot_paths = np.empty(
            (self.nb_paths, self.dimensions, self.nb_steps + 1))
        var_paths = np.empty(
            (self.nb_paths, self.dimensions, self.nb_steps + 1))
        dt = self.maturity / self.nb_steps
        if start_X is not None:
            spot_paths[:, :, 0] = start_X
        for i in range(self.nb_paths):
            if start_X is None:
                spot_paths[i, :, 0] = self.S0
            # variance always starts at its long-run mean
            var_paths[i, :, 0] = self.mean
            for k in range(1, self.nb_steps + 1):
                normal_numbers_1 = np.random.normal(0, 1, self.dimensions)
                normal_numbers_2 = np.random.normal(0, 1, self.dimensions)
                dW = normal_numbers_1 * np.sqrt(dt)
                # correlated Brownian increment for the variance
                dZ = (self.correlation * normal_numbers_1 + np.sqrt(
                    1 - self.correlation ** 2) * normal_numbers_2) * np.sqrt(dt)
                var_paths[i, :, k] = (
                    var_paths[i, :, k - 1]
                    + var_drift(var_paths[i, :, k - 1], (k) * dt) * dt
                    + var_diffusion(var_paths[i, :, k - 1], (k) * dt) * dZ)
                spot_paths[i, :, k] = (
                    spot_paths[i, :, k - 1]
                    + spot_drift(spot_paths[i, :, k - 1], (k-1) * dt) * dt
                    + spot_diffusion(spot_paths[i, :, k - 1],
                                     var_paths[i, :, k],
                                     (k) * dt) * dW)
        self._last_var_paths = var_paths  # keep for draw_path_heston
        # stock_path dimension: [nb_paths, dimension, time_steps]
        return spot_paths, dt

    def draw_path_heston(self, filename):
        """Plot one spot path and its variance path into *filename*."""
        nb_paths = self.nb_paths
        self.nb_paths = 1
        # BUG FIX: generate_paths returns (spot_paths, dt), not a
        # (spot, var) pair; the original `spot_paths, var_paths = paths`
        # unpacking raised ValueError. The variance path is now taken
        # from self._last_var_paths.
        spot_paths, dt = self.generate_paths()
        self.nb_paths = nb_paths
        one_spot_path = spot_paths[0, 0, :]
        one_var_path = self._last_var_paths[0, 0, :]
        dates = np.array([i for i in range(len(one_spot_path))])
        fig, ax1 = plt.subplots()
        color = 'tab:blue'
        ax1.set_xlabel('time')
        ax1.set_ylabel('Stock', color=color)
        ax1.plot(dates, one_spot_path, color=color)
        ax1.tick_params(axis='y', labelcolor=color)
        color = 'tab:red'
        ax2 = ax1.twinx()
        ax2.set_ylabel('Variance', color=color)
        ax2.plot(dates, one_var_path, color=color)
        ax2.tick_params(axis='y', labelcolor=color)
        fig.tight_layout()
        plt.savefig(filename)
        plt.close()
class HestonWOFeller(StockModel):
    """
    the Heston model, see: https://en.wikipedia.org/wiki/Heston_model
    a basic stochastic volatility stock price model, that can be used
    even if Feller condition is not satisfied
    Feller condition: 2*speed*mean > volatility**2
    (the Euler scheme below uses full truncation, np.maximum(v, 0), so the
    square roots never see a negative variance)
    """

    def __init__(self, drift, volatility, mean, speed, correlation, nb_paths,
                 nb_steps, S0, maturity, scheme='euler', return_vol=False,
                 v0=None, sine_coeff=None, **kwargs):
        """
        :param scheme: str, sampling scheme; only 'euler' is implemented
        :param return_vol: bool, if True the variance paths are stacked
            with the spot paths (and next_cond_exp handles both parts)
        :param v0: None or float, initial variance (defaults to `mean`)
        """
        super(HestonWOFeller, self).__init__(
            drift=drift, volatility=volatility, nb_paths=nb_paths,
            nb_steps=nb_steps,
            S0=S0, maturity=maturity,
            sine_coeff=sine_coeff
        )
        self.mean = mean
        self.speed = speed
        self.correlation = correlation
        self.scheme = scheme
        # FIX: the attribute was misspelled "retur_vol"; the correct name is
        # now used internally, and the old one is kept as an alias so any
        # external code reading it keeps working
        self.return_vol = return_vol
        self.retur_vol = return_vol
        if v0 is None:
            self.v0 = self.mean
        else:
            self.v0 = v0

    def next_cond_exp(self, y, delta_t, current_t):
        """Conditional expectation step for spot (and variance if stacked)."""
        if self.return_vol:
            # y stacks [spot, variance] along axis 1
            s, v = np.split(y, indices_or_sections=2, axis=1)
            s = s * np.exp(self.drift*self.periodic_coeff(current_t)*delta_t)
            # the variance mean-reverts towards `mean` at rate `speed`
            exp_delta = np.exp(-self.speed * delta_t)
            v = v * exp_delta + self.mean * (1 - exp_delta)
            y = np.concatenate([s, v], axis=1)
            return y
        else:
            return y*np.exp(self.drift*self.periodic_coeff(current_t)*delta_t)

    def generate_paths(self, start_X=None):
        """Full-truncation log-Euler simulation; returns (paths, dt)."""
        if self.scheme == 'euler':
            # Diffusion of the spot: dS = mu*S*dt + sqrt(v)*S*dW
            log_spot_drift = lambda v, t: \
                (self.drift*self.periodic_coeff(t) - 0.5 * np.maximum(v, 0))
            log_spot_diffusion = lambda v: np.sqrt(np.maximum(v, 0))
            # Diffusion of the variance: dv = -k(v-vinf)*dt + sqrt(v)*v*dW
            var_drift = lambda v: - self.speed * (np.maximum(v, 0) - self.mean)
            var_diffusion = lambda v: self.volatility * np.sqrt(np.maximum(v, 0))
            spot_paths = np.empty(
                (self.nb_paths, self.dimensions, self.nb_steps + 1))
            var_paths = np.empty(
                (self.nb_paths, self.dimensions, self.nb_steps + 1))
            dt = self.maturity / self.nb_steps
            if start_X is not None:
                spot_paths[:, :, 0] = start_X
            for i in range(self.nb_paths):
                if start_X is None:
                    spot_paths[i, :, 0] = self.S0
                var_paths[i, :, 0] = self.v0
                for k in range(1, self.nb_steps + 1):
                    normal_numbers_1 = np.random.normal(0, 1, self.dimensions)
                    normal_numbers_2 = np.random.normal(0, 1, self.dimensions)
                    dW = normal_numbers_1 * np.sqrt(dt)
                    dZ = (self.correlation * normal_numbers_1 + np.sqrt(
                        1 - self.correlation ** 2) * normal_numbers_2) * np.sqrt(dt)
                    # log-Euler step for the spot (stays positive by construction)
                    spot_paths[i, :, k] = np.exp(
                        np.log(spot_paths[i, :, k - 1])
                        + log_spot_drift(
                            var_paths[i, :, k - 1], (k-1)*dt) * dt
                        + log_spot_diffusion(var_paths[i, :, k - 1]) * dW
                    )
                    var_paths[i, :, k] = (
                        var_paths[i, :, k - 1]
                        + var_drift(var_paths[i, :, k - 1]) * dt
                        + var_diffusion(var_paths[i, :, k - 1]) * dZ
                    )
            if self.return_vol:
                spot_paths = np.concatenate([spot_paths, var_paths], axis=1)
            # stock_path dimension: [nb_paths, dimension, time_steps]
            return spot_paths, dt
        else:
            raise ValueError('unknown sampling scheme')
class BlackScholes(StockModel):
    """
    Standard Black-Scholes model (geometric Brownian motion), see:
    https://en.wikipedia.org/wiki/Black–Scholes_model
    https://en.wikipedia.org/wiki/Geometric_Brownian_motion
    """

    def __init__(self, drift, volatility, nb_paths, nb_steps, S0,
                 maturity, sine_coeff=None, **kwargs):
        super(BlackScholes, self).__init__(
            drift=drift, volatility=volatility, nb_paths=nb_paths,
            nb_steps=nb_steps, S0=S0, maturity=maturity,
            sine_coeff=sine_coeff
        )

    def next_cond_exp(self, y, delta_t, current_t):
        """E[S_{t+dt} | S_t] = S_t * exp(mu(t) * dt)."""
        growth = self.drift * self.periodic_coeff(current_t) * delta_t
        return y * np.exp(growth)

    def generate_paths(self, start_X=None):
        """Euler-Maruyama simulation of GBM; returns (paths, dt)."""
        def mu(x, t):
            # drift term of dS = mu*S*dt + sigma*S*dW
            return self.drift * self.periodic_coeff(t) * x

        def sigma(x, t):
            # diffusion term (time argument unused)
            return self.volatility * x

        paths = np.empty((self.nb_paths, self.dimensions, self.nb_steps + 1))
        dt = self.maturity / self.nb_steps
        if start_X is not None:
            paths[:, :, 0] = start_X
        for path_idx in range(self.nb_paths):
            if start_X is None:
                paths[path_idx, :, 0] = self.S0
            for step in range(1, self.nb_steps + 1):
                dW = np.sqrt(dt) * np.random.normal(0, 1, self.dimensions)
                prev = paths[path_idx, :, step - 1]
                paths[path_idx, :, step] = (
                    prev
                    + mu(prev, (step - 1) * dt) * dt
                    + sigma(prev, step * dt) * dW)
        # stock_path dimension: [nb_paths, dimension, time_steps]
        return paths, dt
class OrnsteinUhlenbeck(StockModel):
    """
    Ornstein-Uhlenbeck stock model, see:
    https://en.wikipedia.org/wiki/Ornstein–Uhlenbeck_process
    dX = -speed*(X - mean)*dt + volatility*dW
    """

    def __init__(self, volatility, nb_paths, nb_steps, S0,
                 mean, speed, maturity, sine_coeff=None, **kwargs):
        super(OrnsteinUhlenbeck, self).__init__(
            volatility=volatility, nb_paths=nb_paths, drift=None,
            nb_steps=nb_steps, S0=S0, maturity=maturity,
            sine_coeff=sine_coeff
        )
        self.mean = mean    # long-run level the process reverts to
        self.speed = speed  # mean-reversion speed

    def next_cond_exp(self, y, delta_t, current_t):
        """Closed-form conditional mean of an OU step."""
        decay = np.exp(-self.speed * self.periodic_coeff(current_t) * delta_t)
        return y * decay + self.mean * (1 - decay)

    def generate_paths(self, start_X=None):
        """Euler-Maruyama simulation of the OU SDE; returns (paths, dt)."""
        def mu(x, t):
            # dv = -k(v-vinf)*dt + vol*dW
            return -self.speed * self.periodic_coeff(t) * (x - self.mean)

        def sigma(x, t):
            # constant diffusion (arguments unused)
            return self.volatility

        paths = np.empty((self.nb_paths, self.dimensions, self.nb_steps + 1))
        dt = self.maturity / self.nb_steps
        if start_X is not None:
            paths[:, :, 0] = start_X
        for path_idx in range(self.nb_paths):
            if start_X is None:
                paths[path_idx, :, 0] = self.S0
            for step in range(1, self.nb_steps + 1):
                dW = np.sqrt(dt) * np.random.normal(0, 1, self.dimensions)
                prev = paths[path_idx, :, step - 1]
                paths[path_idx, :, step] = (
                    prev
                    + mu(prev, (step - 1) * dt) * dt
                    + sigma(prev, step * dt) * dW)
        # stock_path dimension: [nb_paths, dimension, time_steps]
        return paths, dt
class Combined(StockModel):
    """
    Chains several stock models in time: the first model runs on
    [0, maturity_0], each following model continues from the previous
    segment's end state and time.

    NOTE(review): does not call super().__init__, so the base attributes
    (drift, S0, ...) are unset; only the methods defined here are safe
    to use on instances of this class.
    """
    def __init__(self, stock_model_names, hyperparam_dicts, **kwargs):
        # stock_model_names: keys of STOCK_MODELS, one per segment;
        # hyperparam_dicts[i]: constructor kwargs for stock_model_names[i]
        self.stock_model_names = stock_model_names
        self.hyperparam_dicts = hyperparam_dicts

    def compute_cond_exp(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, return_path=True, get_loss=False,
                         weight=0.5, **kwargs):
        """Piecewise conditional expectation over all chained models.

        The passed-in T is ignored; the horizon is rebuilt from the
        segments' maturities. Losses are summed, paths concatenated.
        """
        # get first stockmodel
        stockmodel = STOCK_MODELS[self.stock_model_names[0]](
            **self.hyperparam_dicts[0])
        T = self.hyperparam_dicts[0]['maturity']
        loss, path_t, path_y = stockmodel.compute_cond_exp(
            times, time_ptr, X, obs_idx, delta_t,
            T, start_X,
            n_obs_ot, return_path=True, get_loss=get_loss,
            weight=weight,
        )
        for i in range(1, len(self.stock_model_names)):
            # continue from the last state and time of the previous segment
            start_X = path_y[-1, :, :]
            start_time = path_t[-1]
            # horizon grows by this segment's maturity
            T += self.hyperparam_dicts[i]['maturity']
            stockmodel = STOCK_MODELS[self.stock_model_names[i]](
                **self.hyperparam_dicts[i])
            _loss, _path_t, _path_y = stockmodel.compute_cond_exp(
                times, time_ptr, X, obs_idx, delta_t,
                T, start_X,
                n_obs_ot, return_path=True, get_loss=get_loss,
                weight=weight, start_time=start_time
            )
            loss += _loss
            path_t = np.concatenate([path_t, _path_t])
            path_y = np.concatenate([path_y, _path_y], axis=0)
        if return_path:
            # path dimension: [time_steps, batch_size, output_size]
            return loss, np.array(path_t), np.array(path_y)
        else:
            return loss

    def get_optimal_loss(self, times, time_ptr, X, obs_idx, delta_t, T, start_X,
                         n_obs_ot, weight=0.5):
        """Loss of the true conditional expectation over the combined horizon."""
        loss = self.compute_cond_exp(
            times, time_ptr, X, obs_idx, delta_t, T, start_X, n_obs_ot,
            return_path=False, get_loss=True, weight=weight)
        return loss
# ==============================================================================
# this is needed for computing the loss with the true conditional expectation
def compute_loss(X_obs, Y_obs, Y_obs_bj, n_obs_ot, batch_size, eps=1e-10,
                 weight=0.5):
    """
    compute the loss of the true conditional expectation, as in
    model.compute_loss

    :param X_obs: observed values at the observation times
    :param Y_obs: prediction after the jump to the observation
    :param Y_obs_bj: prediction "before jump", i.e. before the update
    :param n_obs_ot: number of observations per batch element
    :param batch_size: size of the batch the loss is averaged over
    :param eps: small constant inside the square roots for stability
    :param weight: weighting between the jump and before-jump error terms
    :return: float, the weighted squared-error loss
    """
    jump_err = np.sqrt(np.sum((X_obs - Y_obs) ** 2, axis=1) + eps)
    cont_err = np.sqrt(np.sum((Y_obs_bj - Y_obs) ** 2, axis=1) + eps)
    per_obs = (2 * weight * jump_err + 2 * (1 - weight) * cont_err) ** 2
    return np.sum(per_obs / n_obs_ot) / batch_size
# ==============================================================================
# dict for the supported stock models to get them from their name
# (the "sine_" variants reuse the same classes; the sine modulation is
# activated through the `sine_coeff` hyper-parameter)
STOCK_MODELS = {
    "BlackScholes": BlackScholes,
    "Heston": Heston,
    "OrnsteinUhlenbeck": OrnsteinUhlenbeck,
    "HestonWOFeller": HestonWOFeller,
    "combined": Combined,
    "sine_BlackScholes": BlackScholes,
    "sine_Heston": Heston,
    "sine_OrnsteinUhlenbeck": OrnsteinUhlenbeck,
}
# ==============================================================================
# default hyper-parameters used by the quick plotting helpers below
hyperparam_test_stock_models = {
    'drift': 0.2, 'volatility': 0.3, 'mean': 0.5,
    'speed': 0.5, 'correlation': 0.5, 'nb_paths': 10, 'nb_steps': 100,
    'S0': 1, 'maturity': 1., 'dimension': 1}
def draw_stock_model(stock_model_name):
    """
    Simulate paths of the named model and save a plot of the first path
    together with a toy "conditional expectation" that is reset to the
    observed value every third step.

    NOTE(review): mutates the module-level hyper-parameter dict, and the
    conditional expectation always grows with the shared 'drift' value,
    whatever model is drawn - confirm this is intended for non-BS models.

    :param stock_model_name: str, a key of STOCK_MODELS
    """
    hyperparam_test_stock_models['model_name'] = stock_model_name
    stockmodel = STOCK_MODELS[stock_model_name](**hyperparam_test_stock_models)
    stock_paths, dt = stockmodel.generate_paths()
    filename = '{}.pdf'.format(stock_model_name)
    # draw a path
    one_path = stock_paths[0, 0, :]
    dates = np.array([i for i in range(len(one_path))])
    cond_exp = np.zeros(len(one_path))
    cond_exp[0] = hyperparam_test_stock_models['S0']
    cond_exp_const = hyperparam_test_stock_models['S0']  # unused; kept as-is
    for i in range(1, len(one_path)):
        if i % 3 == 0:
            # every third step: pretend the path is observed
            cond_exp[i] = one_path[i]
        else:
            # in between: deterministic growth with the drift
            cond_exp[i] = cond_exp[i - 1] * exp(
                hyperparam_test_stock_models['drift'] * dt)
    plt.plot(dates, one_path, label='stock path')
    plt.plot(dates, cond_exp, label='conditional expectation')
    plt.legend()
    plt.savefig(filename)
    plt.close()
if __name__ == '__main__':
    # quick visual smoke tests: save example plots for two of the models
    draw_stock_model("BlackScholes")
    heston = STOCK_MODELS["Heston"](**hyperparam_test_stock_models)
    heston.draw_path_heston("heston.pdf")
|
<filename>miniserver_gateway/connectors/shelly/api/gen1parser.py
#!/usr/bin/python3
# Copyright 2021. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shelly connector plugin api module parser for Gen 1 devices
"""
# Python base dependencies
import json
import re
import uuid
from typing import Any, Dict, List, Optional, Set, Tuple, TypeVar, Union
# Library dependencies
from fastnumbers import fast_float, fast_int
from modules_metadata.types import DataType, SwitchPayload
# Library libs
from miniserver_gateway.connectors.shelly.api.gen1validator import Gen1Validator
from miniserver_gateway.connectors.shelly.exceptions import (
LogicException,
ParsePayloadException,
)
from miniserver_gateway.connectors.shelly.receivers.entities import (
BaseEntity,
BlockDescriptionEntity,
DeviceDescriptionEntity,
DeviceDescriptionFromCoapEntity,
DeviceDescriptionFromHttpEntity,
DeviceExtendedStatusEntity,
DeviceInfoEntity,
DeviceStatusEntity,
SensorStateDescriptionEntity,
SensorStateStatusEntity,
)
from miniserver_gateway.connectors.shelly.registry.model import (
BlocksRegistry,
DevicesRegistry,
SensorsRegistry,
)
from miniserver_gateway.connectors.shelly.types import (
ClientMessageType,
SensorType,
SensorUnit,
WritableSensor,
)
from miniserver_gateway.connectors.shelly.utilities.helpers import DataTransformHelpers
# generic type variable bound to device-description entities (CoAP or HTTP)
T = TypeVar("T", bound=DeviceDescriptionEntity)  # pylint: disable=invalid-name
class Gen1Parser(Gen1Validator):
    """
    Gen 1 Shelly device message parser

    Validates raw CoAP/HTTP payloads (via the inherited Gen1Validator)
    and transforms them into receiver entities.

    @package        FastyBird:ShellyConnectorPlugin!

    @module         api

    @author         <NAME> <<EMAIL>>
    """

    # registry of known devices (lookup by client id + device identifier)
    __devices_registry: DevicesRegistry
    # registry of device blocks (groups of sensors per device)
    __blocks_registry: BlocksRegistry
    # registry of individual sensors/states (lookup per block)
    __sensors_registry: SensorsRegistry

    # -----------------------------------------------------------------------------
def __init__(
    self,
    devices_registry: DevicesRegistry,
    blocks_registry: BlocksRegistry,
    sensors_registry: SensorsRegistry,
) -> None:
    """Store the registries used to resolve devices, blocks and sensors"""
    self.__devices_registry = devices_registry
    self.__blocks_registry = blocks_registry
    self.__sensors_registry = sensors_registry
# -----------------------------------------------------------------------------
def parse_coap_message(  # pylint: disable=too-many-arguments
    self,
    device_identifier: str,
    device_ip_address: str,
    device_type: str,
    message_payload: str,
    message_type: ClientMessageType,
    client_id: uuid.UUID,
) -> BaseEntity:
    """Parse message received via CoAP client and transform it to entity"""
    if not self.validate_coap_message(
        message_payload=message_payload,
        message_type=message_type,
    ):
        raise ParsePayloadException("Provided payload is not valid")

    # Try the known payload shapes in order: description, then status
    handlers = (
        (
            self.validate_device_description_from_coap,
            lambda: self.parse_device_description_coap(
                device_identifier=device_identifier,
                device_type=device_type,
                device_ip_address=device_ip_address,
                message_payload=message_payload,
                client_id=client_id,
            ),
        ),
        (
            self.validate_device_status_from_coap,
            lambda: self.parse_device_status_coap(
                device_identifier=device_identifier,
                device_ip_address=device_ip_address,
                message_payload=message_payload,
                client_id=client_id,
            ),
        ),
    )

    for matches, parse in handlers:
        if matches(message_payload=message_payload):
            return parse()

    raise ParsePayloadException("Provided payload is not valid")
# -----------------------------------------------------------------------------
def parse_http_message(  # pylint: disable=too-many-arguments
    self,
    device_identifier: str,
    device_ip_address: str,
    message_payload: str,
    message_type: ClientMessageType,
    client_id: uuid.UUID,
) -> BaseEntity:
    """Parse message received via HTTP client and transform it to entity"""
    if not self.validate_http_message(
        message_payload=message_payload,
        message_type=message_type,
    ):
        raise ParsePayloadException("Provided payload is not valid")

    # Try the known payload shapes in order: info, status, description
    handlers = (
        (
            self.validate_device_info_from_http,
            lambda: self.parse_device_info_http(
                device_identifier=device_identifier,
                device_ip_address=device_ip_address,
                message_payload=message_payload,
                client_id=client_id,
            ),
        ),
        (
            self.validate_device_status_from_http,
            lambda: self.parse_device_status_http(
                device_identifier=device_identifier,
                device_ip_address=device_ip_address,
                message_payload=message_payload,
                client_id=client_id,
            ),
        ),
        (
            self.validate_device_description_from_http,
            lambda: self.parse_device_description_http(
                device_identifier=device_identifier,
                device_ip_address=device_ip_address,
                message_payload=message_payload,
                client_id=client_id,
            ),
        ),
    )

    for matches, parse in handlers:
        if matches(message_payload=message_payload):
            return parse()

    raise ParsePayloadException("Provided payload is not valid")
# -----------------------------------------------------------------------------
def parse_device_description_coap(  # pylint: disable=too-many-arguments,too-many-locals
    self,
    device_identifier: str,
    device_type: str,
    device_ip_address: str,
    message_payload: str,
    client_id: uuid.UUID,
) -> DeviceDescriptionFromCoapEntity:
    """Parse device description message received via CoAP client"""
    schema = self.get_validation_schema(self._COAP_DESCRIPTION_MESSAGE_SCHEMA_FILENAME)

    if schema is None:
        raise LogicException("Message validation schema could not be loaded")

    try:
        parsed_message = self.validate_data_against_schema(
            data=json.loads(message_payload),
            schema=schema,
        )
        if parsed_message is None:
            raise ParsePayloadException("Provided payload is not valid")
    except json.JSONDecodeError as ex:
        raise ParsePayloadException("Provided payload is not valid") from ex

    entity = DeviceDescriptionFromCoapEntity(
        client_id=client_id,
        device_identifier=device_identifier,
        device_type=device_type,
        device_ip_address=device_ip_address,
    )

    # Fill the entity with the block/sensor structure from the payload
    return self.__extract_blocks_from_message(
        device_description=entity,
        parsed_message=parsed_message,
    )
# -----------------------------------------------------------------------------
def parse_device_status_coap(  # pylint: disable=too-many-arguments,too-many-locals
    self,
    device_identifier: str,
    device_ip_address: str,
    message_payload: str,
    client_id: uuid.UUID,
) -> DeviceStatusEntity:
    """Parse device status message received via CoAP client

    Builds a DeviceStatusEntity and, when the device is already known,
    attaches one sensor state per matching sensor record found in the
    registries (transforming the raw value where a data type is known).
    """
    validation_schema = self.get_validation_schema(self._COAP_STATUS_MESSAGE_SCHEMA_FILENAME)
    if validation_schema is None:
        raise LogicException("Message validation schema could not be loaded")
    try:
        parsed_message = self.validate_data_against_schema(
            data=json.loads(message_payload),
            schema=validation_schema,
        )
    except json.JSONDecodeError as ex:
        raise ParsePayloadException("Provided payload is not valid") from ex
    if not isinstance(parsed_message, dict):
        raise ParsePayloadException("Provided payload is not valid")
    device_state = DeviceStatusEntity(
        client_id=client_id,
        device_identifier=device_identifier,
        device_ip_address=device_ip_address,
    )
    device_record = self.__devices_registry.get_by_identifier(
        client_id=client_id,
        device_identifier=device_identifier,
    )
    # Unknown device: return the (empty) status entity as-is
    if device_record is not None:
        sensor_states = parsed_message.get("G")
        if not isinstance(sensor_states, list):
            raise ParsePayloadException("Provided payload is not valid")
        # Each CoAP status entry is a triple: [channel, sensor id, raw value]
        for channel, sensor_identifier, sensor_value in sensor_states:
            for block_record in self.__blocks_registry.get_all_by_device(device_id=device_record.id):
                sensor_record = self.__sensors_registry.get_by_identifier(
                    block_id=block_record.id,
                    sensor_identifier=sensor_identifier,
                )
                if sensor_record is None:
                    continue
                if sensor_record.data_type is None:
                    # no data type known: keep the raw value
                    actual_value = sensor_value
                else:
                    actual_value = DataTransformHelpers.transform_from_device(
                        data_type=sensor_record.data_type,
                        value_format=sensor_record.format,
                        value=sensor_value,
                    )
                # FIX: removed a redundant `if sensor_record is not None`
                # check - the `continue` above already guarantees it
                device_state.add_sensor_state(
                    sensor=SensorStateStatusEntity(
                        block_id=block_record.id,
                        channel=channel,
                        sensor_identifier=sensor_identifier,
                        sensor_value=actual_value,
                    )
                )
    return device_state
# -----------------------------------------------------------------------------
def parse_device_info_http(  # pylint: disable=too-many-arguments
    self,
    device_identifier: str,
    device_ip_address: str,
    message_payload: str,
    client_id: uuid.UUID,
) -> DeviceInfoEntity:
    """Parse device info message received via HTTP client"""
    schema = self.get_validation_schema(self._HTTP_SHELLY_MESSAGE_SCHEMA_FILENAME)

    if schema is None:
        raise LogicException("Message validation schema could not be loaded")

    try:
        parsed = self.validate_data_against_schema(
            data=json.loads(message_payload),
            schema=schema,
        )
    except json.JSONDecodeError as ex:
        raise ParsePayloadException("Provided payload is not valid") from ex

    if not isinstance(parsed, dict):
        raise ParsePayloadException("Provided payload is not valid")

    # Coerce the loosely-typed JSON fields exactly as the entity expects
    return DeviceInfoEntity(
        client_id=client_id,
        device_identifier=device_identifier,
        device_ip_address=device_ip_address,
        device_type=str(parsed.get("type")).lower(),
        device_mac_address=str(parsed.get("mac")),
        device_auth_enabled=bool(parsed.get("auth")),
        device_firmware_version=str(parsed.get("fw")),
    )
# -----------------------------------------------------------------------------
def parse_device_status_http(  # pylint: disable=too-many-arguments
    self,
    device_identifier: str,
    device_ip_address: str,
    message_payload: str,
    client_id: uuid.UUID,
) -> DeviceExtendedStatusEntity:
    """Parse device status message received via HTTP client.

    Raises LogicException when the validation schema cannot be loaded and
    ParsePayloadException when the payload is not valid JSON or not an object.
    """
    validation_schema = self.get_validation_schema(self._HTTP_STATUS_MESSAGE_SCHEMA_FILENAME)
    if validation_schema is None:
        raise LogicException("Message validation schema could not be loaded")
    try:
        parsed_message = self.validate_data_against_schema(
            data=json.loads(message_payload),
            schema=validation_schema,
        )
    except json.JSONDecodeError as ex:
        raise ParsePayloadException("Provided payload is not valid") from ex
    if not isinstance(parsed_message, dict):
        raise ParsePayloadException("Provided payload is not valid")
    # Build the extended status entity from the validated payload
    device_status = DeviceExtendedStatusEntity(
        client_id=client_id,
        device_identifier=device_identifier,
        device_ip_address=device_ip_address,
        device_time=str(parsed_message.get("time")),
        device_unixtime=int(str(parsed_message.get("unixtime"))),
    )
    return device_status
# -----------------------------------------------------------------------------
def parse_device_description_http(
    self,
    device_identifier: str,
    device_ip_address: str,
    message_payload: str,
    client_id: uuid.UUID,
) -> DeviceDescriptionFromHttpEntity:
    """Parse device description message received via HTTP client.

    Raises LogicException when the validation schema cannot be loaded and
    ParsePayloadException when the payload is not valid JSON or not an object.
    """
    validation_schema = self.get_validation_schema(self._HTTP_DESCRIPTION_MESSAGE_SCHEMA_FILENAME)
    if validation_schema is None:
        raise LogicException("Message validation schema could not be loaded")
    try:
        parsed_message = self.validate_data_against_schema(
            data=json.loads(message_payload),
            schema=validation_schema,
        )
    except json.JSONDecodeError as ex:
        raise ParsePayloadException("Provided payload is not valid") from ex
    # Consistency fix: the other HTTP parsers reject any non-dict payload; the
    # previous bare `is None` check let non-dict payloads reach
    # __extract_blocks_from_message, which requires a mapping.
    if not isinstance(parsed_message, dict):
        raise ParsePayloadException("Provided payload is not valid")
    device_description = DeviceDescriptionFromHttpEntity(
        client_id=client_id,
        device_identifier=device_identifier,
        device_ip_address=device_ip_address,
    )
    return self.__extract_blocks_from_message(
        device_description=device_description,
        parsed_message=parsed_message,
    )
# -----------------------------------------------------------------------------
def __extract_blocks_from_message(
    self,
    device_description: T,
    parsed_message: Dict[str, Any],
) -> T:
    """Populate a device description entity with blocks ("blk") and their
    linked sensors ("sen") from a parsed Shelly description payload, and
    return the same entity.
    """
    for block in list(parsed_message.get("blk", [])):
        # A usable block entry carries an identifier ("I") and a description ("D")
        if isinstance(block, dict) and "I" in block and "D" in block:
            block_description = BlockDescriptionEntity(
                block_identifier=int(str(block.get("I"))),
                # keep only [A-Za-z0-9_-] characters from the description
                block_description=re.sub(r"[^A-Za-z0-9_-]", "", str(block.get("D"))),
            )
            for sensor in list(parsed_message.get("sen", [])):
                if isinstance(sensor, dict) and "I" in sensor and "T" in sensor and "D" in sensor and "L" in sensor:
                    # "L" links a sensor to a single block (int) or several blocks (list)
                    block_link = sensor.get("L")
                    if (isinstance(block_link, list) and block_description.identifier in block_link) or (
                        isinstance(block_link, int) and block_description.identifier == block_link
                    ):
                        # "R" (range) encodes data type, allowed values and the invalid marker
                        data_type, value_format, value_invalid = self.__parse_range(raw_range=sensor.get("R", None))
                        block_description.add_sensor_state(
                            sensor=SensorStateDescriptionEntity(
                                sensor_identifier=int(str(sensor.get("I"))),
                                sensor_type=SensorType(str(sensor.get("T"))),
                                sensor_description=str(sensor.get("D")),
                                sensor_unit=SensorUnit(sensor.get("U", None))
                                if sensor.get("U", None) is not None
                                else None,
                                # relay/light "output" sensors are coerced to switch semantics
                                sensor_data_type=self.__adjust_data_type(
                                    channel=str(block.get("D")),
                                    description=str(sensor.get("D")),
                                    data_type=data_type,
                                ),
                                sensor_value_format=self.__adjust_value_format(
                                    channel=str(block.get("D")),
                                    description=str(sensor.get("D")),
                                    value_format=value_format,
                                ),
                                sensor_value_invalid=value_invalid,
                                sensor_queryable=False,
                                sensor_settable=WritableSensor.has_value(str(sensor.get("D"))),
                            )
                        )
            device_description.add_block(block=block_description)
    return device_description
# -----------------------------------------------------------------------------
@staticmethod
def __parse_range(  # pylint: disable=too-many-branches,too-many-return-statements
    raw_range: Union[str, List[str], None],
) -> Tuple[
    Union[DataType, None],
    Union[Set[str], Tuple[Optional[int], Optional[int]], Tuple[Optional[float], Optional[float]], None],
    Union[str, int, None],
]:
    """Translate a Shelly sensor "R" attribute into (data type, value format, invalid value).

    A two-element list carries [range, invalid-marker]; a plain string is just
    the range. Ranges may be a named scalar type ("U8", "I32", ...), a numeric
    "min/max" pair, or a "/"-separated enumeration.
    """
    invalid_value: Union[str, int, None] = None
    if isinstance(raw_range, list) and len(raw_range) == 2:
        normal_value = raw_range[0]
        # keep the invalid marker as int when it parses, otherwise as the raw string
        invalid_value = fast_int(raw_range[1]) if isinstance(fast_int(raw_range[1]), int) else raw_range[1]
    elif isinstance(raw_range, str):
        normal_value = raw_range
    else:
        return None, None, None
    # Named scalar types map directly to a data type with no value format
    scalar_types = {
        "0/1": DataType.BOOLEAN,
        "U8": DataType.UCHAR,
        "U16": DataType.USHORT,
        "U32": DataType.UINT,
        "I8": DataType.CHAR,
        "I16": DataType.SHORT,
        "I32": DataType.INT,
    }
    if normal_value in scalar_types:
        return scalar_types[normal_value], None, invalid_value
    if "/" in normal_value:
        normal_value_parts = normal_value.strip().split("/")
        if (
            len(normal_value_parts) == 2
            and isinstance(fast_int(normal_value_parts[0]), int)
            and isinstance(fast_int(normal_value_parts[1]), int)
        ):
            return (
                DataType.INT,
                (int(fast_int(normal_value_parts[0])), int(fast_int(normal_value_parts[1]))),
                invalid_value,
            )
        # Bug fix: the float branch previously tested isinstance(fast_float(...), int),
        # which is never true (fast_float yields float or str), so "x.y/z.w" ranges
        # always fell through to ENUM. Test for float instead.
        if (
            len(normal_value_parts) == 2
            and isinstance(fast_float(normal_value_parts[0]), float)
            and isinstance(fast_float(normal_value_parts[1]), float)
        ):
            return (
                DataType.FLOAT,
                (float(fast_float(normal_value_parts[0])), float(fast_float(normal_value_parts[1]))),
                invalid_value,
            )
        return DataType.ENUM, {item.strip() for item in normal_value_parts if item.strip()}, invalid_value
    return None, None, None
# -----------------------------------------------------------------------------
@staticmethod
def __adjust_data_type(channel: str, description: str, data_type: Optional[DataType]) -> Optional[DataType]:
    """Force relay/light "output" sensors to the switch data type; pass others through."""
    if description == "output" and channel.startswith(("relay", "light")):
        return DataType.SWITCH
    return data_type
# -----------------------------------------------------------------------------
@staticmethod
def __adjust_value_format(
    channel: str,
    description: str,
    value_format: Union[
        Set[str], Tuple[Optional[int], Optional[int]], Tuple[Optional[float], Optional[float]], None
    ],
) -> Union[Set[str], Tuple[Optional[int], Optional[int]], Tuple[Optional[float], Optional[float]], None]:
    """Relay/light "output" sensors accept the switch command set; others keep their format."""
    if description == "output" and channel.startswith(("relay", "light")):
        return {
            SwitchPayload.ON.value,
            SwitchPayload.OFF.value,
            SwitchPayload.TOGGLE.value,
        }
    return value_format
|
<filename>qasm2image/svg/_constants.py
# ======================================================================
# Copyright CERFACS (February 2018)
# Contributor: <NAME> (<EMAIL>)
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Definition of constants proper to the qasm2svg generator."""
# Sizes
GATE_SIZE = 100
CONTROL_GATE_SIZE = 20
MEASURE_GATE_CLBIT_SIZE = 20
STROKE_THICKNESS = 2
REGISTER_NAME_WIDTH = 150
MAX_REGISTER_NAME_HEIGHT = 50
# Margins, spacing, border
GATE_HORIZONTAL_SPACING = 100
GATE_LEFT_BORDER = 100
GATE_RIGHT_BORDER = 100
GATE_INSIDE_MARGIN = 10
REGISTER_NAME_LEFT_BORDER = 10
REGISTER_NAME_RIGHT_BORDER = 10
VERTICAL_BORDER = 100
REGISTER_LINES_VERTICAL_SPACING = 150
DOUBLE_LINES_SEPARATION = 2
# Colors
GATE_FILL_COLOR = 'white'
GATE_BORDER_COLOR = 'black'
REGISTER_LINES_COLOR = 'black'
CONTROL_TRUE_GATE_FILL_COLOR = 'black'
CONTROL_FALSE_GATE_FILL_COLOR = 'white'
MEASURE_GATE_CLBIT_FILL_COLOR = 'black'
# Font size
REGISTER_NAME_FONT_SIZE = 200
# 1/3+1/30 is an experimental value that seems to work for
# centering vertically the text. The initial value was 1/3
# but the 1-character gate names were a little shifted up,
# that is why the constant is in this form, 1/30 is just
# here as "a little value".
FONT_SIZE_CENTER_VERTICALLY_MULTIPLIER = 1 / 3 + 1 / 30
FONT_SIZE_REDUCTION_FACTOR_FOR_CONTROLLED_GATES = 0.8
# Other
PARAMETERS_ROUND_DECIMAL = 2
# We need to fix a maximum size for the PNG and adapt the scale variable
# *before* calling svg2png. If we call svg2png before adapting the scale,
# very large outputs will SEGFAULT because of cairo.
MAX_PNG_SIZE_PX = 20000 * 20000
# Checks
assert REGISTER_LINES_VERTICAL_SPACING > GATE_SIZE, \
"Gates may vertically overlap with the given constants."
assert VERTICAL_BORDER > GATE_SIZE / 2, \
"Gates may be drawn outside the image with the given constants."
|
<reponame>Kwounsu/Winee<filename>mysite/myapp/views.py
import csv, io
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth import logout
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from django.db.models import Avg, Subquery
from math import sqrt
from random import randint
from . import models
from . import forms
# Create your views here.
def index(request):
    """Landing page: headline count of all ratings plus three recommended wines.

    Recommendation: take the ten most-rated wines, score each with the
    collaborative predictor (falling back to critic points / 20 when the
    predictor returns 0 or an out-of-range value), and show the top three.
    """
    # Human-readable total number of ratings collected (e.g. 1.2K / 3.4M)
    rate_stacked_count = models.Rating.objects.all().count()
    if 1000 <= rate_stacked_count < 1000000:
        rate_stacked = str(rate_stacked_count / 1000) + 'K'
    elif 1000000 <= rate_stacked_count < 1000000000:
        rate_stacked = str(rate_stacked_count / 1000000) + 'M'
    else:
        rate_stacked = rate_stacked_count
    # Score the ten most-rated wines
    candidates = models.Wine.objects.all().order_by('-rate_stacked')[:10]
    scored = []
    for wine in candidates:
        predicted_rate = 0
        if rate_stacked_count > 0:
            predicted_rate = getPredictRate(request.user.id, wine.id)
        if predicted_rate == 0 or predicted_rate > 5:
            # fall back to the critic score (points are on a 100 scale)
            predicted_rate = wine.points / 20
        scored.append((predicted_rate, wine))
    # Bug fix: the previous hand-rolled top-3 bookkeeping shifted the three
    # rate slots on a new leader but never shifted the corresponding wine
    # lists, so ranks 2 and 3 could display the wrong wines. Sorting the
    # (rate, wine) pairs keeps rates and wines in sync; the stable sort
    # preserves the earlier-candidate-wins tie behavior.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    recommending_wines = [wine for _, wine in scored[:3]]
    context = {
        "title": "Best Bottle",
        "rate_stacked": rate_stacked,
        "wines": recommending_wines,
    }
    return render(request, "index.html", context=context)
def logout_view(request):
    """Log the current user out, then send them back to the landing page."""
    logout(request)
    return redirect("/")
def register(request):
    """Show the sign-up form; on a valid POST create the account and redirect to login."""
    if request.method == "POST":
        form_instance = forms.RegistrationForm(request.POST)
        if form_instance.is_valid():
            form_instance.save()
            return redirect("/login/")
    else:
        form_instance = forms.RegistrationForm()
    # invalid POSTs fall through and re-render the bound form with its errors
    return render(request, "registration/register.html", context={"form": form_instance})
@login_required(login_url='/login/')
def search(request):
    """Paginated wine search matching the query against title or winery."""
    query = request.GET.get('b', '')
    wines = models.Wine.objects.all().order_by('-id')
    if query:
        # case-insensitive substring match on either field
        wines = wines.filter(title__icontains=query) | wines.filter(winery__icontains=query)
    paginator = Paginator(wines, 12)
    page = request.GET.get('page')
    page_obj = paginator.get_page(page)
    context = {
        "title": "Best Bottle",
        "search": page_obj,
        "b": query,
        "page": page,
        "paginator": paginator,
        "totalQ": models.Wine.objects.all().count(),
    }
    return render(request, 'search.html', context)
@login_required(login_url='/login/')
def mypage(request):
    """User profile page: the user's rated wines (paginated) and their wishlist."""
    rated_wine_queryset = models.Rating.objects.filter(user=request.user)
    wines = models.Wine.objects.filter(pk__in=rated_wine_queryset.values('wine_id'))
    # NOTE(review): filtering a queryset by its own pks looks like a no-op —
    # confirm whether this was meant to filter something else
    rates = rated_wine_queryset.filter(pk__in=rated_wine_queryset).select_related('wine').order_by('-rating')
    wishlists_queryset = models.WishList.objects.filter(user=request.user)
    # NOTE(review): same apparent self-filter no-op here
    wishlists = wishlists_queryset.filter(pk__in=wishlists_queryset)
    wishlistsNum = wishlists_queryset.count()
    totalRatedWines = wines.count()
    # paginate the rating list, 12 entries per page
    paginator = Paginator(rates, 12)
    page = request.GET.get('page')
    br = paginator.get_page(page)
    context = {
        "title":"Best Bottle",
        "wines":wines,
        "totalRatedWines":totalRatedWines,
        "rates":rates,
        "page":page,
        "paginator":paginator,
        "wishlists":wishlists,
        "wishlistsNum":wishlistsNum,
        "search":br,
    }
    return render(request, 'mypage.html', context)
@login_required(login_url='/login/')
def wine_info(request, wine_id):
    """Wine detail page: the user's own rating, community average, predicted
    rating, wishlist flag, and a 1-5 star rating histogram.
    """
    wine = models.Wine.objects.get(id=wine_id)
    # The requesting user's own rating, if any
    try:
        my_rate = models.Rating.objects.get(wine=wine_id, user=request.user)
    except models.Rating.DoesNotExist:
        my_rate = None
    # Community average; indexing raises IndexError when there are no ratings
    # yet (previously swallowed by a bare except)
    try:
        avg_rate = models.Rating.objects.filter(wine=wine_id) \
            .values('wine') \
            .annotate(Avg('rating'))[0]['rating__avg']
    except IndexError:
        avg_rate = 0
    # Fallback prediction derived from critic points (points are on a 100 scale)
    temp_predicted_rate = models.Wine.objects.filter(id=wine_id) \
        .values('points')[0]['points'] / 20
    predicted_rate = temp_predicted_rate
    # Human-readable count of ratings for this wine (e.g. 1.2K / 3.4M)
    rate_stacked_count = models.Rating.objects.filter(wine=wine_id).count()
    if 1000 <= rate_stacked_count < 1000000:
        rate_stacked = str(rate_stacked_count / 1000) + 'K'
    elif 1000000 <= rate_stacked_count < 1000000000:
        rate_stacked = str(rate_stacked_count / 1000000) + 'M'
    else:
        rate_stacked = rate_stacked_count
    # Collaborative prediction once the wine has ratings; clamp out-of-range
    # predictions to the points fallback (consistent with index())
    if rate_stacked_count > 0:
        predicted_rate = getPredictRate(request.user.id, wine_id)
    if predicted_rate == 0 or predicted_rate > 5:
        predicted_rate = temp_predicted_rate
    # Wishlist flag: the WishList object when present, falsy 0 otherwise
    try:
        boolWishList = models.WishList.objects.get(wine=wine_id, user=request.user)
    except models.WishList.DoesNotExist:
        boolWishList = 0
    # Rating histogram, counts of 1..5 star ratings
    chart = [0, 0, 0, 0, 0]
    for entry in models.Rating.objects.filter(wine=wine_id):
        stars = int(entry.rating)
        if 1 <= stars <= 5:
            chart[stars - 1] += 1
    context = {
        "title":"Best Bottle",
        "wine":wine,
        "my_rate":my_rate,
        "rate_stacked":rate_stacked,
        "avg_rate":avg_rate,
        "temp_predicted_rate":temp_predicted_rate,
        "predicted_rate":predicted_rate,
        "boolWishList":boolWishList,
        "chart":chart,
    }
    return render(request, 'wine_info.html', context)
def sim_pearson(reqUser, user2):
    """Pearson correlation between two users over their commonly rated wines.

    Returns 0 when fewer than two wines are shared or the correlation is
    undefined (zero variance).
    """
    user1rates = models.Rating.objects.filter(user_id=reqUser)
    user2rates = models.Rating.objects.filter(user_id=user2)
    # restrict both sides to the wines the users have in common
    der_user1 = user1rates.filter(wine_id__in=Subquery(user2rates.values('wine_id')))
    der_user2 = user2rates.filter(wine_id__in=Subquery(user1rates.values('wine_id')))
    count = der_user1.count()  # number of commonly rated wines
    if count < 2:
        return 0
    # Performance fix: the cross-term was computed with a nested queryset loop
    # (O(n^2) DB iteration); a wine_id -> rating dict makes it a single pass.
    rates2 = {r.wine_id: r.rating for r in der_user2.iterator()}
    sumX = sumY = sumPowX = sumPowY = sumXY = 0
    for r1 in der_user1.iterator():
        sumX += r1.rating
        sumPowX += pow(r1.rating, 2)
        sumXY += r1.rating * rates2.get(r1.wine_id, 0)
    for rating in rates2.values():
        sumY += rating
        sumPowY += pow(rating, 2)
    try:
        return (sumXY - ((sumX * sumY) / count)) / sqrt(
            (sumPowX - (pow(sumX, 2) / count)) * (sumPowY - (pow(sumY, 2) / count)))
    except (ZeroDivisionError, ValueError):
        # zero variance (division by zero) or a negative radicand from
        # floating-point round-off: correlation is undefined
        return 0
def getPredictRate(theUser, theWine, sim_function=sim_pearson):
    """Predict theUser's rating for theWine as the similarity-weighted
    average of every other rater's rating. Returns 0 when no similarity
    weight accumulates.
    """
    ratings = models.Rating.objects.filter(wine=theWine)
    raters = User.objects.filter(pk__in=ratings.values('user_id'))
    weighted_sum = 0
    weight_total = 0
    for rater in raters:
        if rater.id == theUser:
            continue  # skip the user we are predicting for
        weight = sim_function(theUser, rater.id)
        other_rating = models.Rating.objects.get(wine=theWine, user=rater.id)
        weighted_sum += weight * other_rating.rating
        weight_total += weight
    if weight_total == 0:
        return 0
    return weighted_sum / weight_total
def ratingWine(request, rate, wine_id):
    """Set (rate 1-5) or remove (rate == 0) the user's rating for a wine,
    keeping the wine's rate_stacked counter in step.
    """
    wine = models.Wine.objects.get(id=wine_id)
    if rate == 0:
        # remove the existing rating and decrement the counter
        models.Rating.objects.get(wine=wine, user=request.user).delete()
        wine.rate_stacked -= 1
    else:
        # drop any previous rating first so the create below replaces it
        try:
            models.Rating.objects.get(wine=wine, user=request.user).delete()
        except:
            pass
        models.Rating.objects.update_or_create(
            user=request.user, wine=wine, rating=rate
        )
        wine.rate_stacked += 1
    wine.save()
    return redirect("/wine_info/" + str(wine_id) + "/")
def AddWishList(request, wine_id):
    """Put a wine on the current user's wishlist (idempotent) and return to its page."""
    wine = models.Wine.objects.get(id=wine_id)
    models.WishList.objects.update_or_create(wine=wine, user=request.user)
    return redirect("/wine_info/" + str(wine_id) + "/")
def DelWishList(request, wine_id):
    """Take a wine off the current user's wishlist and return to its page."""
    wine = models.Wine.objects.get(id=wine_id)
    models.WishList.objects.get(wine=wine, user=request.user).delete()
    return redirect("/wine_info/" + str(wine_id) + "/")
"""
Various statistical and plotting utilities
"""
from numpy import *
from scipy.optimize import leastsq,fminbound
from scipy.special import erf
import distributions as dists
import numpy as np
import emcee
import numpy.random as rand
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import plotutils as plu
#from lmfit import minimize, Parameters, Parameter, report_fit
def fit_pdf(bins,h,distfn,p0,nwalkers=200,nburn=200,niter=1000,mcmc=True):
    """Fit the parametric pdf distfn to a histogram (bins, h).

    With mcmc=True, runs an emcee ensemble sampler (nwalkers walkers, nburn
    burn-in steps, niter production steps) and returns the sampler; otherwise
    returns the (pfit, success) pair from a least-squares fit started at p0.

    Bug fix: `niter` was reassigned to 1000 inside the mcmc branch, silently
    ignoring the caller's value.
    """
    #zero-pad the ends of the distribution to keep fits positive
    N = len(bins)
    dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - N/10*dbin,bins.min(),N/10),
                              bins,
                              np.linspace(bins.max(),bins.max() + N/10*dbin,N/10)))
    newh = np.concatenate((np.zeros(N/10),h,np.zeros(N/10)))
    if mcmc:
        ndims = len(p0)
        model = Model_Distribution_Histogram(newbins,newh,distfn)
        sampler = emcee.EnsembleSampler(nwalkers,ndims,model)
        # NOTE(review): the caller's p0 only fixes the dimensionality here;
        # walker starting positions are drawn uniformly — confirm intended
        p0 = rand.random((nwalkers,ndims))
        pos, prob, state = sampler.run_mcmc(p0, nburn) #burn in
        #run for real, honoring the niter argument
        sampler.reset()
        sampler.run_mcmc(pos, niter, rstate0=state)
        return sampler
    else:
        def resid(pars):
            return newh - distfn(newbins,pars)
        pfit,success = leastsq(resid,p0)
        return pfit,success
class Model_Distribution_Histogram(object):
    """Log-likelihood of a parametric distribution given a histogram.

    The histogram is normalized to unit integral on construction; calling the
    instance with a parameter vector returns a Gaussian log-likelihood that
    assumes 1% fractional errors on each (normalized) bin height.
    """
    def __init__(self,bins,h,distfn):
        self.bins = bins
        self.h = h / np.trapz(h, bins)  # normalize so the histogram integrates to 1
        self.distfn = distfn
    def __call__(self,pars):
        """Return the log-likelihood of `pars` under the stored histogram."""
        model = self.distfn(self.bins, pars)
        resid = model - self.h
        sigma = 0.01 * self.h  # assumed per-bin uncertainty
        return np.sum(-0.5 * resid ** 2 / sigma ** 2)
class Model_Distribution(object):
    """Log-likelihood of a data sample under a parametric distribution."""
    def __init__(self,data,distfn):
        """distfn takes data as input; returns distribution evaluated at those data points"""
        self.data = data
        self.distfn = distfn
    def __call__(self,pars):
        """Return the summed log10-likelihood of the stored data.

        pars are parameters relevant for self.distfn.
        """
        # Bug fix: the body referenced the bare names `distfn` and `data`
        # (NameError at call time); use the attributes set in __init__.
        # NOTE(review): base-10 log, not natural log — confirm downstream
        # consumers expect that.
        logl = np.log10(self.distfn(self.data, pars))
        return logl.sum()
def pctile(x,q):
    """Return the q-th percentile of x, with q given in percent (0-100).

    Bug fix: the original looked up the sorted value and then used that VALUE
    as an index back into x (`return x[i]`), returning an arbitrary element;
    the sorted value itself is the percentile. The index is also clamped so
    q=100 no longer reads past the end.
    """
    s = sort(x)
    n = size(x)
    idx = min(int(n * q / 100.), n - 1)
    return s[idx]
def normal(x,mu,sig):
    """Normal pdf with mean mu and standard deviation sig, evaluated at x."""
    coeff = 1. / (sig * sqrt(2 * pi))
    z = (x - mu) / sig
    return coeff * exp(-0.5 * z ** 2)
def qstd(x,quant=0.05,top=False,bottom=False):
    """Standard deviation of x ignoring the outer `quant` fraction per side.

    top=True trims only the low tail; bottom=True trims only the high tail.
    """
    s = sort(x)
    n = size(x)
    lo = s[int(n * quant)]        # value at the lower cut
    hi = s[int(n * (1 - quant))]  # value at the upper cut
    if top:
        mask = x >= lo
    elif bottom:
        mask = x <= hi
    else:
        mask = (x >= lo) & (x <= hi)
    return std(x[mask])
def kdeconf(kde,conf=0.683,xmin=None,xmax=None,npts=500,shortest=True,conftol=0.001,return_max=False):
    """Confidence interval of a KDE, evaluated on an npts-point grid.

    The grid spans [xmin, xmax], defaulting to the KDE's data range; the
    interval itself is delegated to conf_interval.
    """
    lo_edge = kde.dataset.min() if xmin is None else xmin
    hi_edge = kde.dataset.max() if xmax is None else xmax
    grid = linspace(lo_edge, hi_edge, npts)
    return conf_interval(grid, kde(grid), shortest=shortest, conf=conf,
                         conftol=conftol, return_max=return_max)
def conf_interval(x,L,conf=0.683,shortest=True,conftol=0.001,return_max=False):
    """Confidence interval (lo, hi) of a likelihood curve L sampled on x.

    With shortest=True, bisects on the likelihood level until the enclosed
    probability is within conftol of conf (the shortest interval); otherwise
    takes symmetric CDF tails. With return_max=True also returns the x of the
    likelihood maximum as (xmax, lo, hi).
    """
    #x,L = args
    cum = cumsum(L)
    cdf = cum/cum.max()
    if shortest:
        maxind = L.argmax()
        # keep at least one sample on each side of the peak
        if maxind==0: #hack alert
            maxind = 1
        if maxind==len(L)-1:
            maxind = len(L)-2
        Lval = L[maxind]
        # split the curve into the rising (lo*) and falling (hi*) halves
        lox = x[0:maxind]
        loL = L[0:maxind]
        locdf = cdf[0:maxind]
        hix = x[maxind:]
        hiL = L[maxind:]
        hicdf = cdf[maxind:]
        # bisection on the level Lval: step size dL halves each time the
        # enclosed probability crosses the target
        dp = 0
        s = -1
        dL = Lval
        switch = False
        last = 0
        while absolute(dp-conf) > conftol:
            Lval += s*dL
            if maxind==0:
                loind = 0
            else:
                loind = (absolute(loL - Lval)).argmin()
            if maxind==len(L)-1:
                hiind = -1
            else:
                hiind = (absolute(hiL - Lval)).argmin()
            dp = hicdf[hiind]-locdf[loind]
            lo = lox[loind]
            hi = hix[hiind]
            if dp == last:
                break  # converged to grid resolution; cannot improve further
            last = dp
            cond = dp > conf
            if cond ^ switch:
                dL /= 2.
                s *= -1
                switch = not switch
    #        while dp < conf:
    #            Lval -= dL
    #            loind = argmin(abs(loL - Lval))
    #            hiind = argmin(abs(hiL - Lval))
    #            dp = hicdf[hiind]-locdf[loind]
    #            lo = lox[loind]
    #            hi = hix[hiind]
    else:
        # symmetric tails: cut alpha/2 of probability from each side
        alpha = (1-conf)/2.
        lo = x[absolute(cdf-alpha).argmin()]
        hi = x[(absolute(cdf-(1-(alpha)))).argmin()]
    if return_max:
        xmaxL = x[L.argmax()]
        return xmaxL,lo,hi
    else:
        return (lo,hi)
def gaussian(x,p):
    """Gaussian profile; p = (A, mu, sig) for amplitude, center, width."""
    amp, center, width = p
    z = (x - center) / width
    return amp * exp(-0.5 * z * z)
def lorentzian(x,p):
    """Lorentzian profile; p = (A, mu, gam) for amplitude, center, half-width."""
    amp, center, hwhm = p
    z = (x - center) / hwhm
    return amp / (z * z + 1)
def voigt(x,p):
    """Voigt profile via the complex error function, normalized so the peak
    equals A; p = (A, mu, gam, sig).
    """
    A,mu,gam,sig = p
    z = ((x-mu)+1j*gam)/(sig*sqrt(2))
    # Dead-code fix: removed `test = cef(z).real[where(abs(x)<2)]` — its result
    # was discarded, and the fancy indexing crashed for scalar x.
    y = cef(z).real/(sig*sqrt(2*pi))
    # rescale so the profile's maximum is exactly A
    ymax = y.max()
    return A*y/ymax
def Nvoigt(x,p):
    """takes 1+4N parameters: offset, then A,mu,gam,sig for each component
    (matching the unpacking order in voigt(); the previous docstring said
    1+5N, but the stride below is 4)
    """
    p = atleast_1d(p)
    # NOTE(review): integer division under Python 2; under Python 3 this is a
    # float and the slice indices below would fail — file targets Python 2
    N = (len(p)-1)/4
    y=0
    for i in arange(N):
        y += voigt(x,p[1+4*i:1+4*i+4])
    return y+p[0]
def fit_Nvoigt(x,y,p0,dy=None,BIC=False):
    """Least-squares fit of a sum of Voigt profiles to (x, y).

    Returns the fitted parameters, or (pfit, bic) when BIC=True (which
    requires the uncertainties dy).
    """
    def residuals(p, xdata):
        return Nvoigt(xdata, p) - y
    pfit, success = leastsq(residuals, p0, args=(x,))
    if not BIC:
        return pfit
    if dy is None:
        raise ValueError('Must provide uncertainties to calculate BIC')
    ymod = Nvoigt(x, pfit)
    logL = log(1./sqrt(2*pi*dy)*exp(-0.5*((y-ymod)**2/dy**2))).sum()
    bic = -2*logL + len(pfit)*log(len(x))
    return pfit, bic
def pvoigt(x,p):
    """Pseudo-Voigt profile: an eta-weighted mix of a Lorentzian and a
    Gaussian; p = (eta, A, mu, sig, gam), with eta clamped to [0, 1].
    """
    eta, amp, center, sig, gam = p
    eta = max(0., min(1., eta))
    gau = gaussian(x, (1., center, sig))
    lor = lorentzian(x, (1., center, gam))
    return amp * (eta * lor + (1 - eta) * gau)
def Npvoigt(x,p):
    """takes 1+5N parameters: offset, then eta,A,mu,sig,gam
    """
    p = atleast_1d(p)
    ncomp = (len(p)-1)/5
    total = 0
    for k in arange(ncomp):
        total += pvoigt(x, p[1+5*k:1+5*k+5])
    return total + p[0]
def fit_Npvoigt(x,y,p0,dy=None,BIC=False):
    """Least-squares fit of a sum of pseudo-Voigt profiles to (x, y).

    Bug fix: previously this called Npvoigt(x, y, p0, dy, BIC) and handed the
    RESULT to fit_fn as its only argument; the model function must be passed
    through, matching fit_Nlor and fit_Ngauss_1d.
    """
    return fit_fn(Npvoigt, x, y, p0, dy, BIC)
def fit_fn(fn,x,y,p0,dy=None,BIC=False):
    """Least-squares fit of the model fn(x, p) to data y, starting from p0.

    Returns the fitted parameters, or (pfit, bic) when BIC=True (which
    requires the uncertainties dy).
    """
    def residuals(p, xdata):
        return fn(xdata, p) - y
    pfit, success = leastsq(residuals, p0, args=(x,))
    if not BIC:
        return pfit
    if dy is None:
        raise ValueError('Must provide uncertainties to calculate BIC')
    # Gaussian log-likelihood at the best fit, penalized by parameter count
    ymod = fn(x, pfit)
    logL = log(1./sqrt(2*pi*dy)*exp(-0.5*((y-ymod)**2/dy**2))).sum()
    bic = -2*logL + len(pfit)*log(len(x))
    return pfit, bic
def Nlorentzian(x,p):
    """Sum of N Lorentzians plus a constant offset.

    p = (offset, then A, mu, gam for each of the N components).

    Bug fix: the body was a copy-paste of the Gaussian sum (identical to
    Ngauss_1d) despite the name; it now sums true Lorentzian profiles, as
    callers like fit_Nlor expect. Indexing also uses integer division so it
    works under Python 3 as well as Python 2.
    """
    p = atleast_1d(p)
    N = (len(p) - 1) // 3
    y = 0
    for i in range(N):
        A, mu, gam = p[1 + 3 * i:1 + 3 * i + 3]
        y += A * gam ** 2 / ((x - mu) ** 2 + gam ** 2)
    return y + p[0]
def fit_Nlor(x,y,p0,dy=None,BIC=False):
    """Least-squares fit of a sum of Lorentzians to (x, y); see fit_fn."""
    return fit_fn(Nlorentzian, x, y, p0, dy=dy, BIC=BIC)
def Ngauss_1d(x,p):
    """Sum of N 1-d Gaussians plus a constant offset.

    p = (offset, then A, mu, sig for each of the N components).

    Robustness fix: `(len(p)-1)/3` with `arange` produced float slice indices
    under Python 3; `//` plus `range` is integer-safe in both Python 2 and 3
    and is behaviorally identical under Python 2.
    """
    p = atleast_1d(p)
    N = (len(p) - 1) // 3
    y = 0
    for i in range(N):
        A, mu, sig = p[1 + 3 * i:1 + 3 * i + 3]
        y += A * exp(-(x - mu) ** 2 / (2. * sig ** 2))
    return y + p[0]
def fit_Ngauss_1d(x,y,p0,dy=None,BIC=False):
    """Least-squares fit of a sum of 1-d Gaussians to (x, y); see fit_fn."""
    return fit_fn(Ngauss_1d, x, y, p0, dy=dy, BIC=BIC)
def howmany_gauss_1d(x,y,dy,Nmax=3,p0=None):
    """Fit sums of 1..Nmax Gaussians to (x, y +/- dy) and print their BICs
    relative to the largest (returns None).
    """
    bics = zeros(Nmax)
    pfits = []
    if p0 is None:
        # crude single-component starting guess from the data
        # NOTE(review): p0[1:] = (std, mean, max) is unpacked by Ngauss_1d as
        # (A, mu, sig) — amplitude/width look swapped; confirm intended
        base = pctile(y,0.1)
        p0 = (base,(y-base).std(),(y-base).mean(),(y-base).max())
    for i in arange(Nmax):
        # shared offset plus the component guess repeated i+1 times
        p = concatenate(([p0[0]],repeat(p0[1:],i+1)))
        pfit,bic = fit_Ngauss_1d(x,y,p,dy,BIC=True)
        pfits.append(pfit)
        bics[i] = bic
    bics -= bics.max()
    # NOTE(review): Python 2 print statement — this file targets Python 2
    print bics
def erfi(x):
    # Imaginary error function: erfi(x) = -i * erf(i*x).
    # NOTE(review): relies on scipy.special.erf accepting complex arguments —
    # confirm the installed scipy version supports this.
    return -1j*erf(1j*x)
def cef(x):
    # Scaled complex error function exp(-x**2) * (1 + i*erfi(x));
    # its real part is used by voigt() to evaluate the profile.
    return exp(-x**2)*(1+1j*erfi(x))
######### routines developed for Ay117 class at Caltech #######
def conf_interval_old(x,L,conf=0.683,shortest=True,conftol=0.001):
    """Older variant of conf_interval (kept for reference): same bisection on
    the likelihood level, but without the edge-of-grid peak handling and the
    return_max option of the newer function.
    """
    #x,L = args
    cum = cumsum(L)
    cdf = cum/max(cum)
    if shortest:
        maxind = argmax(L)
        Lval = L[maxind]
        # rising (lo*) and falling (hi*) halves around the peak
        lox = x[0:maxind]
        loL = L[0:maxind]
        locdf = cdf[0:maxind]
        hix = x[maxind:]
        hiL = L[maxind:]
        hicdf = cdf[maxind:]
        # bisection on the level Lval until the enclosed probability is
        # within conftol of conf
        dp = 0
        s = -1
        dL = Lval
        switch = False
        last = 0
        while abs(dp-conf) > conftol:
            Lval += s*dL
            loind = argmin(abs(loL - Lval))
            hiind = argmin(abs(hiL - Lval))
            dp = hicdf[hiind]-locdf[loind]
            lo = lox[loind]
            hi = hix[hiind]
            if dp == last:
                break  # converged to grid resolution
            last = dp
            cond = dp > conf
            if cond ^ switch:
                dL /= 2.
                s *= -1
                switch = not switch
    #        while dp < conf:
    #            Lval -= dL
    #            loind = argmin(abs(loL - Lval))
    #            hiind = argmin(abs(hiL - Lval))
    #            dp = hicdf[hiind]-locdf[loind]
    #            lo = lox[loind]
    #            hi = hix[hiind]
    else:
        # symmetric tails: cut alpha/2 of probability from each side
        alpha = (1-conf)/2.
        lo = x[argmin(abs(cdf-alpha))]
        hi = x[argmin(abs(cdf-(1-(alpha))))]
    return (lo,hi)
def conf2d(x,y,L,conf=.683,conftol=0.001):
    """returns the contour level of L corresponding to a given confidence level.
    L is a 2-d likelihood grid that need not be normalized, with x and y representing the two dimensions.
    conftol controls how exact you want your answer to be."""
    norm = trapz2d(L,x,y)
    # bisection on the level Lval: lower the level until the probability mass
    # above it reaches conf, halving the step whenever the target is crossed
    prob = 0
    Lval = max(L.ravel())
    dL = Lval
    s = -1
    switch = False
    last = 0
    while abs(prob-conf) > conftol:
        Lval += s*dL
        Ltemp = L*(L>Lval)  # zero out everything below the current level
        prob = trapz2d(Ltemp,x,y)/norm
        cond = prob > conf
        if prob == last:
            break  # converged to grid resolution; cannot improve further
        last = prob
        if cond ^ switch:
            dL /= 2.
            s *= -1
            switch = not switch
    #print Lval,prob
    return Lval
def trapz2d(L,x,y):
    """2-d trapezoidal integral of the grid L: first over y (axis 0), then over x."""
    inner = trapz(L, y, axis=0)
    return trapz(inner, x)
def plot_posterior(x,px,name='x',ax=None,fig=None,conf=0.683,shortest=True,median=False,justline=False,shade=True,label=True,\
                       labelpos=(0.05,0.7),horizontal=False,axislabels=True,fmt='%.2f',conflabel=True,evidence=False):
    """Plots a 1-D posterior pdf described by x and px.  Default is to put a vertical dotted line at the
    best fit value, to shade the shortest 68% confidence interval, and to annotate the graph with the numerical result.

    Inputs:
       x : vector abcissa values
       px : probability or likelihood as function of x; must be same size as x, not necessarily normalized

    Optional Inputs:
       name : variable name; for use in labels
       ax : matplotlib 'axis' object; in case you want to specify the plot to be on a specific axis object
       fig : the number of the figure to put the plot on; empty creates a new figure if 'ax' is not specified
       conf : confidence level for shade region
       shortest: make False for symmetric confidence region
       median : make True to draw vertical line at median value instead of max. likelihood
       justline: make True to plot just the posterior pdf; nothing else
       shade : make False to turn off shading
       label : make False to turn off the label w/ the value and error bars
       labelpos: the position to place the label, in axis coordinates
       horizontal: make True to make the plot horizontal (e.g. for a 2d posterior plot)
       axislabels: make False to turn off
       fmt: format string for label
       conflabel: make False to not include the confidence level in the label

    Results:
       Makes a nifty plot

    Dependencies:
       -numpy,matplotlib
       -conf_interval
    """
    if ax == None:
        plu.setfig(fig)
    lo,hi = conf_interval(x,px,conf,shortest=shortest)
    if ax==None:
        ax = plt.gca()
    # central value: maximum likelihood, or the CDF median when requested
    if not median:
        best = x[argmax(px)]
    else:
        cum = cumsum(px)
        cdf = cum/max(cum)
        best = x[argmin(abs(cdf-0.5))]
    loerr = best-lo
    hierr = hi - best
    # horizontal=True swaps the axes (used as the side panel of a 2-d plot)
    if not horizontal:
        ax.plot(x,px,'k')
        if axislabels:
            ax.set_xlabel('$ %s $' % name,fontsize=16)
            ax.set_ylabel('$ p(%s) $' % name,fontsize=16)
    else:
        ax.plot(px,x,'k')
        if axislabels:
            ax.set_xlabel('$ p(%s) $' % name,fontsize=16)
            ax.set_ylabel('$ %s $' % name,fontsize=16)
    if justline:
        return
    if not horizontal:
        ax.axvline(best,color='k',ls=':')
    else:
        ax.axhline(best,color='k',ls=':')
    w = where((x > lo) & (x < hi))
    if shade:
        # polygon under the curve across the confidence interval
        # NOTE(review): relies on Python 2 zip() returning a list; under
        # Python 3 this concatenation would need list(zip(...))
        if not horizontal:
            ix = x[w]
            iy = px[w]
            verts = [(lo,0)] + zip(ix,iy) + [(hi,0)]
        else:
            ix = px[w]
            iy = x[w]
            verts = [(0,lo)] + zip(ix,iy) + [(0,hi)]
        poly = plt.Polygon(verts,facecolor='0.8',edgecolor='k')
        ax.add_patch(poly)
    # annotation: value +/- errors, symmetric form when both round the same
    beststr = fmt % best
    hierrstr = fmt % hierr
    loerrstr = fmt % loerr
    if hierrstr == loerrstr:
        resultstr = '$%s=%s \pm %s$' % (name,beststr,hierrstr)
    else:
        resultstr = '$ %s =%s^{+%s}_{-%s}$' % (name,beststr,hierrstr,loerrstr)
    if conflabel:
        #print conf
        resultstr += '\n\n(%i%% confidence)' % int(conf*100)
    if evidence:
        resultstr += '\n\nevidence = %.2e' % trapz(px,x)
    if label:
        ax.annotate(resultstr,xy=labelpos,xycoords='axes fraction',fontsize=16)
def plot_posterior2d(x,y,L,name1='x',name2='y',confs=[0.68,0.95,0.99],conf=0.683,ax=None,fig=None,\
                     labelpos1=(0.6,0.5),labelpos2=(0.3,0.8),fmt1='%.2f',fmt2='%.2f',evidence=False,\
                     evidencelabelpos=(0.05,0.85),labels=True,shade=True,justline=False,
                     symmetric=False,justcontour=False):
    """Plots contour plot of 2D posterior surface, with given contour levels, including marginalized 1D
    posteriors of the two individual parameters.

    Inputs:
       x,y : vectors that represent the two directions of the parameter grid
       L : 2D grid of likelihood values; not necessarily normalized

    Optional Inputs:
       confs : list of confidence contours to plot
       name1,name2 : names of variables
       ax : matplotlib 'axis' object, in case you want to specify
       fig : the number of the figure to put the plot on; creates a new figure if not specified
       labelpos1,labelpos2 : where to put the labels on the 1D posterior plots
       fmt1, fmt2 : format strings for labels

    Results:
       Makes a nifty plot

    Dependencies:
       --numpy, matplotlib
       --plot_posterior, conf_interval, conf2d
    """
    # Bug fix: only `matplotlib.pyplot` is imported (as plt) at module level,
    # so the bare name `matplotlib` raised NameError when building the tick
    # formatter; import it directly here instead.
    from matplotlib.ticker import NullFormatter
    plu.setfig(fig)
    if ax == None:
        ax = plt.gca()
    if symmetric:
        # zero the half-plane below the diagonal (x < y)
        foo1,foo2 = meshgrid(x,y)
        L[where(foo1-foo2 < 0)] = 0
    px = trapz(L,y,axis=0)  # marginal posterior of x
    py = trapz(L,x,axis=1)  # marginal posterior of y
    X,Y = meshgrid(x,y)
    plt.clf()
    if not justcontour:
        # central contour panel plus two marginal panels (top and right)
        left, width = 0.1, 0.6
        bottom, height = 0.1, 0.6
        bottom_h = left_h = left+width #+0.05
        nullfmt = NullFormatter()
        rect_center = [left, bottom, width, height]
        rect_top = [left, bottom_h, width, 0.2]
        rect_right = [left_h, bottom, 0.2, height]
        axcenter = plt.axes(rect_center)
        axtop = plt.axes(rect_top)
        axright = plt.axes(rect_right)
    else:
        axcenter = plt.gca()
    # likelihood levels enclosing each requested confidence fraction
    levels = zeros(len(confs))
    i=0
    for c in confs:
        levels[i] = conf2d(x,y,L,c)
        i+=1
    axcenter.contour(X,Y,L,lw=1,levels=levels)
    # mark the maximum-likelihood point
    w = where(L==max(L.ravel()))
    axcenter.plot(x[w[1]],y[w[0]],'k+')
    axcenter.set_xlabel('$%s$' % name1,fontsize=16)
    axcenter.set_ylabel('$%s$' % name2,fontsize=16)
    if not justcontour:
        plot_posterior(x,px,name1,conf=conf,ax=axtop,axislabels=False,labelpos=labelpos1,fmt=fmt1,
                       conflabel=False,label=labels,shade=shade,justline=justline,fig=0)
        plot_posterior(y,py,name2,conf=conf,ax=axright,horizontal=True,axislabels=False,labelpos=labelpos2,
                       fmt=fmt2,conflabel=False,label=labels,shade=shade,justline=justline,fig=0)
        # hide tick labels on the marginal panels
        axtop.yaxis.set_major_formatter(nullfmt)
        axtop.xaxis.set_major_formatter(nullfmt)
        axright.xaxis.set_major_formatter(nullfmt)
        axright.yaxis.set_major_formatter(nullfmt)
    if evidence:
        axcenter.annotate('evidence = %.2e' % trapz2d(L,x,y),xy=evidencelabelpos,xycoords='axes fraction')
def errorbars(x,L,conf=0.95):
    """Return (lower, upper) error-bar widths around the maximum-likelihood
    value of ``x``, given likelihood samples ``L`` at confidence ``conf``.
    """
    lo, hi = conf_interval(x, L, conf)
    best = x[argmax(L)]
    return best - lo, hi - best
def triangle_plot(data,names,marg_orientations='v',fig=None,figsize=(8,8),
                  lims=None,ticks=None,plotfn_2d=None,plotfn_1d=None,
                  kwargs_2d=None,kwargs_1d=None,small_margs=True,marg_spines=False,
                  mark_values=None,mark_markersize=15,
                  plot_kwargs=None,hist_kwargs=None,axislabel_kwargs=None):
    """Draw an n x n triangle ("corner") plot of the columns of ``data``.

    Off-diagonal (lower-triangle) panels show pairwise 2-D plots (scatter by
    default, or ``plotfn_2d``); diagonal panels show 1-D marginal
    distributions (histogram by default, or ``plotfn_1d``).

    plotfn_2d and plotfn_1d are the 2-d and marginalized plots.
    defaults are scatter plot and histogram. plotfn_1d must
    take a keyword argument "orientation", which may take
    "vertical" or "horizontal" values
    plotfns must take axis object as argument

    Parameters
    ----------
    data : array, shape (n_samples, n_vars)
    names : list of axis labels, one per column of ``data``
    marg_orientations : 'v'/'h' or a list of them, one per variable
    lims, ticks : optional per-variable axis limits / tick positions
    mark_values : optional "true" values marked with a red cross / line
    small_margs : if True, each marginal occupies only part of its
        diagonal cell (via a nested gridspec)
    marg_spines : whether to draw spines around the marginal panels
    """
    #plu.setfig(fig,figsize=figsize)
    fig = plt.gcf()
    # Fill in mutable defaults without sharing them across calls.
    if kwargs_2d is None:
        kwargs_2d = {}
    if kwargs_1d is None:
        kwargs_1d = {}
    if plot_kwargs is None:
        plot_kwargs = dict(marker='o',ls='none',ms=1,color='k')
    if hist_kwargs is None:
        # NOTE(review): 'normed' was removed in Matplotlib 3.1; newer
        # versions need density=True instead -- confirm target mpl version.
        hist_kwargs = dict(normed=True,histtype='step',color='k')
    if axislabel_kwargs is None:
        axislabel_kwargs = {'fontsize':22}
    n = len(names)
    # Default axis limits: the data range of each column.
    if lims is None:
        lims = []
        for i in range(n):
            lims.append((data[:,i].min(),data[:,i].max()))
    # A single orientation is broadcast to every marginal panel.
    if type(marg_orientations) != type([]):
        marg_orientations = [marg_orientations]*n
    outer_grid = gridspec.GridSpec(n, n, wspace=0.0, hspace=0.0)
    #width_ratios=[100]*n + [1],
    #height_ratios=[1] + [100]*n)
    for i in np.arange(n):
        # --- lower-triangle 2-D panels: variable i (x) vs variable j (y) ---
        for j in np.arange(i+1,n):
            #k = (j+1)*(n+1) + (i)
            k = j*n + i
            ax = plt.Subplot(fig,outer_grid[k])
            if plotfn_2d is None:
                ax.plot(data[:,i],data[:,j],**plot_kwargs)
            else:
                plotfn_2d(data[:,i],data[:,j],ax=ax,**kwargs_2d)
            if mark_values is not None:
                ax.plot(mark_values[i],mark_values[j],'x',zorder=10,
                        ms=mark_markersize,mew=3,color='r')
            # Only the leftmost column keeps y tick labels / labels.
            if i != 0:
                ticklabels = ax.get_yticklabels()
                plt.setp(ticklabels,visible=False)
            else:
                ax.set_ylabel(names[j],**axislabel_kwargs)
            # Only the bottom row keeps x tick labels / labels.
            if j != n-1:
                ticklabels = ax.get_xticklabels()
                plt.setp(ticklabels,visible=False)
            else:
                ax.set_xlabel(names[i],**axislabel_kwargs)
            if ticks is not None:
                if ticks[i] is not None:
                    ax.set_xticks(ticks[i])
                if ticks[j] is not None:
                    ax.set_yticks(ticks[j])
            if lims is not None:
                ax.set_xlim(*lims[i])
                ax.set_ylim(*lims[j])
            fig.add_subplot(ax)
        # --- diagonal marginal panel for variable i ---
        #k = (i+1)*(n+1) + (i)
        k = i*n + i
        if small_margs:
            # Shrink the marginal by nesting it inside the diagonal cell.
            if marg_orientations[i]=='v':
                orientation = 'vertical'
                h_ratios = [2,1]
                w_ratios = [1]
                inner_grid = gridspec.GridSpecFromSubplotSpec(2, 1,
                    subplot_spec=outer_grid[k], wspace=0.0, hspace=0.0,
                    height_ratios=h_ratios, width_ratios=w_ratios)
                ax = plt.Subplot(fig,inner_grid[1])
            elif marg_orientations[i]=='h':
                orientation = 'horizontal'
                h_ratios = [1]
                w_ratios = [1,2]
                inner_grid = gridspec.GridSpecFromSubplotSpec(1, 2,
                    subplot_spec=outer_grid[k], wspace=0.0, hspace=0.0,
                    height_ratios=h_ratios, width_ratios=w_ratios)
                ax = plt.Subplot(fig,inner_grid[0])
        else:
            ax = plt.Subplot(fig,outer_grid[k])
            if marg_orientations[i]=='v':
                orientation='vertical'
            elif marg_orientations[i]=='h':
                orientation='horizontal'
        if plotfn_1d is None:
            ax.hist(data[:,i],orientation=orientation,**hist_kwargs)
        else:
            plotfn_1d(data[:,i],ax=ax,orientation=orientation,
                      **kwargs_1d)
        if mark_values is not None:
            if marg_orientations[i]=='v':
                ax.axvline(mark_values[i],color='r',lw=2)
            elif marg_orientations[i]=='h':
                ax.axhline(mark_values[i],color='r',lw=2)
        # Hide counts axis and tick labels of the marginal panels.
        if marg_orientations[i]=='v':
            ax.set_yticks([])
            ax.xaxis.set_ticks_position('bottom')
            ticklabels = ax.get_xticklabels()
            plt.setp(ticklabels,visible=False)
        elif marg_orientations[i]=='h':
            ax.yaxis.set_ticks_position('left')
            ax.set_xticks([])
            ticklabels = ax.get_yticklabels()
            plt.setp(ticklabels,visible=False)
        ax.spines['right'].set_visible(marg_spines)
        # k == n+1 is the second diagonal cell (i == 1); presumably a
        # special case for its left spine -- TODO confirm intent.
        if k == n+1:
            ax.spines['left'].set_visible(marg_spines)
        ax.spines['top'].set_visible(marg_spines)
        if i == n-1:
            # Bottom-right marginal: restore its x labels.
            if marg_orientations[i]=='v':
                plt.setp(ticklabels,visible=True)
                plt.xlabel(names[i],fontsize=16)
                if ticks is not None:
                    ax.set_xticks(ticks[i])
            elif marg_orientations[i]=='h':
                ax.spines['bottom'].set_visible(marg_spines)
        if lims is not None:
            if marg_orientations[i]=='v':
                ax.set_xlim(*lims[i])
            elif marg_orientations[i]=='h':
                ax.set_ylim(*lims[i])
        fig.add_subplot(ax)
    # NOTE(review): right/top exceed 1.0 for n < 10 -- confirm intended.
    plt.subplots_adjust(left=0.1,bottom=0.1,right=0.9+1./n,top=0.9+1./n)
|
<filename>src/tagger/__init__.py<gh_stars>0
import typing as t
import warnings
from collections import namedtuple, OrderedDict
import numpy as np
from numpy.lib.recfunctions import unstructured_to_structured
__all__ = [
'Tagger',
]
def _parse_spec(spec: t.List[str]) -> t.OrderedDict[str, int]:
    """Parse a spec, transforming it into an OrderedDict.

    Maps each field name to the number of consecutive digits it occupies.
    Raises ValueError for names that are not valid identifiers, or for a
    field whose digits are not contiguous in ``spec``.
    """
    spec_dict = OrderedDict()
    # Iterate over the spec, creating a new entry
    # in spec_dict for each new field name.
    prev_field = None
    for field in spec:
        # Convert and check that it's a valid identifier.
        field = str(field)
        if not field.isidentifier():
            raise ValueError(
                f'field names must be valid identifiers: {field!r}')
        # Check for new field.
        if field != prev_field:
            # Check for non-contiguous fields.
            if field in spec_dict:
                raise ValueError(f'field {field!r} is non-contiguous')
            spec_dict[field] = 0
        spec_dict[field] += 1
        prev_field = field
    return spec_dict


class Tagger:
    """Generate and parse structured integer tags.

    Tagger takes a list of field names, specified for each digit of a tag, and
    processes tags into named tuples. Tags that have fewer digits than the
    specifier are zero-filled to the specifier's length -- so the tag 104 is
    equivalent to the tag 00104 for a specifier that has five digits.
    """

    def __init__(self, spec: t.List[str], mapping: dict = None):
        """
        Parameters
        ----------
        spec : list[str]
            List of field names that define the meanings of each digit in
            processed tags. Fields must be contiguous -- ['kind', 'kind', 'num']
            is acceptable, but ['kind', 'num', 'kind'] is not.
        mapping : dict, optional
            Dict of callables that post-process the evaluated integers. Does not
            need to be defined for every field; if not present, the integer is
            returned unchanged for that field.
        """
        self.spec = _parse_spec(spec)
        self.num_fields = len(self.spec)
        self.max_length = len(spec)
        self.mapping = {} if mapping is None else mapping
        self._tagfactory = namedtuple('Tag', self.spec.keys())
        # Determine how many places each field needs to be shifted by to create
        # the tag. The first field doesn't need to be shifted at all, the second
        # needs to be shifted by the number of digits allotted for the first,
        # the third by the number of digits for the first AND second, etc. Done
        # in reverse order so that left-to-right order is maintained.
        fields = reversed(self.spec.keys())
        digits = list(reversed(self.spec.values()))
        num_places_to_shift = np.cumsum(digits)
        num_places_to_shift = [0, *num_places_to_shift[:-1]]
        self._num_places_to_shift = OrderedDict(zip(fields,
                                                    num_places_to_shift))
        # Determine ranges for slicing into str representations of tags.
        spec_indices = [spec.index(field) for field in self.spec.keys()]
        spec_indices.append(None)
        self._field_slices = [
            slice(spec_indices[i], spec_indices[i + 1])
            for i in range(self.num_fields)
        ]
        # Generate ufunc for parsing.
        self._parse = np.frompyfunc(self._parse_single, nin=1, nout=1)
        # Generate dtype for parse_record.
        self.dtype = self._record_dtype()

    #===========================================================================
    # Parse to named tuples
    #===========================================================================
    def process_tag(self, tag: int):
        """Process a single tag.

        Deprecated alias of :meth:`parse` for a single tag.

        Parameters
        ----------
        tag : int
            Integer tag to process. Must have n or fewer digits, where n is the
            length of the specifier used to construct this object.

        Returns
        -------
        Tag
            Tag processed into descriptive fields.
        """
        warnings.warn(
            '`process_tag` has been deprecated, and will be removed '
            'in version 1.0.0. Use `parse` instead.',
            category=DeprecationWarning)
        return self._parse_single(tag)

    def _parse_single(self, tag: int):
        """Parse one integer tag into a Tag named tuple."""
        # BUGFIX: previously a negative tag with a multi-digit first field
        # could silently mis-parse (e.g. int('-0') == 0); reject it up
        # front, consistent with parse_record.
        if tag < 0:
            raise ValueError('Tags must be non-negative')
        # Zero-fill short tags to the spec's full length.
        tagstr = f'{tag:0{self.max_length}d}'
        if len(tagstr) > self.max_length:
            raise ValueError(f'tag {tagstr!r} exceeds specified length')
        field_values = {}
        # `field_slice`, not `slice`, to avoid shadowing the builtin.
        for field, field_slice in zip(self.spec.keys(), self._field_slices):
            map_field = self.mapping.get(field, lambda x: x)
            field_values[field] = map_field(int(tagstr[field_slice]))
        return self._tagfactory(**field_values)

    def parse(self, tags: t.Union[int, np.ndarray]):
        """Parse tags.

        Parameters
        ----------
        tags : array_like
            Integer tag(s) to parse. Each tag must have n or fewer digits, where
            n is the length of the specifier used to construct this object.

        Returns
        -------
        array[Tag]
            Tags parsed into descriptive fields.

        Example
        -------
        >>> tagger = Tagger(['kind', 'story', 'story', 'num', 'num'])
        >>> tagger.parse(10101)
        Tag(kind=1, story=1, num=1)
        >>> tagger.parse([10101, 10102])
        array([Tag(kind=1, story=1, num=1), Tag(kind=1, story=1, num=2)],
              dtype=object)
        """
        parsed: t.Union[t.NamedTuple, np.ndarray] = self._parse(tags)
        return parsed

    #===========================================================================
    # Parse to record arrays
    #===========================================================================
    def _smallest_integer_type(self, field):
        """Return the smallest unsigned dtype that can hold ``field``'s max."""
        # object dtype to prevent overflow
        nbits = np.array([8, 16, 32, 64], dtype=object)
        uint_max = 2**nbits - 1
        nbits = nbits[self.max(field) <= uint_max][0]
        return np.dtype(f'uint{nbits}')

    def _record_dtype(self):
        """Build the structured dtype used by :meth:`parse_record`."""
        fields = self.spec.keys()
        dtypes = [self._smallest_integer_type(field) for field in fields]
        return np.dtype([*zip(fields, dtypes)])

    def parse_record(self, tags: t.Union[int, np.ndarray]):
        """Parse tags into a NumPy record array.

        Parameters
        ----------
        tags : array_like
            Integer tag(s) to parse. Each tag must have n or fewer digits, where
            n is the length of the specifier used to construct this object.

        Returns
        -------
        recarray
            Tags parsed into record array.

        Example
        -------
        >>> tagger = Tagger(['kind', 'story', 'story', 'num', 'num'])
        >>> tagger.parse_record(10101)
        rec.array((1, 1, 1),
                  dtype=[('kind', 'u1'), ('story', 'u1'), ('num', 'u1')])
        >>> tagger.parse_record([10101, 10102])
        rec.array([(1, 1, 1), (1, 1, 2)],
                  dtype=[('kind', 'u1'), ('story', 'u1'), ('num', 'u1')])

        Record arrays can be accessed in multiple ways:

        >>> tags = tagger.parse_record([10101, 10102, 10203])
        >>> tags.kind
        array([1, 1, 1], dtype=uint8)
        >>> tags[1]
        (1, 1, 2)
        >>> type(tags[1])
        numpy.record
        """
        tags: np.ndarray = np.asarray(tags)
        if np.any(tags < 0):
            raise ValueError('Tags must be non-negative')
        #--------------------------
        # Check length of tags.
        #--------------------------
        # BUGFIX: count digits exactly via the decimal string. The previous
        # ceil(log10(...)) count was one short for tags that are exact
        # powers of ten (e.g. 100000 counted as 5 digits), letting
        # over-long tags through to be silently mis-parsed.
        nd = len(str(int(np.max(tags))))
        if nd > self.max_length:
            raise ValueError(f'tags have at most {nd} digits, exceeds maximum '
                             f'digits for the spec ({self.max_length})')
        #-------------
        # Parse
        #-------------
        # Construct "index" to pull appropriate pieces out of each integer.
        digit_index: np.ndarray = np.array(
            [*reversed(self._num_places_to_shift.values())])
        digit_index.shape += (1, ) * tags.ndim
        indexed = tags // 10**digit_index
        # Tags are spread out along axis 0, apply modulo along it to get values.
        exponents = np.array([*self.spec.values()])
        parsed = np.apply_along_axis(np.mod, 0, indexed, 10**exponents)
        # Convert to structured array. Transpose once to match with dtype shape,
        # and again to match shape of `tags`.
        struct = unstructured_to_structured(parsed.T, dtype=self.dtype).T
        return np.rec.array(struct)

    #===========================================================================
    # Generate tags
    #===========================================================================
    def tag(self, *values, **kwvalues):
        """Create tags from the spec.

        Values can be specified either using positional or keyword arguments,
        but not a mix of both. All fields must have a value specified. Values
        must be scalars or castable to a NumPy array. Non-integer values must be
        castable to int.

        If arrays are specified for the values, they must be vectors (i.e. only
        one non-singular dimension). The tags are returned as an array of
        integers.

        Parameters
        ----------
        *values : array_like
            The values of the fields in the tags, specified as positional
            arguments. Must be specified in the same order as the spec. Cannot
            be mixed with **kwvalues.
        **kwvalues : array_like
            The values of the fields in the tags, specified as keyword
            arguments. Must be specified by field name. Cannot be mixed with
            *values.

        Returns
        -------
        np.ndarray
            Array of the created tag(s). For N inputs of length A, B, C, ..., an
            array of size A-by-B-by-C-by-... is created. Dimensions with length
            1 are squeezed out.

        Examples
        --------
        >>> tagger = Tagger(['kind', 'story', 'story', 'num', 'num'])
        >>> int(tagger.tag(1, 2, 3))
        10203
        >>> int(tagger.tag(kind=1, story=2, num=3))
        10203
        """
        if values and kwvalues:
            raise ValueError('Cannot mix positional and keyword arguments')
        # If values specified by position, transform to dict
        if values:
            name_value_generator = zip(self.spec.keys(), values)
        else:
            name_value_generator = kwvalues.items()
        # Transform inputs to NumPy arrays of integers in a dict
        values = {
            name: np.asarray(value).astype(int, copy=False)
            for name, value in name_value_generator
        }
        # Values must be provided for all fields
        if len(values) != self.num_fields:
            raise ValueError('Insufficient number of values '
                             f'(expected {self.num_fields}, got {len(values)})')
        # Check inputs.
        for field, value in values.items():
            # Only scalars and vectors allowed
            if value.squeeze().ndim > 1:
                raise ValueError('Specified values must be scalars or vectors')
            # Negative values don't make any sense here.
            if np.any(value < 0):
                raise ValueError('All values must be nonnegative integers')
            # Check bounds.
            if np.any(value > self.max(field)):
                raise ValueError(f'{value} exceeds the available digits '
                                 f'for field {field!r}')
        # Create the tags. The passed values are reshaped into vectors with
        # ndim == num_fields. The non-singular dimension is different for each
        # vector, so for inputs of length M, N, and P, an array of size
        # M-by-N-by-P is created.
        tags = np.zeros([v.size for v in values.values()], dtype=int)
        for i, field in enumerate(self.spec.keys()):
            field_value = values[field].reshape(
                [-1 if j == i else 1 for j in range(self.num_fields)])
            tags += field_value * 10**self._num_places_to_shift[field]
        # Squeeze the generated array to remove any singular dimensions.
        return tags.squeeze()

    def max(self, field):
        """Return the maximum possible value for a field."""
        # for a field that supports the range 000-999, there are three digits,
        # so the maximum value is 10**3 - 1 = 999.
        return 10**self.spec[field] - 1
|
<reponame>nik849/Odds<gh_stars>1-10
import atexit
import time
from apscheduler.scheduler import Scheduler
from flask import Flask, render_template, request, send_file, session
from odds.api import telegram, totalcorner
from odds.config import (CONFIG, HOST_URL, configs, telegram_id, test_token,
tips, totalcorner_test_token)
from odds.errors import OddsError
from odds.scraper import scrape
from odds.utils import predictions
# Application and shared-service wiring for the odds web app.
app = Flask(__name__)
# NOTE(review): hard-coded session secret -- load from config/env for any
# non-development deployment.
app.secret_key = 'key'
# Background job scheduler; drives interval_download() below.
cron = Scheduler(daemon=True)
# Shared clients: site scraper and the two messaging/odds APIs.
s = scrape(HOST_URL)
t = telegram(token=test_token)
tc = totalcorner(token=totalcorner_test_token)
# Rolling history of tip counts shown on the dashboard chart.
values = [0, 0, 0, 0, 0]
cron.start()
# Tips accumulated by the background job; rendered by the index pages.
tips_page = []
token = totalcorner_test_token
@app.route('/', methods=['POST', 'GET'])
def home():
    """Render the dashboard, updating the rolling tip-count history.

    Caches the chart data in the session so that ``index()`` can re-render
    the same page later.
    """
    global values, tips_page
    labels = ['00:00', '00:15', '00:30', '00:45', '01:00']
    # Append the current tip count and keep only the last five samples.
    values.append(len(tips_page))
    if len(values) > 5:
        del values[0]
    session['labels'] = labels
    session['values'] = values
    # BUGFIX: index() reads session['tips_page'], but it was never stored,
    # so the /index.html page always rendered with tips=None.
    session['tips_page'] = tips_page
    return render_template('/index.html',
                           tips=tips_page, labels=labels, values=values)
@app.route('/index.html', methods=['POST', 'GET'])
def index():
    """Re-render the dashboard from the chart data cached in the session."""
    cached_tips = session.get('tips_page')
    cached_labels = session.get('labels')
    cached_values = session.get('values')
    return render_template('/index.html', tips=cached_tips,
                           labels=cached_labels, values=cached_values)
@app.route('/basic_table.html', methods=['POST', 'GET'])
def raw_data():
    """Render the raw odds HTML tables exactly as scraped."""
    odds_html = s.get_odds_html()
    with app.app_context():
        return render_template('/basic_table.html', tables=odds_html)
@app.route('/responsive_table.html', methods=['POST', 'GET'])
def filtered_data():
    """Run the prediction model over every scraped game and render the
    resulting per-game tables on one page."""
    games = s.get_odds_obj()
    # The aggregate table is not a single game; drop it before predicting.
    del games['Last 200 Started Games - Odds From 188bet.com']
    rendered = []
    for name, game in games.items():
        print(name)
        model = predictions(game)
        data, preds = model.return_predictions()
        rendered.append(data.to_html(classes="table table-hover"))
    with app.app_context():
        return render_template('/responsive_table.html',
                               tables=u''.join(rendered))
@app.route('/config.html', methods=['POST', 'GET'])
def config():
    """Serve the (initially empty) configuration page."""
    return render_template('/config.html')
@app.route('/config_update', methods=['POST', 'GET'])
def config_update():
    """Record which config checkboxes were submitted and re-render the page."""
    selected = [box for box in configs if request.form.get(box)]
    session['checks_'] = selected
    return render_template('/config.html', configs=selected, users=telegram_id)
@app.route('/user_update', methods=['POST', 'GET'])
def user_update():
    """Add or remove a Telegram user id from the notification list,
    then re-render the config page with the current state."""
    user = request.form.get("user_input")
    if request.form["submit"] == "add":
        # Also guard against duplicates, mirroring the remove branch.
        if user and user not in telegram_id:
            telegram_id.append(user)
            print(f'{user} added.')
    elif request.form["submit"] == "remove":
        # BUGFIX: the membership check was accidentally nested twice.
        if user in telegram_id:
            telegram_id.remove(user)
            print(f'{user} removed.')
    checks = session.get('checks_', None)
    if not checks:
        checks = [0]
    return render_template('/config.html', configs=checks, users=telegram_id)
@app.route('/token_update', methods=['POST', 'GET'])
def token_update():
    """Replace the module-level API token with the submitted value."""
    global token
    token = request.form.get("token")
    return (''), 204
@app.route('/download', methods=['POST', 'GET'])
def download():
    """Scrape the current odds and stream them to the client as a CSV file."""
    odds = s.download(config=CONFIG)
    filename = f'download_{time.strftime("%Y-%m-%d_%H-%M")}.csv'
    odds.to_csv(filename)
    return send_file(filename, as_attachment=True)
@app.route('/download_preds', methods=['POST', 'GET'])
def download_preds():
    """Run predictions for every game, collect the non-zero tips for the
    configured markets, and stream them as a timestamped text file."""
    with app.test_request_context():
        games = s.get_odds_obj()
        # The aggregate table is not a single game; drop it before predicting.
        del games['Last 200 Started Games - Odds From 188bet.com']
        tables = []
        tips_page_all = []
        for name, game in games.items():
            p = predictions(game)
            data, preds = p.return_predictions()
            tables.append(data)
            # BUGFIX: DataFrame.ix was removed in pandas 1.0; .iloc selects
            # the same cell (second row, first column) positionally.
            game_time = data.iloc[1, 0]
            checks = configs  # session.get('checks_', None)
            if checks:
                for key in checks:
                    if preds[key] != 0:
                        tip = f'{game_time} : {name} - {tips[key]}'
                        tips_page_all.append(tip)
        filename = f'preds{time.strftime("%Y-%m-%d_%H-%M")}.txt'
        with open(filename, 'w') as f:
            for tip in tips_page_all:
                f.write(f'{tip}\n')
        return send_file(filename, as_attachment=True)
@app.route('/hook', methods=['POST', 'GET'])
def handle_messages():
    """Telegram webhook: parse an incoming message, look up the requested
    odds and send them back to the originating user."""
    if request.method == 'POST':
        payload = request.json
        text = {'user': payload['message']['from']['id'],
                'message': payload['message']['text']}
        result = t.process_message(text)
        reply = s.get_odds_obj(config=result)
        print(reply)
        t.send_message(reply, text['user'])
    return (''), 204
@cron.interval_schedule(minutes=15)
def interval_download():
    """Scheduled job (every 15 minutes): scrape odds, build tip strings,
    push them to Telegram and archive them to a timestamped text file.

    Mutates the module-level ``tips_page`` list, which is cleared nightly
    from 23:00 onward.
    """
    global tips_page
    time_now = int(time.strftime("%H"))
    if time_now >= 23:
        # Reset the accumulated tips at the end of the day.
        tips_page = []
    with app.test_request_context():
        tc_data = tc.get_odds()
        games = s.get_odds_obj()
        # The aggregate table is not a single game; drop it before predicting.
        del games['Last 200 Started Games - Odds From 188bet.com']
        tables = []
        ao_teams = []
        for name, game in games.items():
            p = predictions(game)
            data, preds = p.return_predictions()
            tables.append(data)
            # BUGFIX: DataFrame.ix was removed in pandas 1.0; .iloc selects
            # the same cells (rows 1-2, first two columns) positionally.
            game_time = data.iloc[1, 0]
            team_a = data.iloc[1, 1]
            team_b = data.iloc[2, 1]
            ao_teams.append(team_a)
            ao_teams.append(team_b)
            checks = configs  # session.get('checks_', None)
            if checks:
                for key in checks:
                    if preds[key] != 0:
                        tip = (f'{game_time} : {name}, {team_a} vs {team_b}: '
                               f'{tips[key]}')
                        if tip not in tips_page:
                            tips_page.append(tip)
        tc_tips = []
        for match in tc_data:
            # BUGFIX: `(match["h"] or match["a"]) in ao_teams` only tested
            # the home team (falling back to the away team when the home
            # name was falsy); test both teams explicitly.
            if match["h"] in ao_teams or match["a"] in ao_teams:
                try:
                    tc_tip = (f'InPlay: {match["h"]} vs {match["a"]}, '
                              f'Pre-Match Odds: {match["p_odds"]}, '
                              f'InPlay Odds: {match["i_odds"]}')
                    print(tc_tip)
                except Exception as e:
                    # NOTE(review): the original raised
                    # OddsError(str(data['error'])), but `data` is the last
                    # predictions DataFrame and has no 'error' key, so the
                    # handler itself would crash; report the real cause.
                    raise OddsError(str(e)) from e
                if tc_tip not in tc_tips:
                    tc_tips.append(tc_tip)
        for tip in tips_page:
            t.send_message(tip, telegram_id)
        for tip in tc_tips:
            t.send_message(tip, telegram_id)
        with open(f'preds{time.strftime("%Y-%m-%d_%H-%M")}.txt', 'w') as f:
            for tip in tips_page:
                f.write(f'{tip}\n')
            for tip in tc_tips:
                f.write(f'{tip}\n')
    return (''), 204
# Stop the background scheduler cleanly when the process exits.
atexit.register(lambda: cron.shutdown(wait=False))
if __name__ == "__main__":
    # NOTE(review): debug=True must not be enabled in production.
    app.run(debug=True)
|
<reponame>Christophe-Foyer/tracking_turtlebot
#!/usr/bin/env python3
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from std_msgs.msg import Float64
from tf.transformations import euler_from_quaternion
import math
from tracking_turtlebot import clamp, makeSimpleProfile, PID
# Minimums from Burger/Waffle (the lower of the two models' limits).
MAX_LIN_VEL = 0.22   # m/s
MAX_ANG_VEL = 1.82   # rad/s
# max vel changes: per-cycle increments used when ramping toward a target.
LIN_VEL_STEP_SIZE = 0.01
ANG_VEL_STEP_SIZE = 0.1
class TurtleBot:
    """ROS node that continuously rotates a TurtleBot to face the heading
    published on /heading_polaris, using /odom for the current yaw and a
    PID controller for the angular-velocity command on /cmd_vel.
    """

    def __init__(self):
        rospy.init_node('turtlebot_controller', anonymous = True)
        # Create a publisher for movement
        self.pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        # Create an odometry subscriber for pose
        # Should use a magnetometer instead
        self.odom_sub = rospy.Subscriber('/odom', Odometry, self.odom_callback)
        # Create a subscriber for polaris heading
        self.heading_sub = rospy.Subscriber('/heading_polaris', Float64,
                                            self.heading_callback)
        # Create a velocity subscriber
        self.vel_sub = rospy.Subscriber('/cmd_vel', Twist, self.vel_callback)
        # Initialize Variables
        self.target_heading = 0.0
        self.control_angular_vel = 0.0
        self.rate = rospy.Rate(5) # 5hz
        #(a higher rate would be good but my VM struggles
        # NOTE(review): self.rate is never slept on, so face_polaris() spins
        # as fast as possible -- likely intended self.rate.sleep() per loop.
        self.twist = Twist() # initialize with zeros
        self.odom_callback(Odometry()) # initialize with zeros
        self.heading_polaris = self.rpy['yaw'] # initialize to current yaw
        # PID might be overkill. Poorly tuned PID, but it's fine
        self.pid = PID(P=0.1, I=0.0, D=0)
        # Run loop (blocks until the node shuts down).
        self.face_polaris()

    def odom_callback(self, msg):
        # Cache the latest pose and convert its quaternion to roll/pitch/yaw.
        self.pose = msg.pose.pose
        # for conveinnece convert to rpy
        orientation_q = self.pose.orientation
        orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
        (roll, pitch, yaw) = euler_from_quaternion(orientation_list)
        # NOTE(review): rad->deg conversion is normally *180/pi; *360/pi
        # yields twice the angle in degrees. This is only consistent if
        # /heading_polaris uses the same doubled convention -- confirm.
        self.rpy = {'roll':roll*360/math.pi,
                    'pitch':pitch*360/math.pi,
                    'yaw':yaw*360/math.pi}

    def heading_callback(self, heading):
        # Latest target heading published by the polaris tracker.
        self.heading_polaris = heading.data

    def vel_callback(self, twist):
        # Latest commanded velocity; used to detect external control.
        self.twist = twist

    def face_polaris(self):
        # Blocking control loop: keep re-orienting until the node shuts down.
        while not rospy.is_shutdown():
            self.rotate()

    def rotate(self):
        # TODO: implement logic to take the shortest path to orientation
        target = self.heading_polaris # For debugging, should subscribe to heading
        state = self.rpy['yaw']
        self.target_angular_vel = self.pid.calc(target, state)
        # print("state: ", round(state), ';',
        #       'error: ', round(state-target), ';')
        twist = Twist()
        # check the robot is stopped before reorienting
        if all([self.twist.linear.x == 0,
                self.twist.linear.y == 0,
                self.twist.linear.z == 0]):
            twist.linear = self.twist.linear
            # Ramp toward the PID output rather than jumping straight to it.
            # This might causes issues with slop in the PID controller
            control_angular_vel = makeSimpleProfile(self.control_angular_vel,
                                                    self.target_angular_vel,
                                                    (ANG_VEL_STEP_SIZE/2.0))
            control_angular_vel = clamp(control_angular_vel,
                                        -MAX_ANG_VEL, MAX_ANG_VEL)
            self.control_angular_vel = control_angular_vel
            twist.angular.x = 0.0
            twist.angular.y = 0.0
            twist.angular.z = control_angular_vel
            self.pub.publish(twist)
        else: # something else is controlling the robot, wait for it to stop
            rospy.loginfo('Something else is trying to move the robot')
            self.pid.empty_states() # Reset the PID
if __name__ == '__main__':
try:
turtle = TurtleBot()
except rospy.ROSInterruptException:
pass |
<gh_stars>0
"""
Functions used for distorting images for training
"""
# Import the necessary libraries
import numpy as np
import torch
from scipy.ndimage.filters import gaussian_filter
from PIL import Image
# Import the necessary source codes
from Preprocessing.utils import dct_2d
from Preprocessing.utils import idct_2d
from Preprocessing.utils import ycbcr2rgb
# Initialise the default device
# NOTE(review): GPU index 3 is hard-coded; confirm it matches the target
# host, or make it configurable.
DEVICE = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
def distort_image(path, factor, sigma=1, blur=True):
    """Distort an image by (optionally) Gaussian-blurring its luminance
    channel, downscaling it by ``factor`` and upscaling back with bicubic
    interpolation.

    Args:
        path (string): absolute path to an image file
        factor (int): the resolution factor for interpolation
        sigma (float): the std. dev. to use for the gaussian blur
        blur (boolean): if True, gaussian blur is performed on the image

    Returns:
        numpy.ndarray: distorted image in YCbCr colour space (uint8)
    """
    source = np.array(Image.open(path).convert('YCbCr'))
    # Split the channels, widening to int64 for the blur.
    luma = (source[:, :, 0].astype(np.int16)).astype(np.int64)
    chroma_b = (source[:, :, 1].astype(np.int16)).astype(np.int64)
    chroma_r = (source[:, :, 2].astype(np.int16)).astype(np.int64)
    # Blur only the luminance channel, if requested.
    blurred_luma = gaussian_filter(luma, sigma=sigma) if blur else luma
    degraded = np.copy(source)
    degraded[:, :, 0] = blurred_luma
    degraded[:, :, 1] = chroma_b
    degraded[:, :, 2] = chroma_r
    rows, cols = luma.shape
    # Down-sample then up-sample (both bicubic) to lose detail.
    image = Image.fromarray(degraded, mode='YCbCr')
    image = image.resize(size=(int(cols / factor), int(rows / factor)),
                         resample=Image.BICUBIC)
    image = image.resize(size=(cols, rows), resample=Image.BICUBIC)
    return np.array(image.convert('YCbCr'))
def jpeg_distort(img):
    """
    Takes an image and applies quantization steps of JPEG
    encoding and returns the image with JPEG artifacts.
    NOTE: compression done only on Y/luminance channel

    Args:
        img (numpy array): YCbCr image in the form of a 3D array

    Returns:
        compressed_img (numpy array): version of img with JPEG artifacts
            (converted to RGB via ycbcr2rgb)
    """
    # Crop image to be multiple of 8 in both dimensions (JPEG block size).
    max_x = int(img.shape[0] / 8) * 8
    max_y = int(img.shape[1] / 8) * 8
    im = img[0:max_x, 0:max_y, :]
    im_Y, im_Cb, im_Cr = im[:, :, 0], im[:, :, 1], im[:, :, 2]
    im_Y_tensor = torch.tensor(im_Y, dtype=torch.float).to(DEVICE)
    # Standard JPEG luminance quantization table, tiled over the image.
    qt_Y = np.array([[16, 11, 10, 16, 24, 40, 51, 61],
                     [12, 12, 14, 19, 26, 58, 60, 55],
                     [14, 13, 16, 24, 40, 57, 69, 56],
                     [14, 17, 22, 29, 51, 87, 80, 62],
                     [18, 22, 37, 56, 68, 109, 103, 77],
                     [24, 35, 55, 64, 81, 104, 113, 92],
                     [49, 64, 78, 87, 103, 121, 120, 101],
                     [72, 92, 95, 98, 112, 100, 103, 99]])
    qt_Y = np.tile(qt_Y, (int(im_Y.shape[0] / 8), int(im_Y.shape[1] / 8)))
    qt_Y = torch.tensor(qt_Y.astype(np.float32) * 3).to(DEVICE)
    # Get DCT coefficients of the uncompressed image, block by block.
    # BUGFIX: the inner loops previously ranged over shape[0] (rows), so on
    # non-square images whole columns of blocks were never transformed.
    Y_dct = torch.empty_like(im_Y_tensor).to(DEVICE)
    for i in range(0, im_Y.shape[0], 8):
        for j in range(0, im_Y.shape[1], 8):
            Y_dct[i:i + 8, j:j + 8] = dct_2d(im_Y_tensor[i:i + 8, j:j + 8], norm='ortho')
    # Quantize/dequantize, then invert the DCT per block - this is the
    # JPEG-compressed version of the image (input to the ARCNN).
    Y_dct_quantized = torch.round((Y_dct / qt_Y)) * qt_Y
    Y_dct_inv = torch.empty_like(Y_dct, dtype=torch.float).to(DEVICE)
    for i in range(0, im_Y.shape[0], 8):
        for j in range(0, im_Y.shape[1], 8):
            Y_dct_inv[i:i + 8, j:j + 8] = idct_2d(Y_dct_quantized[i:i + 8, j:j + 8], norm='ortho')
    # Reassemble: compressed luminance, original chrominance.
    compressed_img = np.zeros(im.shape)
    compressed_img[:, :, 0] = Y_dct_inv.detach().cpu().numpy()
    compressed_img[:, :, 1] = im_Cb
    compressed_img[:, :, 2] = im_Cr
    compressed_img = ycbcr2rgb(compressed_img)
    return compressed_img
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import gym
from gym import Envs, spaces
from gym.utils import seeding
from gym.envs.registration import register
import numpy as np
import random as rd
import math
class HRI_StationaryEnv(gym.Env):
    """Old-style (pre-0.26 API: _step/_reset/_render) gym environment for a
    human-robot interaction task on a polar grid.

    The agent's position is (alpha, distance) relative to a stationary goal
    at the origin; each step moves 2.0 units along the commanded heading.
    State is [alpha, distance, last_action]; the action is a heading in
    [-pi, pi].
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }

    def __init__(self):
        ####### Can I do this? #######
        self.max_action = math.pi # velocity
        self.min_action = -math.pi
        # Position bounds as [angle, radius].
        self.min_position = [-math.pi, 0.0]
        self.max_position = [math.pi, 20.0]
        self.speed = 2.0
        self.action = [0, 0]
        self.low_state = np.array([self.min_position[0], self.min_position[1], self.min_action])
        self.high_state = np.array([self.max_position[0], self.max_position[1], self.max_action])
        self.viewer = None
        self.action_space = spaces.Box(np.array([self.min_action]), np.array([self.max_action]))
        self.observation_space = spaces.Box(self.low_state, self.high_state)
        self._seed()
        self.reset()

    def _seed(self, seed=None):
        # Seed the environment's private RNG (old gym seeding helper).
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _step(self, action):
        # Advance one step: move 2.0 units along the commanded heading in
        # Cartesian space, then convert back to polar (alpha, distance).
        position = self.position
        self.action = action
        """
        for i in range(len(position)):
            position[i] = position[i] + action[i]
            if position[i] > self.max_position:
                position[i] = self.max_position
            elif position[i] < self.min_position:
                position[i] = self.min_position
        distance = np.linalg.norm(np.subtract(position, self.goal_position))
        """
        alpha = np.arctan2((position[1]*np.sin(position[0]) + 2.0*np.sin(action)),(position[1]*np.cos(position[0]) + 2.0*np.cos(action)))[0]
        distance = np.sqrt((position[1]*np.cos(position[0]) + 2.0*np.cos(action))**2 + (position[1]*np.sin(position[0]) + 2.0*np.sin(action))**2)
        # Clamp to the outer boundary of the world.
        if distance > 20.0:
            distance = 20.0
        # Episode ends on success (within 10 units of the goal) or when the
        # PREVIOUS position was pinned at the 20-unit boundary.
        done = bool(distance <= 10.0 or abs(self.position[1] - 20.0) < 0.1) #and np.linalg.norm(action) == 0)
        reward = 0.0
        if done and abs(self.position[1] - 20.0) < 0.1:
            reward = -15.0
        elif done and distance <= 10.0:
            print ("#############")
            print ("DONE")
            print ("#############")
            reward += 15.0
        #print (self.distance)
        #print (self.position)
        #if self.distance > 20.0:
        #reward += 300.0*(2.0**(-np.sqrt(self.distance)))
        #else:
        # Constant per-step penalty to encourage reaching the goal quickly.
        reward -= 1.0
        #if one and np.action
        print ("Reward")
        print (reward)
        #if np.linalg.norm(self.distance - distance) <= 0.3:
        #reward -= float(self.step)*0.05
        self.distance = distance
        self.position = np.array([alpha, distance])
        #self.state = np.array([self.goal_position[0]- position[0], self.goal_position[1] - position[1]])
        # NOTE(review): action[0] assumes the action is a length-1 array --
        # consistent with the Box action space above; confirm for scalars.
        self.state = np.array([alpha, distance, action[0]])
        #self.step += 1
        return self.state, reward, done, {}

    def _reset(self):
        # Random start: any angle, 10-20 units from the goal at the origin.
        #x,y = np.random.uniform(self.min_position, self.max_position, 2)
        #x1,y1 = np.random.uniform(10.0, self.max_position, 2)
        #x1s, y1s = np.random.uniform(-1.0, 1.0, 2)
        alpha = np.random.uniform(-np.pi, np.pi)
        distance = np.random.uniform(10.0, 20.0)
        #self.position = [x1*x1s/abs(x1s), y1*y1s/abs(y1s)]
        self.position = np.array([alpha, distance])
        self.goal_position = np.array([0.0, 0.0])
        #self.distance = np.linalg.norm([x1*x1s/abs(x1s),y1*y1s/abs(y1s)])
        #self.state = np.array([self.goal_position[0] - self.position[0], self.goal_position[1] - self.goal_position[1]])
        self.state = np.array([alpha, distance, 0.0])
        # Tear down any stale viewer so the next render starts fresh.
        if self.viewer is not None:
            self.viewer.close()
            self.viewer = None
        #self.robtrans.set_translation((self.position[0]-self.min_position)*scale, (self.position[1]-self.min_position)*scale)
        #self.pedtrans.set_translation((self.goal_position[0]-self.min_position)*scale, (self.goal_position[1]-self.min_position)*scale)
        return np.array(self.state)

    def _render(self, mode='human', close=False):
        # Draw the goal (filled circle at the origin) and the robot (open
        # circle at its polar position), both scaled into a 400x400 window.
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        screen_width = 400
        screen_height = 400
        world_width = self.max_position[1]*2.0 # - self.min_position
        scale = screen_width/world_width
        # NOTE: shadows the builtin `min`; it is the world's lower bound.
        min = -self.max_position[1]
        alpha = self.position[0]
        distance = self.position[1]
        if self.viewer is None:
            from gym.envs.RobotHumanInteraction import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            self.robtrans = rendering.Transform()
            pedestrian = rendering.make_circle(radius = 10)
            self.pedtrans = rendering.Transform()
            #print (self.goal_position)
            pedestrian.add_attr(rendering.Transform(translation=(( - min) *scale, (-min)*scale)))
            self.robot = rendering.make_circle(radius = 10, filled= False)
            self.robot.add_attr(self.robtrans)
            #self.robot.add_attr(rendering.Transform(translation = ((np.cos(alpha)*distance-min)*scale, (np.sin(alpha)*distance-min)*scale)))
            self.viewer.add_geom(self.robot)
            self.viewer.add_geom(pedestrian)
            self.robtrans.set_translation((np.cos(alpha)*distance-min)*scale, (np.sin(alpha)*distance-min)*scale)
        else:
            #self.robtrans.set_translation(10.0, 10.0)
            self.robtrans.set_translation((np.cos(alpha)*distance-min)*scale, (np.sin(alpha)*distance-min)*scale)
        print (self.position)
        return self.viewer.render(return_rgb_array = mode=='rgb_array')
register(
id='HRI_Stationary-v0',
entry_point='gym.envs.RobotHumanInteraction:HRI_StationaryEnv',
tags={'wrapper_config.TimeLimit.max_episode_steps': 15},
) |
"""
htmlx.webapi.fetch
====================================
https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API
"""
# TODO - untested. moving these over from javascript module
# TODO - check if promise also needs to come to this package
# @staticmethod
def fetch(url: str, **kwargs):
    """Fetch a single URL and return the resolved Promise.

    undocumented - warning. use at own risk.

    :param url: the URL to request (must be a str).
    :param kwargs: forwarded to ``window._do_request``.
    :raises ValueError: if *url* is not a single string.
    """
    if type(url) is not str:
        raise ValueError(
            "fetch takes a single url string. use fetch_set, fetch_threaded or fetch_pooled"
        )
    f = Promise()
    # BUG FIX: kwargs is a dict and must be expanded with `**`.  The original
    # `*kwargs` passed only the keyword *names* as positional arguments and
    # dropped their values.
    r = window._do_request(url, f, **kwargs)
    return f.resolve(r)
# @staticmethod
def fetch_set(urls: list, callback_function=None, error_handler=None, **kwargs):
    """Fetch several URLs one after another into a FetchedSet.

    undocumented - warning. use at own risk.
    note - still blocks; every URL is fetched before this returns.
    problems - all urls share a single callback, error handler and kwargs set.
    """
    if type(urls) is str:
        urls = [urls]  # leniency: accept a lone URL string
    fetched = FetchedSet()
    for target in urls:
        response = window.fetch(target, **kwargs).then(callback_function)
        fetched.results.append(response.data)
    return fetched
# @staticmethod
def fetch_threaded(urls: list, callback_function=None, error_handler=None, **kwargs):
    """Fetch several URLs concurrently, one daemon thread per URL.

    undocumented - warning. use at own risk.
    note - still blocks; joins every thread before returning.
    problems - all urls share a single callback, error handler and kwargs set.
    """
    if type(urls) is str:
        urls = [urls]  # leniency: accept a lone URL string
    f = FetchedSet()
    jobs = []
    for url in urls:
        # BUG FIX: the original passed window._do_request(...) *called* as
        # `target=`, so the request ran synchronously in the main thread and
        # its return value became the (useless) thread target.  Pass the
        # callable plus its arguments instead.
        thread = threading.Thread(target=window._do_request,
                                  args=(url, f), kwargs=kwargs)
        thread.daemon = True  # setDaemon() is deprecated since Python 3.10
        jobs.append(thread)
    # BUG FIX: map() is lazy in Python 3 -- the original never consumed the
    # iterators, so the threads were never started or joined.
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()
    return f
# @staticmethod
def fetch_pooled(urls: list, callback_function=None, error_handler=None, **kwargs):
    """Fetch several URLs through a worker pool.

    undocumented - warning. use at own risk.
    note - still blocks; the pool is drained before returning.
    problems - all urls share a single callback, error handler and kwargs set.
    """
    if type(urls) is str:
        urls = [urls]  # leniency: accept a lone URL string
    f = FetchedSet()

    def _do_request_wrapper(job):
        # Unpack one job dict and issue the request; results accumulate on
        # the shared FetchedSet carried inside the job.
        request_kwargs = job["k"]
        request_kwargs["callback_function"] = job["c"]
        request_kwargs["error_handler"] = job["e"]
        window._do_request(job["url"], job["f"], **request_kwargs)

    p = Pool()
    # (Removed an unused `jobs = []` list from the original.)
    job_specs = [
        {"url": url, "f": f, "c": callback_function, "e": error_handler, "k": kwargs}
        for url in urls
    ]
    p.map(_do_request_wrapper, job_specs)
    p.close()
    p.join()
    return f
# def fetch_aysnc( urls: list, options={}, type="async" ):
# TODO - a version using async/await
class Headers:
    """Minimal emulation of the JS Fetch API ``Headers`` interface over a dict."""

    def __init__(self, headers=None):
        # BUG FIX: default to a fresh empty dict so ``Headers()`` constructed
        # with no argument supports get/set/has (was None -> TypeError).
        self.headers = {} if headers is None else headers

    def get(self, name):
        return self.headers[name]

    def set(self, name, value):
        self.headers[name] = value

    def has(self, name):
        return name in self.headers

    def keys(self):
        return self.headers.keys()

    def values(self):
        return self.headers.values()

    def entries(self):
        # BUG FIX: dict has no .entries(); the Python equivalent of the JS
        # Headers.entries() iterator is dict.items().
        return self.headers.items()

    def delete(self, name):
        del self.headers[name]

    def forEach(self, callback, thisArg=None):
        for name, value in self.headers.items():
            callback(value, name, self)

    def map(self, callback, thisArg=None):
        return [callback(value, name, self) for name, value in self.headers.items()]

    def filter(self, callback, thisArg=None):
        return [
            callback(value, name, self)
            for name, value in self.headers.items()
            if callback(value, name, self)
        ]

    def reduce(self, callback, initialValue):
        # BUG FIX: actually thread the accumulator through the callback; the
        # original built a list and never accumulated anything.
        accumulator = initialValue
        for name, value in self.headers.items():
            accumulator = callback(accumulator, value, name, self)
        return accumulator

    def toString(self):
        return str(self.headers)

    def toObject(self):
        return self.headers

    def toJSON(self):
        return self.headers

    def __str__(self):
        return str(self.headers)

    def __repr__(self):
        return str(self.headers)

    def __iter__(self):
        return self.headers.__iter__()

    def __next__(self):
        return self.headers.__next__()

    def __getitem__(self, key):
        return self.headers[key]

    def __setitem__(self, key, value):
        self.headers[key] = value

    def __delitem__(self, key):
        del self.headers[key]

    def __contains__(self, key):
        return key in self.headers
class Response:
    """Minimal emulation of the JS Fetch API ``Response`` object.

    A plain data holder: every body accessor simply returns the stored body
    unmodified; no decoding or parsing is performed by this stub.
    """

    def __init__(self, url=None, status=None, statusText=None, headers=None, body=None):
        self.url = url
        self.status = status
        self.statusText = statusText
        self.headers = headers
        self.body = body

    def arrayBuffer(self):
        return self.body

    def blob(self):
        return self.body

    def formData(self):
        return self.body

    def json(self):
        return self.body

    def text(self):
        return self.body

    def clone(self):
        # Shallow copy: headers and body objects are shared with the clone.
        return Response(
            url=self.url,
            status=self.status,
            statusText=self.statusText,
            headers=self.headers,
            body=self.body,
        )

    def __str__(self):
        return str(self.body)

    def __repr__(self):
        return str(self.body)

    def __iter__(self):
        return iter(self.body)

    def __next__(self):
        return next(self.body)

    def __getitem__(self, key):
        return self.body[key]

    def __setitem__(self, key, value):
        self.body[key] = value

    def __delitem__(self, key):
        del self.body[key]

    def __contains__(self, key):
        return key in self.body
class Request:
    """Minimal emulation of the JS Fetch API ``Request`` object (data holder)."""

    def __init__(self, url=None, method=None, headers=None, body=None,
                 mode=None, credentials=None, cache=None):
        self.url = url
        self.method = method
        self.headers = headers
        self.body = body
        self.mode = mode
        self.credentials = credentials
        self.cache = cache

    def clone(self):
        # Shallow copy of the request; nested objects are shared.
        return Request(
            url=self.url,
            method=self.method,
            headers=self.headers,
            body=self.body,
            mode=self.mode,
            credentials=self.credentials,
            cache=self.cache,
        )

    def arrayBuffer(self):
        # Stub: returns the stored body unmodified.
        return self.body
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from dgl import model_zoo
from torch.utils.data import DataLoader
import math, random, sys
import argparse
from collections import deque
import rdkit
from jtnn import *
torch.multiprocessing.set_sharing_strategy('file_system')
def worker_init_fn(id_):
    """Silence RDKit logging in a DataLoader worker (id_ is unused)."""
    rdkit.RDLogger.logger().setLevel(rdkit.RDLogger.CRITICAL)
# Apply the RDKit log silencing to the main process as well.
worker_init_fn(None)

# ---- command-line interface ----
parser = argparse.ArgumentParser(description="Training for JTNN",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-t", "--train", dest="train", default='train', help='Training file name')
parser.add_argument("-v", "--vocab", dest="vocab", default='vocab', help='Vocab file name')
parser.add_argument("-s", "--save_dir", dest="save_path", default='./',
                    help="Path to save checkpoint models, default to be current working directory")
parser.add_argument("-m", "--model", dest="model_path", default=None,
                    help="Path to load pre-trained model")
parser.add_argument("-b", "--batch", dest="batch_size", default=40,
                    help="Batch size")
parser.add_argument("-w", "--hidden", dest="hidden_size", default=200,
                    help="Size of representation vectors")
parser.add_argument("-l", "--latent", dest="latent_size", default=56,
                    help="Latent Size of node(atom) features and edge(atom) features")
parser.add_argument("-d", "--depth", dest="depth", default=3,
                    help="Depth of message passing hops")
parser.add_argument("-z", "--beta", dest="beta", default=1.0,
                    help="Coefficient of KL Divergence term")
parser.add_argument("-q", "--lr", dest="lr", default=1e-3,
                    help="Learning Rate")
parser.add_argument("-T", "--test", dest="test", action="store_true",
                    help="Add this flag to run test mode")
args = parser.parse_args()

dataset = JTNNDataset(data=args.train, vocab=args.vocab, training=True)
vocab_file = dataset.vocab_file

# CLI values may arrive as strings; normalise them to numeric types once here.
batch_size = int(args.batch_size)
hidden_size = int(args.hidden_size)
latent_size = int(args.latent_size)
depth = int(args.depth)
beta = float(args.beta)
lr = float(args.lr)

model = model_zoo.chem.DGLJTNNVAE(vocab_file=vocab_file,
                                  hidden_size=hidden_size,
                                  latent_size=latent_size,
                                  depth=depth)

if args.model_path is not None:
    # Resume from a pre-trained checkpoint.
    model.load_state_dict(torch.load(args.model_path))
else:
    # Fresh start: zero the 1-D parameters (biases), Xavier-init the matrices.
    for param in model.parameters():
        if param.dim() == 1:
            nn.init.constant_(param, 0)
        else:
            nn.init.xavier_normal_(param)

model = cuda(model)
print("Model #Params: %dK" % (sum([x.nelement() for x in model.parameters()]) / 1000,))

optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
# NOTE(review): this step() decays the learning rate once before any training
# has happened -- confirm the initial 0.9x decay is intentional.
scheduler.step()

MAX_EPOCH = 100
PRINT_ITER = 20  # iterations between progress printouts in train()
def train():
    """Train the JTNN VAE for MAX_EPOCH epochs over the training set.

    Uses the module-level ``dataset``, ``model``, ``optimizer``, ``scheduler``
    and ``args``; checkpoints are written under ``args.save_path``.
    """
    dataset.training = True
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
        collate_fn=JTNNCollator(dataset.vocab, True),
        drop_last=True,
        worker_init_fn=worker_init_fn)
    for epoch in range(MAX_EPOCH):
        word_acc, topo_acc, assm_acc, steo_acc = 0, 0, 0, 0
        for it, batch in enumerate(dataloader):
            model.zero_grad()
            try:
                loss, kl_div, wacc, tacc, sacc, dacc = model(batch, beta)
            except Exception:
                # BUG FIX: was a bare `except:`, which also intercepted
                # KeyboardInterrupt/SystemExit before re-raising.  Log the
                # offending molecules, then propagate the error.
                print([t.smiles for t in batch['mol_trees']])
                raise
            loss.backward()
            optimizer.step()
            word_acc += wacc
            topo_acc += tacc
            assm_acc += sacc
            steo_acc += dacc
            if (it + 1) % PRINT_ITER == 0:
                # Report running averages (in percent) over the last
                # PRINT_ITER iterations, then reset the accumulators.
                word_acc = word_acc / PRINT_ITER * 100
                topo_acc = topo_acc / PRINT_ITER * 100
                assm_acc = assm_acc / PRINT_ITER * 100
                steo_acc = steo_acc / PRINT_ITER * 100
                print("KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Loss: %.6f" % (
                    kl_div, word_acc, topo_acc, assm_acc, steo_acc, loss.item()))
                word_acc, topo_acc, assm_acc, steo_acc = 0, 0, 0, 0
                sys.stdout.flush()
            if (it + 1) % 1500 == 0:  # Fast annealing
                scheduler.step()
                print("learning rate: %.6f" % scheduler.get_lr()[0])
                torch.save(model.state_dict(),
                           args.save_path + "/model.iter-%d-%d" % (epoch, it + 1))
        # End of epoch: decay the LR once more and checkpoint.
        scheduler.step()
        print("learning rate: %.6f" % scheduler.get_lr()[0])
        torch.save(model.state_dict(), args.save_path + "/model.iter-" + str(epoch))
def test():
    """Round-trip decode molecules from the dataset as a smoke test.

    Prints the ground-truth SMILES followed by the SMILES decoded from the
    sampled latent vectors for each molecule.
    """
    dataset.training = False
    dataloader = DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=0,
        # BUG FIX: the original passed the undefined module-level name
        # `vocab` (NameError at call time); use the dataset's vocab exactly
        # as train() does.
        collate_fn=JTNNCollator(dataset.vocab, False),
        drop_last=True,
        worker_init_fn=worker_init_fn)
    # Just an example of molecule decoding; in reality you may want to sample
    # tree and molecule vectors.
    for it, batch in enumerate(dataloader):
        gt_smiles = batch['mol_trees'][0].smiles
        print(gt_smiles)
        model.move_to_cuda(batch)
        _, tree_vec, mol_vec = model.encode(batch)
        tree_vec, mol_vec, _, _ = model.sample(tree_vec, mol_vec)
        smiles = model.decode(tree_vec, mol_vec)
        print(smiles)
if __name__ == '__main__':
    # Run evaluation or training depending on the -T/--test flag.
    if args.test:
        test()
    else:
        train()
    # Dump the message-passing workload counters accumulated on the model
    # during the run (useful for profiling the DGL implementation).
    print('# passes:', model.n_passes)
    print('Total # nodes processed:', model.n_nodes_total)
    print('Total # edges processed:', model.n_edges_total)
    print('Total # tree nodes processed:', model.n_tree_nodes_total)
    print('Graph decoder: # passes:', model.jtmpn.n_passes)
    print('Graph decoder: Total # candidates processed:', model.jtmpn.n_samples_total)
    print('Graph decoder: Total # nodes processed:', model.jtmpn.n_nodes_total)
    print('Graph decoder: Total # edges processed:', model.jtmpn.n_edges_total)
    print('Graph encoder: # passes:', model.mpn.n_passes)
    print('Graph encoder: Total # candidates processed:', model.mpn.n_samples_total)
    print('Graph encoder: Total # nodes processed:', model.mpn.n_nodes_total)
    print('Graph encoder: Total # edges processed:', model.mpn.n_edges_total)
|
<gh_stars>0
#!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
# http://www.imagemagick.org/Usage/draw/#arcs
w = 100
h = 60
bgcolor = Color('skyblue')

# original imagemagick command family:
# Elliptical Arcs : A radius_x,y angle large,sweep x,y
# convert -size 100x60 xc:skyblue -fill white -stroke black \
#         -draw "path 'M 30,40 A ...'" path_arc*.gif


def _draw_arc_sample(filename, arcs, close=False):
    """Render one path_arc example and save it to *filename*.

    Every sample shares the same boilerplate: white fill / black stroke,
    move to (30, 40), then one or more elliptic arcs.  *arcs* is a list of
    keyword-argument dicts for ``Drawing.path_elliptic_arc``; *close* adds a
    trailing 'Z' (path_close).  (Refactored: the original repeated this
    with-block seven times.)
    """
    with Image(width=w, height=h, background=bgcolor) as img:
        with Drawing() as draw:
            draw.fill_color = Color('white')
            draw.stroke_color = Color('black')
            draw.path_start()
            draw.path_move((30, 40))
            for arc in arcs:
                draw.path_elliptic_arc(**arc)
            if close:
                draw.path_close()
            draw.path_finish()
            draw(img)
        img.save(filename=filename)


# "path 'M 30,40 A 30,15 0 0,0 70,20'"
_draw_arc_sample('sample22a.png', [dict(radius=(30, 15), to=(70, 20))])

# "path 'M 30,40 A 30,15 0 0,1 70,20'"
_draw_arc_sample('sample22b.png',
                 [dict(radius=(30, 15), to=(70, 20), clockwise=True)])

# "path 'M 30,40 A 30,15 0 1,0 70,20'"
_draw_arc_sample('sample22c.png',
                 [dict(radius=(30, 15), to=(70, 20), large_arc=True)])

# "path 'M 30,40 A 30,15 0 1,1 70,20'"
_draw_arc_sample('sample22d.png',
                 [dict(radius=(30, 15), to=(70, 20),
                       large_arc=True, clockwise=True)])

# Closed and angled elliptical arcs (defined by two edge points)
# "path 'M 30,40 A 30,20 20 0,0 70,20 Z'"
_draw_arc_sample('sample22e.png',
                 [dict(radius=(30, 20), to=(70, 20), rotation=20)],
                 close=True)

# "path 'M 30,40 A 30,20 20 1,1 70,20 Z'"
_draw_arc_sample('sample22f.png',
                 [dict(radius=(30, 20), to=(70, 20), rotation=20,
                       large_arc=True, clockwise=True)],
                 close=True)

# "path 'M 30,40 A 30,20 20 0,0 70,20 A 30,20 20 1,0 30,40 Z'"
_draw_arc_sample('sample22g.png',
                 [dict(radius=(30, 20), to=(70, 20), rotation=20),
                  dict(radius=(30, 20), to=(30, 40), rotation=20,
                       large_arc=True)],
                 close=True)
|
<filename>tests/test_peering_service.py
from typing import AsyncGenerator
from unittest import mock
from asyncio.exceptions import TimeoutError
import pytest
from async_timeout import timeout
from sarafan.events import NewPeer, DiscoveryRequest, DiscoveryFinished, DiscoveryFailed
from sarafan.models import Peer
from sarafan.peering import PeeringService, PeerClient
from sarafan.peering.client import DiscoveryResult
from .factories import PublicationFactory
from .utils import generate_rnd_hash, generate_rnd_address
MAX_PEERS = 10
@pytest.fixture(name='peering')
async def peering_service() -> AsyncGenerator[PeeringService, None]:
    """Yield a started PeeringService capped at MAX_PEERS; stop it afterwards."""
    service = PeeringService(max_peer_count=MAX_PEERS)
    await service.start()
    try:
        yield service
    finally:
        await service.stop()
@pytest.mark.asyncio
async def test_add_peer(peering):
    """Adding the same peer twice must not create a duplicate entry."""
    peer = Peer(service_id='fakepeer1')
    for _ in range(2):  # the second add exercises duplicate handling
        await peering.add_peer(peer)
    ranked = list(peering.peers_by_distance(generate_rnd_hash()))
    assert len(ranked) == 1
    assert peer in ranked
@pytest.mark.asyncio
async def test_remove_peer(peering):
    """A removed peer must disappear from distance-ordered listings."""
    peer = Peer(service_id='removepeer1')
    await peering.add_peer(peer)
    assert isinstance(peering.get_client(peer), PeerClient)
    await peering.remove_peer(peer)
    remaining = list(peering.peers_by_distance(generate_rnd_hash()))
    assert remaining == []
@pytest.mark.asyncio
async def test_handle_new_peer_event(peering):
    """Dispatching a NewPeer event must register the announced peer."""
    event = NewPeer(addr=generate_rnd_address(), hostname='new_peer')
    await peering.dispatch(event)
    peers = list(peering.peers_by_distance(generate_rnd_hash()))
    assert len(peers) == 1
    registered = peers[0]
    assert registered.address == event.addr
    assert registered.service_id == event.hostname
@pytest.mark.asyncio
async def test_peer_cleanup(peering):
    """Exceeding the peer limit evicts the lowest-rated (first) peer."""
    lowest_rated = Peer(service_id='peer0', rating=0.0)
    await peering.add_peer(lowest_rated)
    for i in range(1, MAX_PEERS + 1):
        await peering.add_peer(Peer(service_id=f'peer{i}', rating=i * 0.01))
    remaining = list(peering.peers.values())
    assert len(remaining) == MAX_PEERS
    assert lowest_rated not in remaining
@pytest.mark.asyncio
@mock.patch('sarafan.peering.service.PeerClient.has_magnet', side_effect=[True])
@mock.patch('sarafan.peering.service.PeerClient.discover', return_value=DiscoveryResult())
async def test_handle_discovery_request(discover_mock, has_magnet_mock, peering: PeeringService):
    """Discovery for a known magnet finishes; a repeat request fails because
    the only peer has already been visited.

    BUG FIX: stacked @mock.patch decorators inject mocks bottom-up, so the
    first injected parameter is the *discover* mock, not has_magnet -- the
    original parameter order silently swapped the two mocks.
    """
    peer = Peer(service_id='fake_discovery')
    await peering.add_peer(peer)
    publication = PublicationFactory.create()
    discovery_request = DiscoveryRequest(publication=publication)
    service_bus = peering.bus
    queue = service_bus.subscribe(DiscoveryFinished)
    await peering.dispatch(discovery_request)
    with timeout(1):
        event: DiscoveryFinished = await queue.get()
        assert event.publication == publication
    # BUG FIX: `assert await mock.called_once()` always passed -- accessing an
    # arbitrary attribute on a Mock returns a truthy Mock.  Use the real
    # assertion helpers instead.
    has_magnet_mock.assert_called_once()
    discover_mock.assert_called_once()
    failed_queue = service_bus.subscribe(DiscoveryFailed)
    await peering.dispatch(DiscoveryRequest(publication=publication, state=event.state))
    # We should have failure on the second try (because the only node already visited)
    with timeout(1):
        event: DiscoveryFailed = await failed_queue.get()
        assert event.publication == publication
    # There should be no DiscoveryFinished events
    with pytest.raises(TimeoutError):
        with timeout(0.2):
            await queue.get()
@pytest.mark.asyncio
@mock.patch('sarafan.peering.service.PeerClient.has_magnet', side_effect=[False])
@mock.patch('sarafan.peering.service.PeerClient.discover', return_value=DiscoveryResult())
async def test_discovery_failed(discover_mock, has_magnet_mock, peering: PeeringService):
    """Discovery must fail when no peer has the requested magnet.

    BUG FIX: stacked @mock.patch decorators inject mocks bottom-up; the
    original parameter order silently swapped the two mocks.
    """
    peer = Peer(service_id="fail_discovery")
    await peering.add_peer(peer)
    publication = PublicationFactory.create()
    discovery_request = DiscoveryRequest(publication=publication)
    service_bus = peering.bus
    queue = service_bus.subscribe(DiscoveryFailed)
    await peering.dispatch(discovery_request)
    with timeout(1):
        event: DiscoveryFailed = await queue.get()
        assert event.publication == publication
    # BUG FIX: replaced the no-op `assert await mock.called_once()` (always
    # truthy on a Mock) with the real assertion helpers.
    has_magnet_mock.assert_called_once()
    discover_mock.assert_called_once()
|
import logging
import string
import freetype
import numpy as np
from ..geometry import Size
from .core import TilesSource
log = logging.getLogger(__name__)
class TrueTypeFont(TilesSource):
    """Generate tiles from True Type Font."""

    DEFAULT_DPI = 96
    # Width-reference characters used when measuring the glyph cell: one wide
    # glyph suffices for monospace fonts; proportional fonts measure all
    # uppercase letters and take the widest.
    MONOSPACE_REFERENCE_CHARS = ['@', ]
    PROPORTIONAL_REFERENCE_CHARS = string.ascii_uppercase

    def __init__(self, path, size, charset=None):
        self.path = path
        self.face = freetype.Face(path)
        if charset is None:
            charset = self.get_ttf_charset()
        super().__init__(charset)
        self._is_monospace = None   # computed lazily by is_monospace
        self._tile_size = None      # explicit Size, or None -> use pixel_size
        self._pixel_size = None     # cached glyph cell size, see pixel_size
        self.set_size(size)

    def get_ttf_charset(self):
        """Return the code points for which the font actually defines a glyph."""
        charset = [
            code_point for (code_point, char_index)
            in self.face.get_chars()
            if char_index
        ]
        return charset

    def set_char_size(self, size, dpi=DEFAULT_DPI):
        """Set the character size in points (FreeType 26.6 fixed-point units)."""
        self.face.set_char_size(0, int(size*64), hres=dpi, vres=dpi)
        self._pixel_size = None  # invalidate the cached cell size

    def set_pixel_size(self, size):
        """Binary-search the largest char size whose glyph cell fits *size*."""
        min_char_size = 1
        max_char_size = 96
        guess_char_size = size.height
        correct_char_size = None
        while correct_char_size is None:
            self.set_char_size(guess_char_size)
            pixel_size = self.pixel_size
            if pixel_size.width > size.width or pixel_size.height > size.height:
                # BUG FIX: leftover debug print() calls replaced with
                # log.debug() (the module already defines `log`).
                log.debug('%s %s too big! - %s - %s',
                          guess_char_size, pixel_size, min_char_size, max_char_size)
                max_char_size = guess_char_size
            elif pixel_size.width <= size.width and pixel_size.height <= size.height:
                log.debug('%s %s too small! - %s - %s',
                          guess_char_size, pixel_size, min_char_size, max_char_size)
                if guess_char_size == max_char_size or guess_char_size == min_char_size:
                    # Converged: the search interval cannot shrink further.
                    correct_char_size = guess_char_size
                    break
                min_char_size = guess_char_size
            guess_char_size = (min_char_size + max_char_size) // 2

    def set_size(self, size):
        """Set the tile size from a Size (pixels) or a plain number (points)."""
        if isinstance(size, Size):
            self._tile_size = size
            self.set_pixel_size(size)
        else:
            self._tile_size = None
            self.set_char_size(size)
        # TODO: Set hinting, loading flags

    def has_char(self, char):
        """Return True if the font defines a glyph for *char*."""
        char_index = self.face.get_char_index(char)
        return char_index != 0

    def has_code_point(self, code_point):
        """Return True if the font defines a glyph for *code_point*."""
        char = chr(code_point)
        return self.has_char(char)

    def load_char(self, char):
        """Load the glyph for *char* (str or code point) into face.glyph."""
        if not isinstance(char, str):
            char = chr(char)
        # TODO: flags!
        self.face.load_char(char)

    @property
    def is_monospace(self):
        # NOTE: Size MUST be set first for this to work!
        if self._is_monospace is None:
            # BUG FIX: load_char() returns None, so the old `dot = ...` and
            # `at = ...` assignments were dead; only the advance widths of a
            # narrow ('.') and a wide ('@') glyph are compared.
            self.load_char('.')
            dot_width = self.face.glyph.advance.x//64
            self.load_char('@')
            at_width = self.face.glyph.advance.x//64
            self._is_monospace = dot_width == at_width
        return self._is_monospace

    @property
    def _reference_chars(self):
        if self.is_monospace:
            return self.MONOSPACE_REFERENCE_CHARS
        else:
            return self.PROPORTIONAL_REFERENCE_CHARS

    @property
    def pixel_size(self):
        """Size in pixels of the glyph cell at the current char size (cached)."""
        if self._pixel_size is None:
            # NOTE: face.height might be lower than ascender + descender and last line (like in "_") might be cut off
            # height = self.face.size.height//64
            height = self.face.size.ascender//64 - self.face.size.descender//64
            widths = []
            for char in self._reference_chars:
                self.load_char(char)
                widths.append(self.face.glyph.metrics.width//64)
            width = max(widths)
            self._pixel_size = Size(width, height)
        return self._pixel_size

    @property
    def tile_size(self):
        if self._tile_size:
            return self._tile_size
        else:
            return self.pixel_size

    def get_tile(self, code_point, tile_size):
        """Render *code_point* into a uint8 array of shape tile_size (transposed),
        or None for missing/blank glyphs."""
        if not self.has_code_point(code_point):
            return None
        pixel_size = self.pixel_size
        self.load_char(code_point)
        bitmap = self.face.glyph.bitmap
        if not bitmap.pixel_mode == freetype.FT_PIXEL_MODE_GRAY:
            raise ValueError(
                "Invalid pixel_mode! got: %s, expected: %s" % (bitmap.pixel_mode, freetype.FT_PIXEL_MODE_GRAY)
            )
        bitmap_array = np.asarray(bitmap.buffer).reshape(
            (bitmap.width, bitmap.rows), order="F"
        )
        if bitmap_array.size == 0:
            # Blank glyph (e.g. space) -- nothing to draw.
            return None
        # Clip glyphs that start left of the pen origin.
        offset_x = 0
        if self.face.glyph.bitmap_left < 0:
            offset_x = self.face.glyph.bitmap_left * -1
        bitmap_array = bitmap_array[offset_x:, :]
        # Clip glyphs that rise above the ascender line.
        offset_y = 0
        if self.face.glyph.bitmap_top > self.face.size.ascender//64:
            offset_y = self.face.glyph.bitmap_top - self.face.size.ascender//64
        bitmap_array = bitmap_array[:, offset_y:]
        if self.is_monospace:
            left = self.face.glyph.bitmap_left + offset_x
        else:
            # Center proportional glyphs horizontally inside the cell.
            left = max(0, (pixel_size.width - self.face.glyph.metrics.width//64)//2 + offset_x)
        top = self.face.size.ascender//64 - self.face.glyph.bitmap_top + offset_y
        # Center the glyph cell inside a (possibly larger) target tile.
        left += max(0, tile_size.width-pixel_size.width) // 2
        top += max(0, tile_size.height-pixel_size.height) // 2
        tile = np.zeros(tile_size, dtype=np.uint8, order="F")
        # Crop anything that would overflow the tile (too-wide / too-tall glyphs).
        bitmap_array = bitmap_array[:tile.shape[0]-left, :tile.shape[1]-top]
        tile[left:left+bitmap_array.shape[0], top:top+bitmap_array.shape[1]] = bitmap_array
        return tile.transpose()
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from deepracer_msgs/SetVisualColorRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class SetVisualColorRequest(genpy.Message):
  # NOTE: autogenerated by genpy from SetVisualColorRequest.msg -- do not
  # edit by hand; regenerate from the .msg definition instead.
  _md5sum = "c993776acc4e7a226360c9194290bf99"
  _type = "deepracer_msgs/SetVisualColorRequest"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """string link_name
string visual_name
std_msgs/ColorRGBA ambient
std_msgs/ColorRGBA diffuse
std_msgs/ColorRGBA specular
std_msgs/ColorRGBA emissive
bool block
================================================================================
MSG: std_msgs/ColorRGBA
float32 r
float32 g
float32 b
float32 a
"""
  __slots__ = ['link_name','visual_name','ambient','diffuse','specular','emissive','block']
  _slot_types = ['string','string','std_msgs/ColorRGBA','std_msgs/ColorRGBA','std_msgs/ColorRGBA','std_msgs/ColorRGBA','bool']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       link_name,visual_name,ambient,diffuse,specular,emissive,block

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(SetVisualColorRequest, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.link_name is None:
        self.link_name = ''
      if self.visual_name is None:
        self.visual_name = ''
      if self.ambient is None:
        self.ambient = std_msgs.msg.ColorRGBA()
      if self.diffuse is None:
        self.diffuse = std_msgs.msg.ColorRGBA()
      if self.specular is None:
        self.specular = std_msgs.msg.ColorRGBA()
      if self.emissive is None:
        self.emissive = std_msgs.msg.ColorRGBA()
      if self.block is None:
        self.block = False
    else:
      self.link_name = ''
      self.visual_name = ''
      self.ambient = std_msgs.msg.ColorRGBA()
      self.diffuse = std_msgs.msg.ColorRGBA()
      self.specular = std_msgs.msg.ColorRGBA()
      self.emissive = std_msgs.msg.ColorRGBA()
      self.block = False

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.link_name
      length = len(_x)
      # Strings are wire-encoded as a uint32 length followed by utf-8 bytes.
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.visual_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self
      # 16 float32 colour channels (4 x RGBA) followed by the bool flag.
      buff.write(_get_struct_16fB().pack(_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.ambient is None:
        self.ambient = std_msgs.msg.ColorRGBA()
      if self.diffuse is None:
        self.diffuse = std_msgs.msg.ColorRGBA()
      if self.specular is None:
        self.specular = std_msgs.msg.ColorRGBA()
      if self.emissive is None:
        self.emissive = std_msgs.msg.ColorRGBA()
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.link_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.link_name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.visual_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.visual_name = str[start:end]
      _x = self
      start = end
      end += 65  # 16 * 4 bytes of float32 + 1 byte bool
      (_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block,) = _get_struct_16fB().unpack(str[start:end])
      self.block = bool(self.block)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.link_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.visual_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self
      buff.write(_get_struct_16fB().pack(_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.ambient is None:
        self.ambient = std_msgs.msg.ColorRGBA()
      if self.diffuse is None:
        self.diffuse = std_msgs.msg.ColorRGBA()
      if self.specular is None:
        self.specular = std_msgs.msg.ColorRGBA()
      if self.emissive is None:
        self.emissive = std_msgs.msg.ColorRGBA()
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.link_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.link_name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.visual_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.visual_name = str[start:end]
      _x = self
      start = end
      end += 65
      (_x.ambient.r, _x.ambient.g, _x.ambient.b, _x.ambient.a, _x.diffuse.r, _x.diffuse.g, _x.diffuse.b, _x.diffuse.a, _x.specular.r, _x.specular.g, _x.specular.b, _x.specular.a, _x.emissive.r, _x.emissive.g, _x.emissive.b, _x.emissive.a, _x.block,) = _get_struct_16fB().unpack(str[start:end])
      self.block = bool(self.block)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
# Pre-built/lazily-built struct packers shared by the message classes above.
_struct_I = genpy.struct_I
def _get_struct_I():
  global _struct_I
  return _struct_I
_struct_16fB = None
def _get_struct_16fB():
  # Lazily compile the '<16fB' packer (16 little-endian float32 colour
  # channels followed by one uint8/bool).
  global _struct_16fB
  if _struct_16fB is None:
    _struct_16fB = struct.Struct("<16fB")
  return _struct_16fB
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from deepracer_msgs/SetVisualColorResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetVisualColorResponse(genpy.Message):
  # NOTE: autogenerated by genpy from SetVisualColorResponse.msg -- do not
  # edit by hand; regenerate from the .msg definition instead.
  _md5sum = "2ec6f3eff0161f4257b808b12bc830c2"
  _type = "deepracer_msgs/SetVisualColorResponse"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """bool success
string status_message
"""
  __slots__ = ['success','status_message']
  _slot_types = ['bool','string']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       success,status_message

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(SetVisualColorResponse, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.success is None:
        self.success = False
      if self.status_message is None:
        self.status_message = ''
    else:
      # No arguments given: every field takes its type's default value.
      self.success = False
      self.status_message = ''
  def _get_types(self):
    """
    internal API method
    """
    # Parallel to __slots__: the ROS type string for each field.
    return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
# Cached struct.Struct accessors (genpy-generated); see the serializers above.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_B = None
def _get_struct_B():
    # Lazily compile "<B": a single uint8 (the 'success' bool on the wire).
    global _struct_B
    if _struct_B is None:
        _struct_B = struct.Struct("<B")
    return _struct_B
class SetVisualColor(object):
  """Service metadata container binding the request/response message classes
  together for rospy's service machinery (autogenerated by genpy)."""
  _type = 'deepracer_msgs/SetVisualColor'
  _md5sum = '9c987659e93e8e993b90a6ea6fab5b74'
  _request_class  = SetVisualColorRequest
  _response_class = SetVisualColorResponse
|
import numpy as np
import fitsio
from astropy.table import Table, unique
import argparse

# Randoms are laid down at 2500 points per square degree, so
# area(deg^2) = N_random / RANDOM_DENSITY.
RANDOM_DENSITY = 2500
SURVEYOPS = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk'
LSSDIR = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS'

parser = argparse.ArgumentParser()
parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!", default='test')
args = parser.parse_args()
print(args)
version = args.version


def count_done_tiles(program):
    """Count MTL-done tiles that belong to the SV3 *program* tile set."""
    # Log of tiles completed for MTL, restricted to tiles present in the
    # SV3 tile file for the requested program.
    mtld = Table.read(SURVEYOPS + '/mtl/mtl-done-tiles.ecsv')
    tiles = Table.read(SURVEYOPS + '/ops/tiles-sv3.ecsv')
    tiles = tiles[tiles['PROGRAM'] == program]
    return np.sum(np.isin(mtld['TILEID'], tiles['TILEID']))


def report_area(prog):
    """Print cumulative area covered by >= N tiles for program *prog*
    ('dark' or 'bright'), from the unique random points."""
    ran = Table.read(LSSDIR + '/random0/rancomb_' + prog + '_Alltilelocinfo.fits')
    ran = unique(ran, keys=['TARGETID'])
    print('#area covered on ' + prog.upper() + ' tiles\n#>N_tiles area(deg2)')
    for nt in np.unique(ran['NTILE']):
        wt = ran['NTILE'] >= nt
        print(nt, len(ran[wt]) / RANDOM_DENSITY)
    print('#')


def report_observed(prog):
    """Print counts of unique observed / well-measured targets for *prog*."""
    dat = Table.read(LSSDIR + '/datcomb_' + prog + '_tarspecwdup_Alltiles.fits')
    # ZWARN*0 == 0 is False for NaN entries, so this keeps rows with a valid
    # (good-hardware) redshift fit; 999999 flags entries with no observation.
    wz = dat['ZWARN'] * 0 == 0
    wz &= dat['ZWARN'] != 999999
    datz = unique(dat[wz], keys=['TARGETID'])
    datzg = unique(dat[dat['ZWARN'] == 0], keys=['TARGETID'])
    print('number of unique ' + prog + ' time targets observed (good hardware):', len(datz))
    print('number of unique ' + prog + ' time targets with a good observation (ZWARN==0):', len(datzg))
    print('#')


# The original script repeated each of these stanzas verbatim for the dark
# and bright programs; the helpers above keep the two passes identical.
print('number of completed dark tiles:', count_done_tiles('DARK'))
print('number of completed bright tiles:', count_done_tiles('BRIGHT'))

report_area('dark')
report_area('bright')

report_observed('dark')
report_observed('bright')

print('splitting by type, numbers are after all veto masks:')
for tp in ['ELG', 'LRG', 'QSO', 'BGS_ANY', 'MWS_ANY']:
    dat = fitsio.read(LSSDIR + '/LSScats/' + version + '/' + tp + 'Alltiles_full.dat.fits')
    wz = dat['ZWARN'] * 0 == 0
    wz &= dat['ZWARN'] != 999999
    wzg = dat['ZWARN'] == 0
    print('number of unique ' + tp + ' targets observed (good hardware):', len(dat[wz]))
    print('number of unique ' + tp + ' targets with a good observation (ZWARN==0):', len(dat[wzg]))
    ran = fitsio.read(LSSDIR + '/LSScats/' + version + '/' + tp + 'Alltiles_0_clustering.ran.fits')
    print('effective ' + tp + ' area, after vetoing higher-priority positions, and imaging: ', str(len(ran) / RANDOM_DENSITY))
    print('#')
|
<reponame>msoltysik/innovativeproject-health-care
from flask import jsonify, Response, abort, request
from flask_restful import Resource
from sqlalchemy import exc
from backend.app import db
from backend.common.permissions import roles_allowed
from backend.models import User
class EditorsRes(Resource):
    """Editors collection."""

    @roles_allowed(['admin'])
    def get(self):
        """Get all editors.

        Returns list of all editors. It's also possible to filter the list\
        using search phrase.

        Roles allowed: admin.
        ---
        tags:
          - editors
        security:
          - bearerAuth: []
        parameters:
          - in: query
            name: q
            type: string
            required: false
            description: Search phrase, minimum 4 characters long.
        responses:
          400:
            description: Too short search phrase.
          200:
            description: List of editors.
        """
        args = request.args
        if 'q' in args:
            # A search phrase was supplied; enforce the documented minimum
            # length so we do not run near-unfiltered ILIKE scans.
            phrase = args['q']
            if len(phrase) <= 3:
                abort(400, 'Search phrase too short, minimum length is 4 '
                           'characters.')
            # Add a wildcard at the end of the phrase (prefix match on
            # name, login or mail).
            phrase += '%'
            editors = (
                User.query
                .filter(
                    # .is_(True) is the canonical SQLAlchemy boolean test
                    # (the original "== True" trips lint E712).
                    User.editor.is_(True),
                    db.or_(
                        User.full_name.ilike(phrase),
                        User.login.ilike(phrase),
                        User.mail.ilike(phrase)
                    )
                )
                .order_by(User.full_name.asc())
                .all()
            )
        else:
            # No search phrase: return every editor.
            editors = (
                User.query
                .filter_by(editor=True)
                .order_by(User.full_name.asc())
                .all()
            )
        response = jsonify([e.serialize() for e in editors])
        response.status_code = 200
        return response
class EditorRes(Resource):
    """Single editor resource: create, delete."""

    @roles_allowed(['admin'])
    def put(self, user_id):
        """Create an editor.

        Adds user to the list of editors. Users from this list can be later\
        assigned as editors of specific tribes.

        Roles allowed: admin.
        ---
        tags:
          - editors
        security:
          - bearerAuth: []
        parameters:
          - in: path
            name: user_id
            type: integer
            required: true
            description: Id of the user.
        responses:
          201:
            description: Success.
          204:
            description: User with given id is already an editor.
          404:
            description: User with given id doesn't exist.
        """
        # Get user data from db or ldap
        user = User.from_id(user_id)
        # If id cannot be found neither in db nor ldap
        if user is None:
            abort(404, 'Could not find user with given identity.')
        # Already an editor: nothing to do, report 204.
        # (Plain truth test instead of the "... is True" anti-idiom.)
        if user.in_db() and user.is_editor():
            response = Response()
            response.status_code = 204
            return response
        # Set user as editor
        user.editor = True
        # Try to insert / update in db, catch exceptions
        try:
            db.session.add(user)
            db.session.commit()
        except exc.SQLAlchemyError:
            abort(400)
        response = Response()
        response.status_code = 201
        return response

    @roles_allowed(['admin'])
    def delete(self, user_id):
        """Delete an editor.

        Removes user from list of editors.

        Roles allowed: admin.
        ---
        tags:
          - editors
        security:
          - bearerAuth: []
        parameters:
          - in: path
            name: user_id
            type: integer
            required: true
            description: Id of the user.
        responses:
          204:
            description: Success.
          404:
            description: Editor with given id doesn't exist.
        """
        # Get user data from db or ldap
        user = User.from_id(user_id)
        # If id cannot be found neither in db nor ldap
        if user is None:
            abort(404, 'Could not find user with given identity.')
        # Bug fix: the original tested "(in_db() or is_editor()) is False",
        # which only aborts when BOTH are false -- a user present in the db
        # but not an editor slipped through and got 204.  The documented
        # contract is 404 whenever the target is not an existing editor.
        if not (user.in_db() and user.is_editor()):
            abort(404, 'Requested editor does not exist.')
        user.editor = False
        user.editing.clear()
        try:
            db.session.add(user)
            db.session.commit()
        except exc.SQLAlchemyError:
            abort(400)
        user.revalidate()
        response = Response()
        response.status_code = 204
        return response
|
<filename>ex_bary10.py
# Finds the E-LPIPS barycenter of ten perturbed versions of an input image.
#
# The supported perturbations include additive Gaussian noise and small shifts.
#
# Runs the iteration for 100 000 steps. Outputs are generated by default into directory out_bary10,
# but this directory may be changed with --outdir.
#
# The final result will be outdir/100000.png by default.
#
# This code also supports the LPIPS metric to facilitate comparisons.
#
# Usage:
#    python ex_bary10.py image --mode=[shift|noise]
#    python ex_bary10.py image --mode=[shift|noise] --metric=[elpips_vgg|lpips_vgg|lpips_squeeze]
#
# (The previous usage lines referred to ex_pairwise_average.py with two
# images; this script takes ONE source image plus a perturbation --mode.)

import tensorflow as tf
import numpy as np
import pdb
import os
import csv
import itertools
import time
import sys
import argparse

import elpips
import scipy.misc
import imageio


TOLERANCE = 0.00001  # How far to clip images from 0 and 1.


parser = argparse.ArgumentParser()
parser.add_argument('images', type=str, nargs=1, help='source image')
parser.add_argument('--outdir', type=str, default="out_bary10", help='output directory for intermediate files. Default: out_bary10')
parser.add_argument('--steps', type=int, default=100000, help='number of iterations to run')
parser.add_argument('--metric', type=str, default='elpips_vgg', help='(elpips_vgg, lpips_vgg, lpips_squeeze)')
parser.add_argument('--seed', type=int, default=-1, help='random seed (-1 for random)')
parser.add_argument('--count', type=int, default=10, help='number of input images. Default: 10')
parser.add_argument('--mode', type=str, help='input mode (shift, noise)')
parser.add_argument('--noise_scale', type=float, default=0.07, help='strength of noise on top of the source image')
parser.add_argument('--learning_rate', type=float, default=0.03, help='step size multiplier for the optimization')
args = parser.parse_args()

# NOTE(review): 'elpips_squeeze_maxpool' is accepted here but not advertised
# in the --metric help string above.
if args.metric not in ('elpips_vgg', 'elpips_squeeze_maxpool', 'lpips_vgg', 'lpips_squeeze'):
    raise Exception('Unsupported metric.')
if args.mode not in ('shift', 'noise'):
    raise Exception('Unsupported mode.')
def load_image(path):
    """Read *path* into a float32 image array in [0, 1].

    Supports raw ``.npy`` dumps (returned exactly as stored) and 8-bit
    ``.png``/``.jpg`` files (rescaled from [0, 255]).  Raises for any
    other extension.
    """
    extension = os.path.splitext(path)[1].lower()
    if extension == '.npy':
        return np.load(path)
    if extension in ('.png', '.jpg'):
        return imageio.imread(path).astype(np.float32) / 255.0
    raise Exception('Unknown image type.')
# Create output directory.
os.makedirs(args.outdir, exist_ok=True)

# Set random seed (a negative seed leaves NumPy's default state untouched).
if args.seed >= 0:
    np.random.seed(args.seed)

# Create the perturbed inputs.
images = []

src_image = load_image(args.images[0])[:, :, 0:3]
imageio.imwrite(os.path.join(args.outdir, "src_image.png"), (0.5 + 255.0 * src_image).astype(np.uint8))

src_image = np.expand_dims(src_image, 0)  # -> NHWC with N == 1.

for i in range(args.count):
    if args.mode == 'noise':
        # Additive Gaussian noise, clipped back into the valid color range.
        image = src_image + args.noise_scale * np.random.randn(*src_image.shape).astype(np.float32)
        image = np.clip(image, 0.0, 1.0)
    elif args.mode == 'shift':
        # Random integer shift in [-2, 2] on both axes; reflect-padding
        # restores the original image size after the crop.
        shift_x = np.random.randint(-2, 2+1)
        shift_y = np.random.randint(-2, 2+1)

        N, H, W, C = src_image.shape
        image = src_image[:, max(0, -shift_y) : H - max(0, shift_y), max(0, -shift_x) : W - max(0, shift_x), :]
        image = np.pad(image, (
            (0, 0),
            (max(0, shift_y), max(0, -shift_y)),
            (max(0, shift_x), max(0, -shift_x)),
            (0, 0)
        ), 'reflect')
    else:
        raise Exception('Unsupported mode.')

    imageio.imwrite(os.path.join(args.outdir, "input{:02d}.png".format(i)), (0.5 + 255.0 * image[0, :, :, :]).astype(np.uint8))
    images.append(image)

mean_image = np.mean(images, axis=0)
# Fix: the file name previously went through a stray '.format(i)' (a no-op
# left over from the per-image writes above, reading the stale loop index).
imageio.imwrite(os.path.join(args.outdir, "input_mean.png"), (0.5 + 255.0 * mean_image[0, :, :, :]).astype(np.uint8))

# Initial optimization image: mid-gray plus noise, clipped a safe distance
# away from 0/1 so the optimizer never starts on the clamp boundary.
init_image = 0.5 + 0.2 * np.random.randn(*images[0].shape).astype(np.float32)
init_image = np.clip(init_image, TOLERANCE, 1.0 - TOLERANCE)
imageio.imwrite(os.path.join(args.outdir, "initial_image.png"), (0.5 + 255.0 * init_image[0, :, :, :]).astype(np.uint8))
# Create the graph.
print("Creating graph.")

tf_images = []
for i in range(len(images)):
    tf_images.append(tf.constant(images[i], dtype=tf.float32))
tf_images = tf.concat(tf_images, axis=0)

with tf.variable_scope('variables'):
    tf_X = tf.get_variable('tf_X', dtype=tf.float32, initializer=init_image, trainable=True)
    tf_X_uint8 = tf.cast(tf.floor(255.0 * tf.clip_by_value(tf_X, 0.0, 1.0) + 0.5), tf.uint8)[0, :, :, :]

    tf_step = tf.get_variable('step', dtype=tf.int32, initializer=0, trainable=False)
    tf_increase_step = tf.assign(tf_step, tf_step + 1)

tf_step_f32 = tf.cast(tf_step, tf.float32)
tf_step_f32 = tf.sqrt(100.0 ** 2 + tf_step_f32**2) - 100  # Gradual start.

# Learning rate schedule between 1/t and 1/sqrt(t).
tf_learning_rate = args.learning_rate / (1.0 + 0.02 * tf_step_f32 ** 0.75)

metric_config = elpips.get_config(args.metric)
# Bug fix: 'image' is a batched NHWC array, so image[0].shape[1:3] is
# (width, channels).  The metric's scale levels must be derived from the
# actual (height, width), i.e. axes 1 and 2 of the NHWC batch.
metric_config.set_scale_levels_by_image_size(image.shape[1], image.shape[2])
model = elpips.Metric(metric_config)

# Evaluate the distance of a randomly chosen input and tf_X.
tf_random_index = tf.random_uniform([], minval=0, maxval=args.count, dtype=tf.int32)
tf_loss = tf.square(model.forward(tf_X, tf_images[tf_random_index:tf_random_index+1, :, :, :]))[0]

with tf.control_dependencies([tf_increase_step]):
    tf_optimizer = tf.train.AdamOptimizer(tf_learning_rate)
    tf_minimize = tf_optimizer.minimize(tf_loss)

# Project to a safe distance from invalid colors.
tf_fix_X = tf.assign(tf_X, tf.clip_by_value(tf_X, TOLERANCE, 1.0 - TOLERANCE))
print("Starting session.")

gpu_options = tf.GPUOptions(allow_growth=True)
session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)

with tf.Session(config=session_config) as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

    # No more modifications to the computation graph.
    tf.get_default_graph().finalize()

    # Checkpoints for visualizing intermediate results: dense early on
    # (when the image changes fast), then every 500 steps from 2000 onward,
    # plus the final step.
    checkpoints = [0, 1, 2, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 150, 200, 300, 400, 600, 900, 1200, 1500]
    for i in range(2000, 1 + args.steps, 500):
        checkpoints.append(i)
    checkpoints.append(args.steps)
    checkpoints = set(checkpoints)

    stime = time.time()

    # Run the iteration.
    for i in range(1 + args.steps):
        # Re-project the variable strictly inside (0, 1) before each step;
        # Adam may have pushed it onto the clamp boundary.
        sess.run([tf_fix_X])

        if i not in checkpoints:
            # Iterate.
            sess.run([tf_minimize])
        else:
            # Also output statistics.  Each fetched tensor ('kernel') is
            # paired with a handler appended immediately after it, so the
            # repeatedly re-bound 'def op' closures do not suffer from
            # late binding -- the list already holds the right objects.
            kernels = []
            ops = []
            results = {}

            kernels.append(tf_loss)
            def op(x):
                results['loss'] = x
            ops.append(op)

            kernels.append(tf_learning_rate)
            def op(x):
                results['learning_rate'] = x
            ops.append(op)

            kernels.append(tf_X_uint8)
            def op(x):
                results['X_uint8'] = x
            ops.append(op)

            kernels.append(tf_minimize)
            def op(x):
                pass
            ops.append(op)

            for x, op in zip(sess.run(kernels), ops):
                op(x)

            # Display results.
            loss, X_uint8 = results['loss'], results['X_uint8']

            etime = time.time()
            print("Elapsed: {} s. Step {}/{}. Loss: {}. Learning rate: {}".format(int(etime - stime), i, args.steps, loss, results['learning_rate']))
            imageio.imwrite(os.path.join(args.outdir, "{:06d}.png".format(i)), X_uint8)

            if i % 10000 == 0:
                # Periodic raw float snapshot for later analysis.
                X = sess.run([tf_X])
                np.save(os.path.join(args.outdir, "save_{:06d}.npy".format(i)), X)
|
#!/usr/bin/env python
###############################################################################
#
# $Author$
# $Date$
# $Id$
#
# PNFS agend client
# Author: <NAME> (<EMAIL>) 08/05
#
###############################################################################
# system imports
import sys
import pprint
import stat
import os
import socket
import string
import errno
# import cPickle
# enstore imports
import option
import generic_client
import backup_client
#import udp_client
import Trace
import e_errors
import enstore_constants
import enstore_functions2
# For layer_file() and is_pnfs_path
from pnfs import is_access_name
# Well-known identity strings for this client and its server, taken from
# enstore_constants so every component agrees on the names.
MY_NAME = enstore_constants.PNFS_AGENT_CLIENT  #"PNFS_A_CLIENT"
MY_SERVER = enstore_constants.PNFS_AGENT  #"pnfs_agent"

# Default reply timeout (seconds) and retry count for agent requests.
RCV_TIMEOUT = 10
RCV_TRIES = 5
class PnfsAgentClient(generic_client.GenericClient,
backup_client.BackupClient):
    def __init__( self, csc, server_address=None, flags=0, logc=None,
                  alarmc=None, rcv_timeout = RCV_TIMEOUT,
                  rcv_tries = RCV_TRIES):
        """Create a client for the pnfs_agent server.

        csc is the configuration server client (or address) handed to
        GenericClient; the remaining arguments tune logging, alarms and
        reply timeout/retry behavior and are forwarded unchanged.
        """
        #self.print_id is unique in each of pnfs.Pnfs, chimera.ChimeraFS,
        # and pnfs_agent_client.PnfsAgentClient.  It is to be used for
        # the printing of messages to name the specific interface
        # being used by namespace.StorageFS.
        self.print_id = "pnfs_agent"

        generic_client.GenericClient.__init__(self,csc,MY_NAME,server_address,
                                              flags=flags, logc=logc,
                                              alarmc=alarmc,
                                              rcv_timeout=rcv_timeout,
                                              rcv_tries=rcv_tries,
                                              server_name = MY_SERVER)

        # Reusable request ticket for get_file_stat(); the reply for the
        # most recently queried filename is memoized here.
        self.r_ticket = {'work'     : 'get_file_stat',
                         'filename' : "",
                         'statinfo' : [],
                         'pinfo'    : {},
                         'bfid'     : None
                         }
def status(self, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
return self.send({"work" : "show_state"}, rcv_timeout, tries)
def show_state(self, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
return self.send({'work':'show_state'},
rcv_timeout=rcv_timeout, tries=tries)
###############################################################################
# FIXME (Could replace with pnfs.Pnfs().layer_file()?)
def layer_file(self, filename, layer_number):
pn, fn = os.path.split(filename)
if is_access_name(fn):
return os.path.join(pn, "%s(%d)" % (fn, layer_number))
else:
return os.path.join(pn, ".(use)(%d)(%s)" % (layer_number, fn))
# FIXME (Could replace with pnfs.Pnfs().access_file()?)
def access_file(self, pn, pnfsid):
return os.path.join(pn, ".(access)(%s)" % pnfsid)
# FIXME (Could replace with pnfs.Pnfs().use_file()?)
def use_file(self, f, layer):
pn, fn = os.path.split(f)
if is_access_name(fn):
#Use the .(access)() extension path for layers.
return "%s(%s)" % (f, layer)
else:
return os.path.join(pn, '.(use)(%d)(%s)' % (layer, fn))
# FIXME (Could replace with pnfs.Pnfs().fset_file()?)
def fset_file(self, f, size):
pn, fn = os.path.split(f)
if is_access_name(fn):
pnfsid = fn[10:-1] #len(".(access)(") == 10 and len ")" == 1
parent_id = self.get_parent(pnfsid, pn)
directory = os.path.join(pn, ".(access)(%s)" % parent_id)
name = self.get_nameof(pnfsid, pn)
else:
directory = pn
name = fn
return os.path.join(directory, ".(fset)(%s)(size)(%s)" % (name, size))
# FIXME (Could replace with pnfs.Pnfs().nameof_file()?)
def nameof_file(self, pn, pnfsid):
return os.path.join(pn, ".(nameof)(%s)" % (pnfsid,))
# FIXME (Could replace with pnfs.Pnfs().const_file()?)
def const_file(self, f):
pn, fn = os.path.split(f)
if is_access_name(fn):
pnfsid = fn[10:-1] #len(".(access)(") == 10 and len ")" == 1
parent_id = self.get_parent(pnfsid, pn)
directory = os.path.join(pn, ".(access)(%s)" % parent_id)
name = self.get_nameof(pnfsid, pn)
else:
directory = pn
name = fn
return os.path.join(directory, ".(const)(%s)" % (name,))
###############################################################################
def is_pnfs_path(self, pathname, check_name_only = None,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
####################
# Do the first part of check locally as done in
# pnfs.py
# This allows to send requests to pnfs agent only when needed,
# thus reducing the traffic between pnfs agent client and pnfs agent
if not pathname: #Handle None and empty string.
return False
full_pathname = enstore_functions2.fullpath(pathname)[1]
#Determine if the target file or directory is in the pnfs namespace.
if string.find(full_pathname, "/pnfs/") < 0:
return False #If we get here it is not a pnfs directory.
####################
ticket = { 'work' : 'is_pnfs_path',
'fname' : pathname,
'check_name_only' : check_name_only
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
if not e_errors.is_ok(ticket):
return None #Should this raise an exception instead?
return ticket['rc']
def isdir(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'get_stat',
'filename' : filename,
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
if not e_errors.is_ok(ticket):
return None
else:
return stat.S_ISDIR(ticket['statinfo'][stat.ST_MODE])
def isfile(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'get_stat',
'filename' : filename,
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
if not e_errors.is_ok(ticket):
return None
else:
return stat.S_ISREG(ticket['statinfo'][stat.ST_MODE])
def islink(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'get_stat',
'filename' : filename,
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
if not e_errors.is_ok(ticket):
return None
else:
return stat.S_ISLNK(ticket['statinfo'][stat.ST_MODE])
def e_access(self, path, mode, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = { 'work' : 'e_access',
'path' : path,
'mode' : mode,
'rc' : 1
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
if not e_errors.is_ok(ticket):
return None
return ticket['rc']
###############################################################################
"""
def get_directory_name(self, filename):
ticket = { 'work' : 'is_pnfs_path',
'fname' : filename,
}
ticket = self.send(ticket)
if not e_errors.is_ok(ticket):
return None #Should this raise an exception instead?
return ticket['rc']
"""
###############################################################################
    def get_file_stat(self, filename, rcv_timeout=RCV_TIMEOUT,
                      tries=RCV_TRIES):
        """Return (statinfo, bfid, pinfo) for *filename*, or (None, None, None).

        Replies are memoized in self.r_ticket: a repeated call with the same
        filename re-uses the previous agent reply without another round trip.
        NOTE(review): a failed lookup is cached too, so retrying the same
        name keeps returning (None, None, None) until a different filename
        resets the cache -- confirm this is intended.
        """
        if ( self.r_ticket['filename'] != filename ) :
            # Cache miss: remember the name and query the agent.
            self.r_ticket['filename'] = filename
            self.r_ticket = self.send(self.r_ticket, rcv_timeout=rcv_timeout,
                                      tries=tries)
            if self.r_ticket['status'][0] == e_errors.OK:
                return self.r_ticket['statinfo'], self.r_ticket['bfid'], self.r_ticket['pinfo']
            else:
                return None, None, None
        else:
            # Cache hit: serve the previous reply (success or failure alike).
            if ( self.r_ticket['status'][0] == e_errors.OK ) :
                return self.r_ticket['statinfo'], self.r_ticket['bfid'], self.r_ticket['pinfo']
            else:
                return None, None, None
    def get_pnfsstat(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
        """Return os.stat()-style info for *filename* via 'get_pnfsstat'.

        Error replies are translated back into local exceptions:
        IOERROR -> IOError, OSERROR -> OSError, timed-out request ->
        OSError(ETIMEDOUT), anything else -> e_errors.EnstoreError.
        (Python 2 raise syntax -- this module targets python2.)
        """
        ticket = {'work'     : 'get_pnfsstat',
                  'filename' : filename,
                  }
        ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
        if e_errors.is_ok(ticket):
            if not ticket['statinfo']:
                # Defensive: an OK reply should always carry statinfo.
                message = "Received non-error stat() reply with missing " \
                          "stat info: %s" % (ticket,)
                Trace.log(e_errors.ERROR, message)
            return ticket['statinfo']
        elif ticket['status'][0] == e_errors.IOERROR:
            raise IOError, (ticket['errno'], ticket['status'][1])
        elif ticket['status'][0] == e_errors.OSERROR:
            raise OSError, (ticket.get('errno', e_errors.UNKNOWN),
                            ticket['status'][1])
        elif e_errors.is_timedout(ticket):
            raise OSError, (errno.ETIMEDOUT, "pnfs_agent")
        else:
            raise e_errors.EnstoreError(None, ticket['status'][0],
                                        ticket['status'][1])
    def get_stat(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
        """Return os.stat()-style info for *filename* via 'get_stat'.

        Same contract and error translation as get_pnfsstat(), only the
        'work' field differs.  (Python 2 raise syntax.)
        """
        ticket = {'work'     : 'get_stat',
                  'filename' : filename,
                  }
        ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
        if e_errors.is_ok(ticket):
            if not ticket['statinfo']:
                # Defensive: an OK reply should always carry statinfo.
                message = "Received non-error stat() reply with missing " \
                          "stat info: %s" % (ticket,)
                Trace.log(e_errors.ERROR, message)
            return ticket['statinfo']
        elif ticket['status'][0] == e_errors.IOERROR:
            raise IOError, (ticket['errno'], ticket['status'][1])
        elif ticket['status'][0] == e_errors.OSERROR:
            raise OSError, (ticket.get('errno', e_errors.UNKNOWN),
                            ticket['status'][1])
        elif e_errors.is_timedout(ticket):
            raise OSError, (errno.ETIMEDOUT, "pnfs_agent")
        else:
            raise e_errors.EnstoreError(None, ticket['status'][0],
                                        ticket['status'][1])
###############################################################################
def p_get_library(self, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'get_library',
'dirname' : dirname,
'library' : None
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_library(self, library, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'set_library',
'dirname' : dirname,
'library' : library
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_file_family(self, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'get_file_family',
'dirname' : dirname,
'file_family' : None
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_file_family(self, file_family, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'set_file_family',
'dirname' : dirname,
'file_family' : file_family
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_file_family_width(self, dirname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'get_file_family_width',
'dirname' : dirname,
'file_family_width' : None
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_file_family_width(self, file_family_width, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'set_file_family_width',
'dirname' : dirname,
'file_family_width' : file_family_width
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_file_family_wrapper(self, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'get_file_family_wrapper',
'dirname' : dirname,
'file_family_wrapper' : None
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_file_family_wrapper(self, file_family_wrapper, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'set_file_family_wrapper',
'dirname' : dirname,
'file_family_wrapper' : file_family_wrapper
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_storage_group(self, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'get_storage_group',
'dirname' : dirname,
'storage_group' : None
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_storage_group(self, storage_group, dirname,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'set_storage_group',
'dirname' : dirname,
'storage_group' : storage_group
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
###############################################################################
def p_get_path(self, pnfs_id, mount_point, shortcut=None,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = { 'work' : 'get_path',
'pnfs_id' : pnfs_id,
'dirname' : mount_point,
'shortcut' : shortcut,
'path' : None
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_bit_file_id(self, bfid, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'set_bit_file_id',
'fname' : fname,
'bfid' : bfid
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_bit_file_id(self, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'get_bit_file_id',
'fname' : fname,
'bfid' : None
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_id(self, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'get_id',
'fname' : fname,
'file_id' : None
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_nameof(self, pnfsid, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'get_nameof',
'pnfsid' : pnfsid,
'nameof' : None
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_parent_id(self, pnfsid, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'get_parent_id',
'pnfsid' : pnfsid,
'parent_id' : None
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
p_get_parent = p_get_parent_id #backward compatibility
def p_get_file_size(self, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'get_file_size',
'fname' : fname,
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_file_size(self, size, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'set_file_size',
'fname' : fname,
'size' : size
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_set_xreference(self, volume, location_cookie, size, file_family,
pnfsFilename, volume_filepath, id, volume_fileP,
bit_file_id, drive, crc, filepath,
rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
ticket = {'work' : 'set_xreference',
'volume' : volume,
'location_cookie' : location_cookie,
'size' : size,
'file_family' : file_family,
'pnfsFilename' : pnfsFilename,
'volume_filepath' : volume_filepath,
'id' : id,
'volume_fileP' : volume_fileP,
'bit_file_id' : bit_file_id,
'drive' : drive,
'crc' : crc,
'filepath' : filepath
}
ticket=self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
def p_get_xreference(self, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'get_xreference',
'fname' : fname,
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
###############################################################################
def readlayer(self, layer, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'readlayer',
'fname' : fname,
'layer' : layer
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
if e_errors.is_ok(ticket):
return ticket['layer_info']
return [] #What sould happen here?
def writelayer(self, layer, value, fname, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = {'work' : 'writelayer',
'fname' : fname,
'layer' : layer,
'value' : value
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
if not e_errors.is_ok(ticket):
raise OSError, ticket['status'][1]
###############################################################################
# modify the permissions of the target
def p_chmod(self, mode, filename, rcv_timeout=RCV_TIMEOUT,
tries=RCV_TRIES):
ticket = { 'work' : 'chmod',
'fname' : filename,
'mode' : mode
}
ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
return ticket
# modify the ownership of the target
def p_chown(self, uid, gid, filename, rcv_timeout=RCV_TIMEOUT,
            tries=RCV_TRIES):
    """Ask the agent to change *filename*'s ownership; return the reply ticket."""
    request = {'work': 'chown', 'fname': filename, 'uid': uid, 'gid': gid}
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# create a new file or update its times
def p_touch(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Create *filename* or update its times, owned by the effective uid/gid."""
    request = {
        'work': 'touch',
        'filename': filename,
        'uid': os.geteuid(),
        'gid': os.getegid(),
    }
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# create a new file
def p_creat(self, filename, mode=None, rcv_timeout=RCV_TIMEOUT,
            tries=RCV_TRIES):
    """Create *filename* (optionally with *mode*); return the reply ticket."""
    request = {
        'work': 'creat',
        'filename': filename,
        'uid': os.geteuid(),
        'gid': os.getegid(),
    }
    # The mode key is only sent when explicitly requested.
    if mode:
        request['mode'] = mode
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# update the access and mod time of a file
def p_utime(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Update access/modification times of *filename*; return the reply ticket."""
    request = {'work': 'utime', 'fname': filename}
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# delete a pnfs file including its metadata
def p_rm(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Delete a pnfs file including its metadata; return the reply ticket."""
    request = {'work': 'rm', 'fname': filename}
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# delete a pnfs file (leaving the metadata do be put in the trashcan)
def p_remove(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Delete a pnfs file (metadata goes to the trashcan); return the reply ticket."""
    request = {'work': 'remove', 'fname': filename}
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# make a directory
def p_mkdir(self, dirname, uid=None, gid=None, rcv_timeout=RCV_TIMEOUT,
            tries=RCV_TRIES):
    """Make directory *dirname* owned by *uid*/*gid*.

    When uid/gid are not given, the caller's real uid/gid are used.
    Returns the reply ticket.
    """
    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    if uid is None:
        uid = os.getuid()
    if gid is None:
        gid = os.getgid()
    ticket = {'work': 'mkdir',
              'path': dirname,
              'uid': uid,
              'gid': gid
              }
    ticket = self.send(ticket, rcv_timeout=rcv_timeout, tries=tries)
    return ticket
# make a directory
def p_mkdirs(self, dirname, uid=None, gid=None, rcv_timeout=RCV_TIMEOUT,
             tries=RCV_TRIES):
    """Make *dirname*, creating missing parents; return the reply ticket.

    NOTE(review): unlike p_mkdir, uid/gid defaults are passed through as
    None instead of being filled from os.getuid()/os.getgid() -- confirm
    the agent handles None here.
    """
    request = {'work': 'mkdirs', 'dirname': dirname, 'uid': uid, 'gid': gid}
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# remove a directory
def p_rmdir(self, dirname, rcv_timeout=RCV_TIMEOUT,
            tries=RCV_TRIES):
    """Remove directory *dirname*; return the reply ticket."""
    request = {'work': 'rmdir', 'dirname': dirname}
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# remove a directory
def p_list_dir(self, dirname, just_files=0, rcv_timeout=RCV_TIMEOUT,
               tries=RCV_TRIES):
    """List the contents of *dirname*; return the reply ticket.

    With *just_files* true, the agent reports only regular files.
    """
    request = {
        'work': 'list_dir',
        'dirname': dirname,
        'just_files': just_files,
    }
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
# find a file knowning pnfsid and bfid
def p_find_pnfsid_path(self, pnfsid, bfid, file_record=None,
                       likely_path=None,
                       path_type=enstore_constants.BOTH,
                       rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Find a file's path knowing its pnfsid and bfid; return the reply ticket."""
    request = {
        'work': 'find_pnfsid_path',
        'pnfsid': pnfsid,
        'bfid': bfid,
        'file_record': file_record,
        'likely_path': likely_path,
        'path_type': path_type,
    }
    return self.send(request, rcv_timeout=rcv_timeout, tries=tries)
###############################################################################
#Take a a ticket and convert it into a traceback.
def raise_exception(self, ticket):
    """Translate an error reply *ticket* into a raised Python exception.

    Returns None (without raising) when the ticket status is OK.
    The exception class is chosen from the ticket's e_errors status
    code; the 'errno' key defaults to 0 when absent.
    """
    if ticket['status'][0] == e_errors.OK:
        return
    elif ticket['status'][0] == e_errors.IOERROR:
        raise IOError, (ticket['errno'], ticket['status'][1])
    elif ticket['status'][0] == e_errors.OSERROR:
        raise OSError, (ticket.get('errno', 0), ticket['status'][1])
    elif ticket['status'][0] == e_errors.KEYERROR:
        raise KeyError, (ticket.get('errno', 0), ticket['status'][1])
    elif ticket['status'][0] == e_errors.NET_ERROR:
        raise socket.error, (ticket.get('errno', 0), ticket['status'][1])
    elif ticket['status'][0] == e_errors.PNFS_ERROR:
        raise OSError, (ticket.get('errno', 0), ticket['status'][1])
    else:
        # Fallback for unrecognized status codes.
        #Is there anything better?
        raise OSError, (ticket.get('errno', 0), ticket['status'][1])
def get_library(self, dirname, rcv_timeout=RCV_TIMEOUT,
                tries=RCV_TRIES):
    """Return the library tag of *dirname*; raise on an error reply."""
    reply = self.p_get_library(dirname, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['library']
def set_library(self, library, dirname, rcv_timeout=RCV_TIMEOUT,
                tries=RCV_TRIES):
    """Set the library tag of *dirname*; raise on an error reply."""
    reply = self.p_set_library(library, dirname,
                               rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return library  # legacy return value
def get_file_family(self, dirname, rcv_timeout=RCV_TIMEOUT,
                    tries=RCV_TRIES):
    """Return the file family of *dirname*; raise on an error reply."""
    reply = self.p_get_file_family(dirname, rcv_timeout=rcv_timeout,
                                   tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['file_family']
def set_file_family(self, file_family, dirname,
                    rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Set the file family of *dirname*; raise on an error reply."""
    reply = self.p_set_file_family(file_family, dirname,
                                   rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return file_family  # legacy return value
def get_file_family_width(self, dirname, rcv_timeout=RCV_TIMEOUT,
                          tries=RCV_TRIES):
    """Return the file family width of *dirname*; raise on an error reply."""
    reply = self.p_get_file_family_width(dirname, rcv_timeout=rcv_timeout,
                                         tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['file_family_width']
def set_file_family_width(self, file_family_width, dirname,
                          rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Set the file family width of *dirname*; raise on an error reply."""
    reply = self.p_set_file_family_width(file_family_width, dirname,
                                         rcv_timeout=rcv_timeout,
                                         tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return file_family_width  # legacy return value
def get_file_family_wrapper(self, dirname, rcv_timeout=RCV_TIMEOUT,
                            tries=RCV_TRIES):
    """Return the file family wrapper of *dirname*; raise on an error reply."""
    reply = self.p_get_file_family_wrapper(dirname,
                                           rcv_timeout=rcv_timeout,
                                           tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['file_family_wrapper']
def set_file_family_wrapper(self, file_family_wrapper, dirname,
                            rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Set the file family wrapper of *dirname*; raise on an error reply."""
    reply = self.p_set_file_family_wrapper(file_family_wrapper, dirname,
                                           rcv_timeout=rcv_timeout,
                                           tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return file_family_wrapper  # legacy return value
def get_storage_group(self, dirname, rcv_timeout=RCV_TIMEOUT,
                      tries=RCV_TRIES):
    """Return the storage group of *dirname*; raise on an error reply."""
    reply = self.p_get_storage_group(dirname, rcv_timeout=rcv_timeout,
                                     tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['storage_group']
def set_storage_group(self, storage_group, dirname,
                      rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Set the storage group of *dirname*; raise on an error reply."""
    reply = self.p_set_storage_group(storage_group, dirname,
                                     rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return storage_group  # legacy return value
def chmod(self, mode, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Change *filename*'s mode; raise on an error reply."""
    reply = self.p_chmod(mode, filename, rcv_timeout=rcv_timeout,
                         tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def chown(self, uid, gid, filename, rcv_timeout=RCV_TIMEOUT,
          tries=RCV_TRIES):
    """Change *filename*'s ownership to *uid*/*gid*; raise on an error reply."""
    reply_ticket = self.p_chown(uid, gid, filename,
                                rcv_timeout=rcv_timeout, tries=tries)
    # Removed leftover debug output of the raw reply ticket; no other
    # sibling wrapper prints, and it polluted stdout for callers.
    if not e_errors.is_ok(reply_ticket):
        self.raise_exception(reply_ticket)
def touch(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Create *filename* or update its times; raise on an error reply."""
    reply = self.p_touch(filename, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def creat(self, filename, mode=None, rcv_timeout=RCV_TIMEOUT,
          tries=RCV_TRIES):
    """Create *filename* (optionally with *mode*); raise on an error reply."""
    reply = self.p_creat(filename, mode=mode,
                         rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def utime(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Update *filename*'s access/mod times; raise on an error reply."""
    reply = self.p_utime(filename, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def rm(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Delete *filename* including metadata; raise on an error reply."""
    reply = self.p_rm(filename, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def remove(self, filename, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Delete *filename* (metadata to trashcan); raise on an error reply."""
    reply = self.p_remove(filename, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def mkdir(self, dirname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Make directory *dirname*; raise on an error reply."""
    reply = self.p_mkdir(dirname, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def mkdirs(self, dirname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Make *dirname* and missing parents; raise on an error reply."""
    reply = self.p_mkdirs(dirname, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def rmdir(self, dirname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Remove directory *dirname*; raise on an error reply.

    Consistency fix: guard raise_exception() with an is_ok() check like
    every sibling wrapper does (raise_exception already returns on an OK
    status, so behavior for OK replies is unchanged).
    """
    reply_ticket = self.p_rmdir(dirname, rcv_timeout=rcv_timeout,
                                tries=tries)
    if not e_errors.is_ok(reply_ticket):
        self.raise_exception(reply_ticket)
def list_dir(self, dirname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """List the contents of *dirname*; raise on an error reply.

    Bug fix: the reply ticket (which carries the directory listing) was
    computed and then discarded, so the method always returned None.
    Now the reply ticket is returned on success; callers that ignored
    the previous None return are unaffected.
    """
    reply_ticket = self.p_list_dir(dirname, rcv_timeout=rcv_timeout,
                                   tries=tries)
    if not e_errors.is_ok(reply_ticket):
        self.raise_exception(reply_ticket)
    return reply_ticket
def set_file_size(self, size, fname, rcv_timeout=RCV_TIMEOUT,
                  tries=RCV_TRIES):
    """Set *fname*'s recorded size to *size*; raise on an error reply."""
    reply = self.p_set_file_size(size, fname,
                                 rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def set_xreference(self, volume, location_cookie, size, file_family,
                   pnfsFilename, volume_filepath, id, volume_fileP,
                   bit_file_id, drive, crc, filepath,
                   rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Record the volume cross-reference for a file; raise on an error reply."""
    reply = self.p_set_xreference(
        volume, location_cookie, size, file_family,
        pnfsFilename, volume_filepath, id, volume_fileP,
        bit_file_id, drive, crc, filepath,
        rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def get_xreference(self, fname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Return the volume cross-reference of *fname*; raise on an error reply."""
    reply = self.p_get_xreference(fname, rcv_timeout=rcv_timeout,
                                  tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['xref']
def get_file_size(self, fname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Return the recorded size of *fname*; raise on an error reply."""
    reply = self.p_get_file_size(fname, rcv_timeout=rcv_timeout,
                                 tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['size']
def get_path(self, pnfs_id, mount_point, shortcut=None,
             rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Return the filesystem path for *pnfs_id* under *mount_point*."""
    reply = self.p_get_path(pnfs_id, mount_point, shortcut,
                            rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['path']
def set_bit_file_id(self, bfid, fname, rcv_timeout=RCV_TIMEOUT,
                    tries=RCV_TRIES):
    """Record *bfid* as the bit file id of *fname*; raise on an error reply."""
    reply = self.p_set_bit_file_id(bfid, fname,
                                   rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
def get_bit_file_id(self, fname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Return the bit file id of *fname*; raise on an error reply."""
    reply = self.p_get_bit_file_id(fname, rcv_timeout=rcv_timeout,
                                   tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['bfid']
def get_id(self, fname, rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Return the pnfs id of *fname*; raise on an error reply."""
    reply = self.p_get_id(fname, rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['file_id']
def get_nameof(self, pnfsid, rcv_timeout=RCV_TIMEOUT,
               tries=RCV_TRIES):
    """Return the name of the file with *pnfsid*; raise on an error reply."""
    reply = self.p_get_nameof(pnfsid, rcv_timeout=rcv_timeout,
                              tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['nameof']
def get_parent_id(self, pnfsid, rcv_timeout=RCV_TIMEOUT,
                  tries=RCV_TRIES):
    """Return the parent pnfs id of *pnfsid*; raise on an error reply."""
    reply = self.p_get_parent_id(pnfsid, rcv_timeout=rcv_timeout,
                                 tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['parent_id']

# Backward-compatible alias.
get_parent = get_parent_id
def find_pnfsid_path(self, pnfsid, bfid, file_record=None,
                     likely_path=None,
                     path_type=enstore_constants.BOTH,
                     rcv_timeout=RCV_TIMEOUT, tries=RCV_TRIES):
    """Return the paths for a file given its pnfsid and bfid."""
    reply = self.p_find_pnfsid_path(
        pnfsid, bfid, file_record, likely_path, path_type,
        rcv_timeout=rcv_timeout, tries=tries)
    if not e_errors.is_ok(reply):
        self.raise_exception(reply)
    return reply['paths']

# Backward-compatible alias.
find_id_path = find_pnfsid_path
class PnfsAgentClientInterface(generic_client.GenericClientInterface):
    """Command-line option interface for the pnfs agent client.

    Each attribute initialized below is a flag/value that the option
    parser in GenericClientInterface fills from sys.argv; do_work()
    then dispatches on them.
    """
    def __init__(self, args=sys.argv, user_mode=1):
        # Defaults for generic client options.
        self.alive_rcv_timeout = RCV_TIMEOUT
        self.alive_retries = RCV_TRIES
        self.enable = 0
        self.status = 0
        self.notify = []
        self.sendto = []
        self.dump = 0
        self.warm_restart = 0
        # Per-command flags; a non-zero/non-empty value selects the command.
        self.mkdir = 0
        self.mkdirs = 0
        self.rmdir = 0
        self.exists = 0
        self.list_dir = 0
        self.layer = None
        self.remove = 0
        self.touch = 0
        self.size = 0
        self.id = 0
        self.just_files = 0 #optionally used with --list-dir
        generic_client.GenericClientInterface.__init__(self, args=args,
                                                       user_mode=user_mode)
        return

    def valid_dictionaries(self):
        """Return the option dictionaries the parser should accept."""
        return (self.alive_options, self.help_options, self.trace_options,
                self.pnfs_agent_options)

    # Option descriptors understood by the enstore option framework.
    pnfs_agent_options = {
        option.EXISTS:{option.HELP_STRING:"return true if file exists, false" \
                                          " otherwise",
                       option.DEFAULT_VALUE:option.DEFAULT,
                       option.DEFAULT_TYPE:option.STRING,
                       option.VALUE_USAGE:option.REQUIRED,
                       option.USER_LEVEL:option.ADMIN},
        option.ID:{option.HELP_STRING:"prints the pnfs id",
                   option.DEFAULT_VALUE:option.DEFAULT,
                   option.DEFAULT_NAME:"id",
                   option.DEFAULT_TYPE:option.INTEGER,
                   option.VALUE_NAME:"filename",
                   option.VALUE_TYPE:option.STRING,
                   option.VALUE_USAGE:option.REQUIRED,
                   option.VALUE_LABEL:"filename",
                   option.FORCE_SET_DEFAULT:option.FORCE,
                   option.USER_LEVEL:option.ADMIN,
                   },
        option.LAYER:{option.HELP_STRING:"get layer information",
                      option.VALUE_TYPE:option.INTEGER,
                      option.VALUE_USAGE:option.REQUIRED,
                      #option.VALUE_LABEL:"layer",
                      option.USER_LEVEL:option.ADMIN,
                      option.EXTRA_VALUES:[{option.VALUE_NAME:"filename",
                                            option.VALUE_TYPE:option.STRING,
                                            option.VALUE_USAGE:option.REQUIRED,},
                                           ]},
        option.LIST_DIR:{option.HELP_STRING:"list directory contents",
                         option.DEFAULT_VALUE:option.DEFAULT,
                         option.DEFAULT_TYPE:option.STRING,
                         option.VALUE_USAGE:option.REQUIRED,
                         option.USER_LEVEL:option.ADMIN},
        option.JUST_FILES:{option.HELP_STRING:"used with --list-dir to only" \
                                              " report regular files",
                           option.DEFAULT_VALUE:option.DEFAULT,
                           option.DEFAULT_TYPE:option.INTEGER,
                           option.VALUE_USAGE:option.IGNORED,
                           option.USER_LEVEL:option.ADMIN},
        option.MKDIR:{option.HELP_STRING:"make directory",
                      option.DEFAULT_VALUE:option.DEFAULT,
                      option.DEFAULT_TYPE:option.STRING,
                      option.VALUE_USAGE:option.REQUIRED,
                      option.USER_LEVEL:option.ADMIN},
        option.MKDIRS:{option.HELP_STRING:"make directory; including missing",
                       option.DEFAULT_VALUE:option.DEFAULT,
                       option.DEFAULT_TYPE:option.STRING,
                       option.VALUE_USAGE:option.REQUIRED,
                       option.USER_LEVEL:option.ADMIN},
        option.REMOVE:{option.HELP_STRING:"remove file",
                       option.DEFAULT_VALUE:option.DEFAULT,
                       option.DEFAULT_TYPE:option.STRING,
                       option.VALUE_USAGE:option.REQUIRED,
                       option.USER_LEVEL:option.ADMIN},
        option.RMDIR:{option.HELP_STRING:"remove directory",
                      option.DEFAULT_VALUE:option.DEFAULT,
                      option.DEFAULT_TYPE:option.STRING,
                      option.VALUE_USAGE:option.REQUIRED,
                      option.USER_LEVEL:option.ADMIN},
        option.SIZE:{option.HELP_STRING:"sets the size of the file",
                     option.DEFAULT_VALUE:option.DEFAULT,
                     option.DEFAULT_NAME:"size",
                     option.DEFAULT_TYPE:option.INTEGER,
                     option.VALUE_NAME:"filename",
                     option.VALUE_TYPE:option.STRING,
                     option.VALUE_USAGE:option.REQUIRED,
                     option.VALUE_LABEL:"filename",
                     option.FORCE_SET_DEFAULT:option.FORCE,
                     option.USER_LEVEL:option.USER2,
                     option.EXTRA_VALUES:[{option.VALUE_NAME:"filesize",
                                           option.VALUE_TYPE:option.LONG,
                                           option.VALUE_USAGE:option.REQUIRED,
                                           },]
                     },
        option.STATUS:{option.HELP_STRING:"print pnfs_agent status",
                       option.DEFAULT_VALUE:option.DEFAULT,
                       option.DEFAULT_TYPE:option.INTEGER,
                       option.VALUE_USAGE:option.IGNORED,
                       option.USER_LEVEL:option.ADMIN},
        option.TOUCH:{option.HELP_STRING:"make file",
                      option.DEFAULT_VALUE:option.DEFAULT,
                      option.DEFAULT_TYPE:option.STRING,
                      option.VALUE_USAGE:option.REQUIRED,
                      option.USER_LEVEL:option.ADMIN},
        }
def do_work(intf):
    """Dispatch the parsed command-line options in *intf* to agent requests.

    Exactly one command option is honored per run (first match in the
    if/elif chain below); idempotent commands treat an already-satisfied
    precondition (e.g. mkdir of an existing directory) as success.
    """
    pac = PnfsAgentClient((intf.config_host, intf.config_port),
                          rcv_timeout = intf.alive_rcv_timeout,
                          rcv_tries = intf.alive_retries)
    Trace.init(pac.get_name(MY_NAME))
    ticket = {}
    try:
        # Generic commands (--alive etc.) take precedence over ours.
        ticket = pac.handle_generic_commands(MY_SERVER, intf)
        if ticket:
            pass
        elif intf.status:
            ticket = pac.status(intf.alive_rcv_timeout,intf.alive_retries)
            pprint.pprint(ticket)
        elif intf.mkdir:
            # Only create when the target does not already exist.
            if not pac.e_access(intf.mkdir, os.F_OK):
                ticket = pac.p_mkdir(intf.mkdir)
            else:
                ticket = {'status' : (e_errors.OK, None)}
        elif intf.mkdirs:
            if not pac.e_access(intf.mkdirs, os.F_OK):
                ticket = pac.p_mkdirs(intf.mkdirs)
            else:
                ticket = {'status' : (e_errors.OK, None)}
        elif intf.rmdir:
            # Only remove when the target exists.
            if pac.e_access(intf.rmdir, os.F_OK):
                ticket = pac.p_rmdir(intf.rmdir)
            else:
                ticket = {'status' : (e_errors.OK, None)}
        elif intf.exists:
            # Exit status communicates the answer: 0 = exists, 1 = not.
            if pac.e_access(intf.exists, os.F_OK):
                sys.exit(0)
            else:
                sys.exit(1)
        elif intf.list_dir:
            ticket = pac.p_list_dir(intf.list_dir, intf.just_files)
            if e_errors.is_ok(ticket):
                for file_info in ticket.get('dir_list', {}):
                    print file_info['name']
        elif intf.layer:
            layer_info = pac.readlayer(intf.layer, intf.filename)
            if not layer_info:
                # readlayer returns [] for both errors and empty layers.
                ticket = {'status' : (e_errors.UNKNOWN, None)}
            else:
                for line in layer_info:
                    print line,
                ticket = {'status' : (e_errors.OK, None)}
        elif intf.remove:
            if pac.e_access(intf.remove, os.F_OK):
                ticket = pac.p_remove(intf.remove)
            else:
                ticket = {'status' : (e_errors.OK, None)}
        elif intf.touch:
            if not pac.e_access(intf.touch, os.F_OK):
                ticket = pac.p_touch(intf.touch)
            else:
                ticket = {'status' : (e_errors.OK, None)}
        elif intf.size:
            if pac.e_access(intf.filename, os.F_OK):
                ticket = pac.p_set_file_size(intf.filesize, intf.filename)
            else:
                ticket = {'status' : (e_errors.DOESNOTEXIST, intf.filename)}
        elif intf.id:
            if pac.e_access(intf.filename, os.F_OK):
                ticket = pac.p_get_id(intf.filename)
                if e_errors.is_ok(ticket):
                    print ticket['file_id']
            else:
                ticket = {'status' : (e_errors.DOESNOTEXIST, intf.filename)}
        else:
            # No command given: show usage and exit successfully.
            intf.print_help()
            sys.exit(0)
        pac.check_ticket(ticket)
    except (KeyboardInterrupt):
        sys.exit(1)
# Script entry point: initialize tracing, parse the command line, dispatch.
if __name__ == "__main__":
    Trace.init(MY_NAME)
    Trace.trace( 6, 'pac called with args: %s'%(sys.argv,) )
    # fill in the interface
    intf = PnfsAgentClientInterface(user_mode=0)
    do_work(intf)
|
<filename>trajetoria/aula7_modelos_cinematicos.py<gh_stars>0
try:
import sim
except:
print ('--------------------------------------------------------------')
print ('"sim.py" could not be imported. This means very probably that')
print ('either "sim.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "sim.py"')
print ('--------------------------------------------------------------')
print ('')
import time
import numpy as np
def Rz(theta):
    """Return the 3x3 rotation matrix about the z-axis by *theta* radians."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s,  c, 0.0],
                     [0.0, 0.0, 1.0]])
####################################################################################
#                                                                                  #
#               REMEMBER: THE SIMULATION MUST ALREADY BE RUNNING!                  #
#                                                                                  #
####################################################################################
print ('Program started')
sim.simxFinish(-1) # just in case, close all opened connections
clientID=sim.simxStart('127.0.0.1',19999,True,True,5000,5) # Connect to CoppeliaSim
if clientID!=-1:
    print ('Connected to remote API server')

    # Handles for the robot body and its two wheel motors.
    robotname = 'Pioneer_p3dx'
    returnCode, robotHandle = sim.simxGetObjectHandle(clientID, robotname, sim.simx_opmode_oneshot_wait)
    returnCode, l_wheel = sim.simxGetObjectHandle(clientID, robotname + '_leftMotor', sim.simx_opmode_oneshot_wait)
    returnCode, r_wheel = sim.simxGetObjectHandle(clientID, robotname + '_rightMotor', sim.simx_opmode_oneshot_wait)

    # Robot-specific parameters (wheel separation L, wheel radius r).
    # Datasheet: https://www.generationrobots.com/media/Pioneer3DX-P3DX-RevA.pdf
    # L = 0.381 # Meters
    # r = 0.0975 # Meters
    L = 0.331
    r = 0.09751

    # Desired velocity (linear v [m/s], angular w [rad/s]).
    v = .3
    w = np.deg2rad(10)

    # Inverse kinematics: wheel angular speeds for the requested (v, w).
    wr = ((2.0*v) + (w*L))/(2.0*r)
    wl = ((2.0*v) - (w*L))/(2.0*r)
    u = np.array([wr, wl])

    # Send wheel velocities to the simulator.
    sim.simxSetJointTargetVelocity(clientID, l_wheel, wl, sim.simx_opmode_oneshot_wait)
    sim.simxSetJointTargetVelocity(clientID, r_wheel, wr, sim.simx_opmode_oneshot_wait)

    # Integrated pose estimate q = (x, y, theta) and elapsed time.
    q = np.array([0, 0, 0])
    t = 0

    # Remember to enable 'Real-time mode' in the simulator.
    startTime=time.time()
    lastTime = startTime
    while t < 10:
        now = time.time()
        dt = now - lastTime

        # Forward kinematics: integrate pose with the Jacobian of the
        # differential-drive model evaluated at the current heading q[2].
        Mdir = np.array([[r*np.cos(q[2])/2, r*np.cos(q[2])/2], [r*np.sin(q[2])/2, r*np.sin(q[2])/2], [r/L, -r/L]])
        q = q + (Mdir @ u)*dt

        t = t + dt
        lastTime = now

        print('==> ', t, q[:2], np.rad2deg(q[2]))
        # Compare the dead-reckoned pose with the simulator's ground truth.
        returnCode, pos = sim.simxGetObjectPosition(clientID, robotHandle, -1, sim.simx_opmode_oneshot_wait)
        print('Pos: ', pos)
        returnCode, ori = sim.simxGetObjectOrientation(clientID, robotHandle, -1, sim.simx_opmode_oneshot_wait)
        print('Ori: ', np.rad2deg(ori))

    # Stop the robot before disconnecting.
    sim.simxSetJointTargetVelocity(clientID, r_wheel, 0, sim.simx_opmode_oneshot_wait)
    sim.simxSetJointTargetVelocity(clientID, l_wheel, 0, sim.simx_opmode_oneshot_wait)

    # Now close the connection to CoppeliaSim:
    sim.simxFinish(clientID)
else:
    print ('Failed connecting to remote API server')
print ('Program ended')
<filename>icefall/decode.py<gh_stars>100-1000
# Copyright 2021 Xiaomi Corp. (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List, Optional, Union
import k2
import torch
from icefall.utils import get_texts
def _intersect_device(
    a_fsas: k2.Fsa,
    b_fsas: k2.Fsa,
    b_to_a_map: torch.Tensor,
    sorted_match_a: bool,
    batch_size: int = 50,
) -> k2.Fsa:
    """Batched wrapper around :func:`k2.intersect_device`.

    Splits `b_fsas` into chunks of at most *batch_size* FSAs and
    intersects each chunk separately to avoid CUDA OOM; arguments and
    return value match :func:`k2.intersect_device`.
    """
    total = b_fsas.shape[0]
    # Small inputs go straight through without the batching machinery.
    if total <= batch_size:
        return k2.intersect_device(
            a_fsas, b_fsas, b_to_a_map=b_to_a_map, sorted_match_a=sorted_match_a
        )

    pieces = []
    for start in range(0, total, batch_size):
        end = min(start + batch_size, total)
        # Indexes live on the same device/dtype as b_to_a_map.
        idx = torch.arange(start, end).to(b_to_a_map)
        chunk = k2.index_fsa(b_fsas, idx)
        chunk_map = k2.index_select(b_to_a_map, idx)
        pieces.append(
            k2.intersect_device(
                a_fsas, chunk, b_to_a_map=chunk_map,
                sorted_match_a=sorted_match_a,
            )
        )
    return k2.cat(pieces)
def get_lattice(
    nnet_output: torch.Tensor,
    decoding_graph: k2.Fsa,
    supervision_segments: torch.Tensor,
    search_beam: float,
    output_beam: float,
    min_active_states: int,
    max_active_states: int,
    subsampling_factor: int = 1,
) -> k2.Fsa:
    """Build a decoding lattice via pruned intersection.

    Args:
      nnet_output:
        Neural-model output of shape `(N, T, C)`.
      decoding_graph:
        The decoding graph Fsa: an HLG (see `compile_HLG.py`) or an H
        (see `k2.ctc_topo`).
      supervision_segments:
        2-D **CPU** `torch.int32` tensor with 3 columns per supervision
        segment: sequence index, start frame, duration.
      search_beam:
        Decoding beam, e.g. 20. Smaller is faster, larger is more exact
        (less pruning); may be adjusted by the active-state bounds.
      output_beam:
        Pruning beam for the output, relative to the best path
        (like lattice-beam in Kaldi).
      min_active_states:
        Advisory lower bound on active FSA states per frame per task;
        0 means no constraint.
      max_active_states:
        Advisory upper bound on active FSA states per frame per task;
        use a very large number for no constraint.
      subsampling_factor:
        The model's subsampling factor.
    Returns:
      An FsaVec with axes [utt][state][arc] containing the decoding result.
    """
    dense = k2.DenseFsaVec(
        nnet_output,
        supervision_segments,
        allow_truncate=subsampling_factor - 1,
    )
    return k2.intersect_dense_pruned(
        decoding_graph,
        dense,
        search_beam=search_beam,
        output_beam=output_beam,
        min_active_states=min_active_states,
        max_active_states=max_active_states,
    )
class Nbest(object):
"""
An Nbest object contains two fields:
(1) fsa. It is an FsaVec containing a vector of **linear** FSAs.
Its axes are [path][state][arc]
(2) shape. Its type is :class:`k2.RaggedShape`.
Its axes are [utt][path]
The field `shape` has two axes [utt][path]. `shape.dim0` contains
the number of utterances, which is also the number of rows in the
supervision_segments. `shape.tot_size(1)` contains the number
of paths, which is also the number of FSAs in `fsa`.
Caution:
Don't be confused by the name `Nbest`. The best in the name `Nbest`
has nothing to do with `best scores`. The important part is
`N` in `Nbest`, not `best`.
"""
def __init__(self, fsa: k2.Fsa, shape: k2.RaggedShape) -> None:
    """
    Args:
      fsa:
        An FsaVec with axes [path][state][arc]. It is expected to contain
        a list of **linear** FSAs.
      shape:
        A ragged shape with two axes [utt][path].
    """
    # Validate the invariants documented on the class: fsa is a 3-axis
    # FsaVec and shape pairs every path with an utterance.
    assert len(fsa.shape) == 3, f"fsa.shape: {fsa.shape}"
    assert shape.num_axes == 2, f"num_axes: {shape.num_axes}"

    # The number of FSAs must equal the total number of paths.
    if fsa.shape[0] != shape.tot_size(1):
        raise ValueError(
            f"{fsa.shape[0]} vs {shape.tot_size(1)}\n"
            "Number of FSAs in `fsa` does not match the given shape"
        )
    self.fsa = fsa
    self.shape = shape
def __str__(self):
    """Short human-readable summary: utterance and path counts."""
    return (
        f"Nbest(Number of utterances:{self.shape.dim0}, "
        f"Number of Paths:{self.fsa.shape[0]})"
    )
@staticmethod
def from_lattice(
    lattice: k2.Fsa,
    num_paths: int,
    use_double_scores: bool = True,
    nbest_scale: float = 0.5,
) -> "Nbest":
    """Construct an Nbest object by **sampling** `num_paths` from a lattice.

    Each sampled path is a linear FSA.
    We assume `lattice.labels` contains token IDs and `lattice.aux_labels`
    contains word IDs.
    Args:
      lattice:
        An FsaVec with axes [utt][state][arc].
      num_paths:
        Number of paths to **sample** from the lattice
        using :func:`k2.random_paths`.
      use_double_scores:
        True to use double precision in :func:`k2.random_paths`.
        False to use single precision.
      nbest_scale:
        Scale `lattice.score` before passing it to :func:`k2.random_paths`.
        A smaller value leads to more unique paths at the risk of being not
        to sample the path with the best score.
    Returns:
      Return an Nbest instance.
    """
    # Scale scores only for sampling; restore the originals afterwards.
    saved_scores = lattice.scores.clone()
    lattice.scores *= nbest_scale
    # path is a ragged tensor with dtype torch.int32.
    # It has three axes [utt][path][arc_pos]
    path = k2.random_paths(
        lattice, num_paths=num_paths, use_double_scores=use_double_scores
    )
    lattice.scores = saved_scores

    # word_seq is a k2.RaggedTensor sharing the same shape as `path`
    # but it contains word IDs. Note that it also contains 0s and -1s.
    # The last entry in each sublist is -1.
    # Its axes are [utt][path][word_id]
    if isinstance(lattice.aux_labels, torch.Tensor):
        word_seq = k2.ragged.index(lattice.aux_labels, path)
    else:
        word_seq = lattice.aux_labels.index(path)
        word_seq = word_seq.remove_axis(word_seq.num_axes - 2)
    word_seq = word_seq.remove_values_leq(0)

    # Each utterance has `num_paths` paths but some of them transduces
    # to the same word sequence, so we need to remove repeated word
    # sequences within an utterance. After removing repeats, each utterance
    # contains different number of paths
    #
    # `new2old` is a 1-D torch.Tensor mapping from the output path index
    # to the input path index.
    _, _, new2old = word_seq.unique(
        need_num_repeats=False, need_new2old_indexes=True
    )

    # kept_path is a ragged tensor with dtype torch.int32.
    # It has axes [utt][path][arc_pos]
    kept_path, _ = path.index(new2old, axis=1, need_value_indexes=False)

    # utt_to_path_shape has axes [utt][path]
    utt_to_path_shape = kept_path.shape.get_layer(0)

    # Remove the utterance axis.
    # Now kept_path has only two axes [path][arc_pos]
    kept_path = kept_path.remove_axis(0)

    # labels is a ragged tensor with 2 axes [path][token_id]
    # Note that it contains -1s.
    labels = k2.ragged.index(lattice.labels.contiguous(), kept_path)

    # Remove -1 from labels as we will use it to construct a linear FSA
    labels = labels.remove_values_eq(-1)

    if isinstance(lattice.aux_labels, k2.RaggedTensor):
        # lattice.aux_labels is a ragged tensor with dtype torch.int32.
        # It has 2 axes [arc][word], so aux_labels is also a ragged tensor
        # with 2 axes [arc][word]
        aux_labels, _ = lattice.aux_labels.index(
            indexes=kept_path.values, axis=0, need_value_indexes=False
        )
    else:
        assert isinstance(lattice.aux_labels, torch.Tensor)
        aux_labels = k2.index_select(lattice.aux_labels, kept_path.values)
        # aux_labels is a 1-D torch.Tensor. It also contains -1 and 0.

    fsa = k2.linear_fsa(labels)
    fsa.aux_labels = aux_labels
    # Caution: fsa.scores are all 0s.
    # `fsa` has only one extra attribute: aux_labels.
    return Nbest(fsa=fsa, shape=utt_to_path_shape)
def intersect(self, lattice: k2.Fsa, use_double_scores=True) -> "Nbest":
    """Intersect this Nbest object with a lattice, get 1-best
    path from the resulting FsaVec, and return a new Nbest object.

    The purpose of this function is to attach scores to an Nbest.
    Args:
      lattice:
        An FsaVec with axes [utt][state][arc]. If it has `aux_labels`, then
        we assume its `labels` are token IDs and `aux_labels` are word IDs.
        If it has only `labels`, we assume its `labels` are word IDs.
      use_double_scores:
        True to use double precision when computing shortest path.
        False to use single precision.
    Returns:
      Return a new Nbest. This new Nbest shares the same shape with `self`,
      while its `fsa` is the 1-best path from intersecting `self.fsa` and
      `lattice`. Also, its `fsa` has non-zero scores and inherits attributes
      from `lattice`.
    """
    # Note: We view each linear FSA as a word sequence
    # and we use the passed lattice to give each word sequence a score.
    #
    # We are not viewing each linear FSAs as a token sequence.
    #
    # So we use k2.invert() here.

    # We use a word fsa to intersect with k2.invert(lattice)
    word_fsa = k2.invert(self.fsa)

    if hasattr(lattice, "aux_labels"):
        # delete token IDs as it is not needed
        del word_fsa.aux_labels

    # Zero the scores so the intersection scores come from `lattice` only.
    word_fsa.scores.zero_()
    word_fsa_with_epsilon_loops = k2.remove_epsilon_and_add_self_loops(
        word_fsa
    )

    path_to_utt_map = self.shape.row_ids(1)

    if hasattr(lattice, "aux_labels"):
        # lattice has token IDs as labels and word IDs as aux_labels.
        # inv_lattice has word IDs as labels and token IDs as aux_labels
        inv_lattice = k2.invert(lattice)
        inv_lattice = k2.arc_sort(inv_lattice)
    else:
        inv_lattice = k2.arc_sort(lattice)

    if inv_lattice.shape[0] == 1:
        # Single-lattice case: every path maps to lattice index 0.
        path_lattice = _intersect_device(
            inv_lattice,
            word_fsa_with_epsilon_loops,
            b_to_a_map=torch.zeros_like(path_to_utt_map),
            sorted_match_a=True,
        )
    else:
        path_lattice = _intersect_device(
            inv_lattice,
            word_fsa_with_epsilon_loops,
            b_to_a_map=path_to_utt_map,
            sorted_match_a=True,
        )

    # path_lattice has word IDs as labels and token IDs as aux_labels
    path_lattice = k2.top_sort(k2.connect(path_lattice))

    one_best = k2.shortest_path(
        path_lattice, use_double_scores=use_double_scores
    )

    one_best = k2.invert(one_best)
    # Now one_best has token IDs as labels and word IDs as aux_labels

    return Nbest(fsa=one_best, shape=self.shape)
def compute_am_scores(self) -> k2.RaggedTensor:
"""Compute AM scores of each linear FSA (i.e., each path within
an utterance).
Hint:
`self.fsa.scores` contains two parts: acoustic scores (AM scores)
and n-gram language model scores (LM scores).
Caution:
We require that ``self.fsa`` has an attribute ``lm_scores``.
Returns:
Return a ragged tensor with 2 axes [utt][path_scores].
Its dtype is torch.float64.
"""
scores_shape = self.fsa.arcs.shape().remove_axis(1)
# scores_shape has axes [path][arc]
am_scores = self.fsa.scores - self.fsa.lm_scores
ragged_am_scores = k2.RaggedTensor(scores_shape, am_scores.contiguous())
tot_scores = ragged_am_scores.sum()
return k2.RaggedTensor(self.shape, tot_scores)
def compute_lm_scores(self) -> k2.RaggedTensor:
"""Compute LM scores of each linear FSA (i.e., each path within
an utterance).
Hint:
`self.fsa.scores` contains two parts: acoustic scores (AM scores)
and n-gram language model scores (LM scores).
Caution:
We require that ``self.fsa`` has an attribute ``lm_scores``.
Returns:
Return a ragged tensor with 2 axes [utt][path_scores].
Its dtype is torch.float64.
"""
scores_shape = self.fsa.arcs.shape().remove_axis(1)
# scores_shape has axes [path][arc]
ragged_lm_scores = k2.RaggedTensor(
scores_shape, self.fsa.lm_scores.contiguous()
)
tot_scores = ragged_lm_scores.sum()
return k2.RaggedTensor(self.shape, tot_scores)
def tot_scores(self) -> k2.RaggedTensor:
"""Get total scores of FSAs in this Nbest.
Note:
Since FSAs in Nbest are just linear FSAs, log-semiring
and tropical semiring produce the same total scores.
Returns:
Return a ragged tensor with two axes [utt][path_scores].
Its dtype is torch.float64.
"""
scores_shape = self.fsa.arcs.shape().remove_axis(1)
# scores_shape has axes [path][arc]
ragged_scores = k2.RaggedTensor(
scores_shape, self.fsa.scores.contiguous()
)
tot_scores = ragged_scores.sum()
return k2.RaggedTensor(self.shape, tot_scores)
    def build_levenshtein_graphs(self) -> k2.Fsa:
        """Return an FsaVec with axes [utt][state][arc].

        Each path in ``self.fsa`` is turned into a Levenshtein graph over
        the IDs extracted by ``get_texts`` (presumably word IDs — the
        graphs are intended for use with ``k2.levenshtein_alignment``).
        """
        # get_texts(..., return_ragged=True) yields a ragged tensor with
        # one sub-list of IDs per path.
        word_ids = get_texts(self.fsa, return_ragged=True)
        return k2.levenshtein_graph(word_ids)
def one_best_decoding(
    lattice: k2.Fsa,
    use_double_scores: bool = True,
) -> k2.Fsa:
    """Extract the single best path from a decoding lattice.

    Args:
      lattice:
        The decoding lattice returned by :func:`get_lattice`.
      use_double_scores:
        True to use double precision floating point in the computation.
        False to use single precision.
    Return:
      An FsaVec containing linear paths.
    """
    return k2.shortest_path(lattice, use_double_scores=use_double_scores)
def nbest_decoding(
    lattice: k2.Fsa,
    num_paths: int,
    use_double_scores: bool = True,
    nbest_scale: float = 1.0,
) -> k2.Fsa:
    """N-best decoding, similar in spirit to CTC prefix beam search.

    `num_paths` paths are drawn from the lattice, viewed as word
    sequences, and each word sequence receives a total score in the
    tropical semiring. The highest-scoring path of each utterance is
    returned as the decoding output.

    Caution:
      Don't be confused by `best` in the name `n-best`. Paths are
      selected **randomly**, not by ranking their scores.
    Hint:
      This decoding method is for demonstration only and it does
      not produce a lower WER than :func:`one_best_decoding`.
    Args:
      lattice:
        The decoding lattice, e.g., can be the return value of
        :func:`get_lattice`. It has 3 axes [utt][state][arc].
      num_paths:
        It specifies the size `n` in n-best. Note: Paths are selected
        randomly and those containing identical word sequences are
        removed and only one of them is kept.
      use_double_scores:
        True to use double precision floating point in the computation.
        False to use single precision.
      nbest_scale:
        It's the scale applied to the `lattice.scores`. A smaller value
        leads to more unique paths at the risk of missing the correct
        path.
    Returns:
      An FsaVec containing **linear** FSAs. Its axes are
      [utt][state][arc].
    """
    sampled = Nbest.from_lattice(
        lattice=lattice,
        num_paths=num_paths,
        use_double_scores=use_double_scores,
        nbest_scale=nbest_scale,
    )
    # Right after sampling, every path carries zero scores; intersecting
    # with the lattice assigns the real scores.
    rescored = sampled.intersect(lattice)
    # Index of the top-scoring path within each utterance.
    winners = rescored.tot_scores().argmax()
    return k2.index_fsa(rescored.fsa, winners)
def nbest_oracle(
    lattice: k2.Fsa,
    num_paths: int,
    ref_texts: List[str],
    word_table: k2.SymbolTable,
    use_double_scores: bool = True,
    nbest_scale: float = 0.5,
    oov: str = "<UNK>",
) -> k2.Fsa:
    """Select the best hypothesis given a lattice and a reference transcript.
    The basic idea is to extract `num_paths` paths from the given lattice,
    unique them, and select the one that has the minimum edit distance with
    the corresponding reference transcript as the decoding output.
    The decoding result returned from this function is the best result that
    we can obtain using n-best decoding with all kinds of rescoring techniques.
    This function is useful to tune the value of `nbest_scale`.
    Args:
      lattice:
        An FsaVec with axes [utt][state][arc].
        Note: We assume its `aux_labels` contains word IDs.
      num_paths:
        The size of `n` in n-best.
      ref_texts:
        A list of reference transcript. Each entry contains space(s)
        separated words
      word_table:
        It is the word symbol table.
      use_double_scores:
        True to use double precision for computation. False to use
        single precision.
      nbest_scale:
        It's the scale applied to the lattice.scores. A smaller value
        yields more unique paths.
      oov:
        The out of vocabulary word.
    Return:
      Return an FsaVec containing linear FSAs: for each utterance, the
      extracted path whose Levenshtein alignment score against the
      reference transcript is maximal (i.e., the oracle path).
    """
    device = lattice.device
    nbest = Nbest.from_lattice(
        lattice=lattice,
        num_paths=num_paths,
        use_double_scores=use_double_scores,
        nbest_scale=nbest_scale,
    )
    hyps = nbest.build_levenshtein_graphs()
    oov_id = word_table[oov]
    word_ids_list = []
    for text in ref_texts:
        word_ids = []
        for word in text.split():
            if word in word_table:
                word_ids.append(word_table[word])
            else:
                # Words not in the symbol table are mapped to the OOV ID.
                word_ids.append(oov_id)
        word_ids_list.append(word_ids)
    refs = k2.levenshtein_graph(word_ids_list, device=device)
    levenshtein_alignment = k2.levenshtein_alignment(
        refs=refs,
        hyps=hyps,
        hyp_to_ref_map=nbest.shape.row_ids(1),
        sorted_match_ref=True,
    )
    tot_scores = levenshtein_alignment.get_tot_scores(
        use_double_scores=False, log_semiring=False
    )
    ragged_tot_scores = k2.RaggedTensor(nbest.shape, tot_scores)
    # Pick, per utterance, the path with the best (max) alignment score.
    max_indexes = ragged_tot_scores.argmax()
    best_path = k2.index_fsa(nbest.fsa, max_indexes)
    return best_path
def rescore_with_n_best_list(
    lattice: k2.Fsa,
    G: k2.Fsa,
    num_paths: int,
    lm_scale_list: List[float],
    nbest_scale: float = 1.0,
    use_double_scores: bool = True,
) -> Dict[str, k2.Fsa]:
    """Rescore an n-best list with an n-gram LM.
    The path with the maximum score is used as the decoding output.
    Args:
      lattice:
        An FsaVec with axes [utt][state][arc]. It must have the following
        attributes: ``aux_labels`` and ``lm_scores``. Its labels are
        token IDs and ``aux_labels`` word IDs.
      G:
        An FsaVec containing only a single FSA. It is an n-gram LM.
      num_paths:
        Size of nbest list.
      lm_scale_list:
        A list of float representing LM score scales.
      nbest_scale:
        Scale to be applied to ``lattice.score`` when sampling paths
        using ``k2.random_paths``.
      use_double_scores:
        True to use double precision during computation. False to use
        single precision.
    Returns:
      A dict of FsaVec, whose key is an lm_scale and the value is the
      best decoding path for each utterance in the lattice.
    """
    device = lattice.device
    assert len(lattice.shape) == 3
    assert hasattr(lattice, "aux_labels")
    assert hasattr(lattice, "lm_scores")
    # G must be a single FSA, live on the same device as the lattice,
    # and be an acceptor (no aux_labels).
    assert G.shape == (1, None, None)
    assert G.device == device
    assert hasattr(G, "aux_labels") is False
    nbest = Nbest.from_lattice(
        lattice=lattice,
        num_paths=num_paths,
        use_double_scores=use_double_scores,
        nbest_scale=nbest_scale,
    )
    # nbest.fsa.scores are all 0s at this point
    nbest = nbest.intersect(lattice)
    # Now nbest.fsa has its scores set
    assert hasattr(nbest.fsa, "lm_scores")
    # AM scores must be extracted before the second intersection below
    # replaces the scores with G's LM scores.
    am_scores = nbest.compute_am_scores()
    nbest = nbest.intersect(G)
    # Now nbest contains only lm scores
    lm_scores = nbest.tot_scores()
    ans = dict()
    for lm_scale in lm_scale_list:
        # Dividing the AM scores by lm_scale (instead of multiplying the
        # LM scores by it) scales every path's total by the same positive
        # factor, so the per-utterance argmax is unchanged.
        tot_scores = am_scores.values / lm_scale + lm_scores.values
        tot_scores = k2.RaggedTensor(nbest.shape, tot_scores)
        max_indexes = tot_scores.argmax()
        best_path = k2.index_fsa(nbest.fsa, max_indexes)
        key = f"lm_scale_{lm_scale}"
        ans[key] = best_path
    return ans
def rescore_with_whole_lattice(
    lattice: k2.Fsa,
    G_with_epsilon_loops: k2.Fsa,
    lm_scale_list: Optional[List[float]] = None,
    use_double_scores: bool = True,
) -> Optional[Union[k2.Fsa, Dict[str, k2.Fsa]]]:
    """Intersect the lattice with an n-gram LM and use shortest path
    to decode.
    The input lattice is obtained by intersecting `HLG` with
    a DenseFsaVec, where the `G` in `HLG` is in general a 3-gram LM.
    The input `G_with_epsilon_loops` is usually a 4-gram LM. You can consider
    this function as a second pass decoding. In the first pass decoding, we
    use a small G, while we use a larger G in the second pass decoding.
    Caution:
      This function modifies `lattice` in place: its `scores` are
      rewritten to contain only AM scores and its `lm_scores` attribute
      is deleted.
    Args:
      lattice:
        An FsaVec with axes [utt][state][arc]. Its `aux_lables` are word IDs.
        It must have an attribute `lm_scores`.
      G_with_epsilon_loops:
        An FsaVec containing only a single FSA. It contains epsilon self-loops.
        It is an acceptor and its labels are word IDs.
      lm_scale_list:
        Optional. If none, return the intersection of `lattice` and
        `G_with_epsilon_loops`.
        If not None, it contains a list of values to scale LM scores.
        For each scale, there is a corresponding decoding result contained in
        the resulting dict.
      use_double_scores:
        True to use double precision in the computation.
        False to use single precision.
    Returns:
      If `lm_scale_list` is None, return a new lattice which is the intersection
      result of `lattice` and `G_with_epsilon_loops`.
      Otherwise, return a dict whose key is an entry in `lm_scale_list` and the
      value is the decoding result (i.e., an FsaVec containing linear FSAs).
      Returns None if the intersection still runs out of memory after
      `max_loop_count` pruning attempts.
    """
    # Nbest is not used in this function
    assert hasattr(lattice, "lm_scores")
    assert G_with_epsilon_loops.shape == (1, None, None)
    device = lattice.device
    # Strip the first-pass LM scores; only AM scores are kept because
    # the LM scores will come from G below. NOTE: in-place mutation.
    lattice.scores = lattice.scores - lattice.lm_scores
    # We will use lm_scores from G, so remove lats.lm_scores here
    del lattice.lm_scores
    assert hasattr(G_with_epsilon_loops, "lm_scores")
    # Now, lattice.scores contains only am_scores
    # inv_lattice has word IDs as labels.
    # Its `aux_labels` is token IDs
    inv_lattice = k2.invert(lattice)
    num_seqs = lattice.shape[0]
    # Every sequence intersects the single FSA in G (index 0).
    b_to_a_map = torch.zeros(num_seqs, device=device, dtype=torch.int32)
    # Retry loop: on OOM, prune the lattice and try again, up to
    # max_loop_count times.
    max_loop_count = 10
    loop_count = 0
    while loop_count <= max_loop_count:
        loop_count += 1
        try:
            rescoring_lattice = k2.intersect_device(
                G_with_epsilon_loops,
                inv_lattice,
                b_to_a_map,
                sorted_match_a=True,
            )
            rescoring_lattice = k2.top_sort(k2.connect(rescoring_lattice))
            break
        except RuntimeError as e:
            logging.info(f"Caught exception:\n{e}\n")
            logging.info(
                f"num_arcs before pruning: {inv_lattice.arcs.num_elements()}"
            )
            logging.info(
                "This OOM is not an error. You can ignore it. "
                "If your model does not converge well, or --max-duration "
                "is too large, or the input sound file is difficult to "
                "decode, you will meet this exception."
            )
            # NOTE(fangjun): The choice of the threshold 1e-9 is arbitrary here
            # to avoid OOM. You may need to fine tune it.
            inv_lattice = k2.prune_on_arc_post(inv_lattice, 1e-9, True)
            logging.info(
                f"num_arcs after pruning: {inv_lattice.arcs.num_elements()}"
            )
    if loop_count > max_loop_count:
        logging.info("Return None as the resulting lattice is too large")
        return None
    # lat has token IDs as labels
    # and word IDs as aux_labels.
    lat = k2.invert(rescoring_lattice)
    if lm_scale_list is None:
        return lat
    ans = dict()
    saved_am_scores = lat.scores - lat.lm_scores
    for lm_scale in lm_scale_list:
        # Dividing AM scores by lm_scale is equivalent (up to a positive
        # global factor) to scaling the LM scores, so the shortest path
        # is the same as with am + lm_scale * lm.
        am_scores = saved_am_scores / lm_scale
        lat.scores = am_scores + lat.lm_scores
        best_path = k2.shortest_path(lat, use_double_scores=use_double_scores)
        key = f"lm_scale_{lm_scale}"
        ans[key] = best_path
    return ans
def rescore_with_attention_decoder(
    lattice: k2.Fsa,
    num_paths: int,
    model: torch.nn.Module,
    memory: torch.Tensor,
    memory_key_padding_mask: Optional[torch.Tensor],
    sos_id: int,
    eos_id: int,
    nbest_scale: float = 1.0,
    ngram_lm_scale: Optional[float] = None,
    attention_scale: Optional[float] = None,
    use_double_scores: bool = True,
) -> Optional[Dict[str, k2.Fsa]]:
    """This function extracts `num_paths` paths from the given lattice and uses
    an attention decoder to rescore them. The path with the highest score is
    the decoding output.
    Args:
      lattice:
        An FsaVec with axes [utt][state][arc].
      num_paths:
        Number of paths to extract from the given lattice for rescoring.
      model:
        A transformer model. See the class "Transformer" in
        conformer_ctc/transformer.py for its interface.
      memory:
        The encoder memory of the given model. It is the output of
        the last torch.nn.TransformerEncoder layer in the given model.
        Its shape is `(T, N, C)`.
      memory_key_padding_mask:
        The padding mask for memory with shape `(N, T)`.
      sos_id:
        The token ID for SOS.
      eos_id:
        The token ID for EOS.
      nbest_scale:
        It's the scale applied to `lattice.scores`. A smaller value
        leads to more unique paths at the risk of missing the correct path.
      ngram_lm_scale:
        Optional. It specifies the scale for n-gram LM scores.
        If None, a predefined grid of scales is searched.
      attention_scale:
        Optional. It specifies the scale for attention decoder scores.
        If None, a predefined grid of scales is searched.
    Returns:
      A dict of FsaVec, whose key contains a string
      ngram_lm_scale_attention_scale and the value is the
      best decoding path for each utterance in the lattice.
      Returns None if no tokens could be extracted from the paths.
    """
    nbest = Nbest.from_lattice(
        lattice=lattice,
        num_paths=num_paths,
        use_double_scores=use_double_scores,
        nbest_scale=nbest_scale,
    )
    # nbest.fsa.scores are all 0s at this point
    nbest = nbest.intersect(lattice)
    # Now nbest.fsa has its scores set.
    # Also, nbest.fsa inherits the attributes from `lattice`.
    assert hasattr(nbest.fsa, "lm_scores")
    am_scores = nbest.compute_am_scores()
    ngram_lm_scores = nbest.compute_lm_scores()
    # The `tokens` attribute is set inside `compile_hlg.py`
    assert hasattr(nbest.fsa, "tokens")
    assert isinstance(nbest.fsa.tokens, torch.Tensor)
    # Maps each path to the utterance it came from, so encoder outputs
    # can be replicated once per path.
    path_to_utt_map = nbest.shape.row_ids(1).to(torch.long)
    # the shape of memory is (T, N, C), so we use axis=1 here
    expanded_memory = memory.index_select(1, path_to_utt_map)
    if memory_key_padding_mask is not None:
        # The shape of memory_key_padding_mask is (N, T), so we
        # use axis=0 here.
        expanded_memory_key_padding_mask = memory_key_padding_mask.index_select(
            0, path_to_utt_map
        )
    else:
        expanded_memory_key_padding_mask = None
    # remove axis corresponding to states.
    tokens_shape = nbest.fsa.arcs.shape().remove_axis(1)
    tokens = k2.RaggedTensor(tokens_shape, nbest.fsa.tokens)
    # Drop non-positive entries (epsilons / padding) from the token IDs.
    tokens = tokens.remove_values_leq(0)
    token_ids = tokens.tolist()
    if len(token_ids) == 0:
        print("Warning: rescore_with_attention_decoder(): empty token-ids")
        return None
    # Negative log-likelihood per token from the attention decoder;
    # shape is (num_paths, max_token_len).
    nll = model.decoder_nll(
        memory=expanded_memory,
        memory_key_padding_mask=expanded_memory_key_padding_mask,
        token_ids=token_ids,
        sos_id=sos_id,
        eos_id=eos_id,
    )
    assert nll.ndim == 2
    assert nll.shape[0] == len(token_ids)
    attention_scores = -nll.sum(dim=1)
    if ngram_lm_scale is None:
        ngram_lm_scale_list = [0.01, 0.05, 0.08]
        ngram_lm_scale_list += [0.1, 0.3, 0.5, 0.6, 0.7, 0.9, 1.0]
        ngram_lm_scale_list += [1.1, 1.2, 1.3, 1.5, 1.7, 1.9, 2.0]
        ngram_lm_scale_list += [2.1, 2.2, 2.3, 2.5, 3.0, 4.0, 5.0]
    else:
        ngram_lm_scale_list = [ngram_lm_scale]
    if attention_scale is None:
        attention_scale_list = [0.01, 0.05, 0.08]
        attention_scale_list += [0.1, 0.3, 0.5, 0.6, 0.7, 0.9, 1.0]
        attention_scale_list += [1.1, 1.2, 1.3, 1.5, 1.7, 1.9, 2.0]
        attention_scale_list += [2.1, 2.2, 2.3, 2.5, 3.0, 4.0, 5.0]
    else:
        attention_scale_list = [attention_scale]
    ans = dict()
    # Grid search over the scale combinations; one best path per entry.
    for n_scale in ngram_lm_scale_list:
        for a_scale in attention_scale_list:
            tot_scores = (
                am_scores.values
                + n_scale * ngram_lm_scores.values
                + a_scale * attention_scores
            )
            ragged_tot_scores = k2.RaggedTensor(nbest.shape, tot_scores)
            max_indexes = ragged_tot_scores.argmax()
            best_path = k2.index_fsa(nbest.fsa, max_indexes)
            key = f"ngram_lm_scale_{n_scale}_attention_scale_{a_scale}"
            ans[key] = best_path
    return ans
|
<gh_stars>10-100
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.krmapihosting import krm_api_host_pb2
from google3.cloud.graphite.mmv2.services.google.krmapihosting import (
krm_api_host_pb2_grpc,
)
from typing import List
class KrmApiHost(object):
    """Client-side resource wrapper for a KRM API Host (generated DCL code).

    Converts between this Python object and the
    ``KrmapihostingAlphaKrmApiHost`` protobuf, and issues Apply/Delete/List
    RPCs through the gRPC stub.
    """

    def __init__(
        self,
        name: str = None,
        labels: dict = None,
        bundles_config: dict = None,
        use_private_endpoint: bool = None,
        gke_resource_link: str = None,
        state: str = None,
        management_config: dict = None,
        project: str = None,
        location: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        # NOTE(review): `gke_resource_link` and `state` are accepted but not
        # stored here; they are only populated from the server response in
        # apply() — presumably output-only fields. Confirm against the API.
        self.name = name
        self.labels = labels
        self.bundles_config = bundles_config
        self.use_private_endpoint = use_private_endpoint
        self.management_config = management_config
        self.project = project
        self.location = location
        self.service_account_file = service_account_file

    def apply(self):
        """Apply (create/update) this resource and refresh local fields
        from the server response."""
        stub = krm_api_host_pb2_grpc.KrmapihostingAlphaKrmApiHostServiceStub(
            channel.Channel()
        )
        request = krm_api_host_pb2.ApplyKrmapihostingAlphaKrmApiHostRequest()
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)
        if KrmApiHostBundlesConfig.to_proto(self.bundles_config):
            request.resource.bundles_config.CopyFrom(
                KrmApiHostBundlesConfig.to_proto(self.bundles_config)
            )
        else:
            request.resource.ClearField("bundles_config")
        if Primitive.to_proto(self.use_private_endpoint):
            request.resource.use_private_endpoint = Primitive.to_proto(
                self.use_private_endpoint
            )
        if KrmApiHostManagementConfig.to_proto(self.management_config):
            request.resource.management_config.CopyFrom(
                KrmApiHostManagementConfig.to_proto(self.management_config)
            )
        else:
            request.resource.ClearField("management_config")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)
        request.service_account_file = self.service_account_file
        response = stub.ApplyKrmapihostingAlphaKrmApiHost(request)
        # Refresh all fields (including server-computed ones) from the
        # response.
        self.name = Primitive.from_proto(response.name)
        self.labels = Primitive.from_proto(response.labels)
        self.bundles_config = KrmApiHostBundlesConfig.from_proto(
            response.bundles_config
        )
        self.use_private_endpoint = Primitive.from_proto(response.use_private_endpoint)
        self.gke_resource_link = Primitive.from_proto(response.gke_resource_link)
        self.state = KrmApiHostStateEnum.from_proto(response.state)
        self.management_config = KrmApiHostManagementConfig.from_proto(
            response.management_config
        )
        self.project = Primitive.from_proto(response.project)
        self.location = Primitive.from_proto(response.location)

    def delete(self):
        """Delete this resource on the server."""
        stub = krm_api_host_pb2_grpc.KrmapihostingAlphaKrmApiHostServiceStub(
            channel.Channel()
        )
        request = krm_api_host_pb2.DeleteKrmapihostingAlphaKrmApiHostRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)
        if KrmApiHostBundlesConfig.to_proto(self.bundles_config):
            request.resource.bundles_config.CopyFrom(
                KrmApiHostBundlesConfig.to_proto(self.bundles_config)
            )
        else:
            request.resource.ClearField("bundles_config")
        if Primitive.to_proto(self.use_private_endpoint):
            request.resource.use_private_endpoint = Primitive.to_proto(
                self.use_private_endpoint
            )
        if KrmApiHostManagementConfig.to_proto(self.management_config):
            request.resource.management_config.CopyFrom(
                KrmApiHostManagementConfig.to_proto(self.management_config)
            )
        else:
            request.resource.ClearField("management_config")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)
        response = stub.DeleteKrmapihostingAlphaKrmApiHost(request)

    @classmethod
    def list(self, project, location, service_account_file=""):
        """List all resources in the given project/location.

        NOTE: `list` shadows the builtin, but the name is fixed by the
        generated-code convention.
        """
        stub = krm_api_host_pb2_grpc.KrmapihostingAlphaKrmApiHostServiceStub(
            channel.Channel()
        )
        request = krm_api_host_pb2.ListKrmapihostingAlphaKrmApiHostRequest()
        request.service_account_file = service_account_file
        request.Project = project
        request.Location = location
        return stub.ListKrmapihostingAlphaKrmApiHost(request).items

    def to_proto(self):
        """Serialize this object to a KrmapihostingAlphaKrmApiHost proto."""
        resource = krm_api_host_pb2.KrmapihostingAlphaKrmApiHost()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.labels):
            resource.labels = Primitive.to_proto(self.labels)
        if KrmApiHostBundlesConfig.to_proto(self.bundles_config):
            resource.bundles_config.CopyFrom(
                KrmApiHostBundlesConfig.to_proto(self.bundles_config)
            )
        else:
            resource.ClearField("bundles_config")
        if Primitive.to_proto(self.use_private_endpoint):
            resource.use_private_endpoint = Primitive.to_proto(
                self.use_private_endpoint
            )
        if KrmApiHostManagementConfig.to_proto(self.management_config):
            resource.management_config.CopyFrom(
                KrmApiHostManagementConfig.to_proto(self.management_config)
            )
        else:
            resource.ClearField("management_config")
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            resource.location = Primitive.to_proto(self.location)
        return resource
class KrmApiHostBundlesConfig(object):
    """Wrapper for the KrmApiHost ``bundles_config`` message."""

    def __init__(self, config_controller_config: dict = None):
        self.config_controller_config = config_controller_config

    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = krm_api_host_pb2.KrmapihostingAlphaKrmApiHostBundlesConfig()
        sub_message = KrmApiHostBundlesConfigConfigControllerConfig.to_proto(
            resource.config_controller_config
        )
        if sub_message:
            res.config_controller_config.CopyFrom(sub_message)
        else:
            res.ClearField("config_controller_config")
        return res

    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy resource."""
        if not resource:
            return None
        sub_object = KrmApiHostBundlesConfigConfigControllerConfig.from_proto(
            resource.config_controller_config
        )
        return KrmApiHostBundlesConfig(config_controller_config=sub_object)
class KrmApiHostBundlesConfigArray(object):
    """List converter for KrmApiHostBundlesConfig objects."""

    @classmethod
    def to_proto(self, resources):
        # A falsy input (None or empty) is passed through unchanged.
        if not resources:
            return resources
        return [KrmApiHostBundlesConfig.to_proto(item) for item in resources]

    @classmethod
    def from_proto(self, resources):
        return [KrmApiHostBundlesConfig.from_proto(item) for item in resources]
class KrmApiHostBundlesConfigConfigControllerConfig(object):
    """Wrapper for the ``config_controller_config`` message."""

    def __init__(self, enabled: bool = None):
        self.enabled = enabled

    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = (
            krm_api_host_pb2.KrmapihostingAlphaKrmApiHostBundlesConfigConfigControllerConfig()
        )
        enabled = Primitive.to_proto(resource.enabled)
        if enabled:
            res.enabled = enabled
        return res

    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy resource."""
        if not resource:
            return None
        return KrmApiHostBundlesConfigConfigControllerConfig(
            enabled=Primitive.from_proto(resource.enabled),
        )
class KrmApiHostBundlesConfigConfigControllerConfigArray(object):
    """List converter for KrmApiHostBundlesConfigConfigControllerConfig."""

    @classmethod
    def to_proto(self, resources):
        # A falsy input (None or empty) is passed through unchanged.
        if not resources:
            return resources
        return [
            KrmApiHostBundlesConfigConfigControllerConfig.to_proto(item)
            for item in resources
        ]

    @classmethod
    def from_proto(self, resources):
        return [
            KrmApiHostBundlesConfigConfigControllerConfig.from_proto(item)
            for item in resources
        ]
class KrmApiHostManagementConfig(object):
    """Wrapper for the KrmApiHost ``management_config`` message."""

    def __init__(self, standard_management_config: dict = None):
        self.standard_management_config = standard_management_config

    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = krm_api_host_pb2.KrmapihostingAlphaKrmApiHostManagementConfig()
        sub_message = KrmApiHostManagementConfigStandardManagementConfig.to_proto(
            resource.standard_management_config
        )
        if sub_message:
            res.standard_management_config.CopyFrom(sub_message)
        else:
            res.ClearField("standard_management_config")
        return res

    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy resource."""
        if not resource:
            return None
        sub_object = KrmApiHostManagementConfigStandardManagementConfig.from_proto(
            resource.standard_management_config
        )
        return KrmApiHostManagementConfig(standard_management_config=sub_object)
class KrmApiHostManagementConfigArray(object):
    """List converter for KrmApiHostManagementConfig objects."""

    @classmethod
    def to_proto(self, resources):
        # A falsy input (None or empty) is passed through unchanged.
        if not resources:
            return resources
        return [KrmApiHostManagementConfig.to_proto(item) for item in resources]

    @classmethod
    def from_proto(self, resources):
        return [KrmApiHostManagementConfig.from_proto(item) for item in resources]
class KrmApiHostManagementConfigStandardManagementConfig(object):
    """Wrapper for the ``standard_management_config`` message.

    All fields are simple scalars, so serialization is driven by the
    field-name tuple below instead of one if-block per field.
    """

    # Scalar fields shared by __init__, to_proto and from_proto.
    _FIELDS = (
        "network",
        "master_ipv4_cidr_block",
        "man_block",
        "cluster_cidr_block",
        "services_cidr_block",
        "cluster_named_range",
        "services_named_range",
    )

    def __init__(
        self,
        network: str = None,
        master_ipv4_cidr_block: str = None,
        man_block: str = None,
        cluster_cidr_block: str = None,
        services_cidr_block: str = None,
        cluster_named_range: str = None,
        services_named_range: str = None,
    ):
        self.network = network
        self.master_ipv4_cidr_block = master_ipv4_cidr_block
        self.man_block = man_block
        self.cluster_cidr_block = cluster_cidr_block
        self.services_cidr_block = services_cidr_block
        self.cluster_named_range = cluster_named_range
        self.services_named_range = services_named_range

    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = (
            krm_api_host_pb2.KrmapihostingAlphaKrmApiHostManagementConfigStandardManagementConfig()
        )
        for field in self._FIELDS:
            value = Primitive.to_proto(getattr(resource, field))
            # Only truthy values are written, matching proto3 semantics
            # where unset and default are indistinguishable.
            if value:
                setattr(res, field, value)
        return res

    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy resource."""
        if not resource:
            return None
        return KrmApiHostManagementConfigStandardManagementConfig(
            **{
                field: Primitive.from_proto(getattr(resource, field))
                for field in self._FIELDS
            }
        )
class KrmApiHostManagementConfigStandardManagementConfigArray(object):
    """List converter for KrmApiHostManagementConfigStandardManagementConfig."""

    @classmethod
    def to_proto(self, resources):
        # A falsy input (None or empty) is passed through unchanged.
        if not resources:
            return resources
        return [
            KrmApiHostManagementConfigStandardManagementConfig.to_proto(item)
            for item in resources
        ]

    @classmethod
    def from_proto(self, resources):
        return [
            KrmApiHostManagementConfigStandardManagementConfig.from_proto(item)
            for item in resources
        ]
class KrmApiHostStateEnum(object):
    """Converts the resource state between its short name (e.g. "RUNNING")
    and the prefixed proto enum value."""

    @classmethod
    def to_proto(self, resource):
        # Falsy input (None/empty) is passed through unchanged.
        if not resource:
            return resource
        # Prepend the generated enum prefix before looking up the value.
        return krm_api_host_pb2.KrmapihostingAlphaKrmApiHostStateEnum.Value(
            "KrmapihostingAlphaKrmApiHostStateEnum%s" % resource
        )

    @classmethod
    def from_proto(self, resource):
        # Falsy input (e.g. enum value 0) is passed through unchanged.
        if not resource:
            return resource
        # Strip the generated enum prefix from the proto enum name.
        return krm_api_host_pb2.KrmapihostingAlphaKrmApiHostStateEnum.Name(resource)[
            len("KrmapihostingAlphaKrmApiHostStateEnum") :
        ]
class Primitive(object):
    """Converts primitive values to and from their proto representation."""

    @classmethod
    def to_proto(self, s):
        # Proto3 cannot represent "unset", so any falsy value is sent as
        # the empty string.
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        # Primitive values come back unchanged.
        return s
|
<filename>fiubar/facultad/migrations/0001_initial.py
# Generated by Django 2.0.4 on 2018-05-04 14:39
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the `facultad` app (auto-generated by Django).

    Creates Alumno, AlumnoMateria, Carrera, Correlativa, Departamento,
    Materia, PlanCarrera and PlanMateria, then wires up the foreign keys
    and unique-together constraints. Do not edit the operations by hand;
    generate a new migration instead.
    """

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Alumno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('begin_date', models.DateField()),
                ('graduado_date', models.DateField(null=True)),
                ('creditos', models.IntegerField(default=0)),
                ('promedio', models.FloatField(default=0.0)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='AlumnoMateria',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('state', models.CharField(choices=[('C', 'Cursando'), ('F', 'Cursada Aprobada'), ('A', 'Materia Aprobada'), ('E', 'Equivalencia')], default='C', max_length=1)),
                ('cursada_cuat', models.CharField(blank=True, max_length=10, null=True)),
                ('cursada_date', models.DateField(blank=True, null=True)),
                ('aprobada_cuat', models.CharField(blank=True, max_length=10, null=True)),
                ('aprobada_date', models.DateField(blank=True, null=True)),
                ('nota', models.IntegerField(blank=True, null=True)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Carrera',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codigo', models.CharField(max_length=5)),
                ('name', models.CharField(max_length=100)),
                ('abbr_name', models.CharField(max_length=100)),
                ('short_name', models.CharField(max_length=20)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Correlativa',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Departamento',
            fields=[
                ('codigo', models.CharField(max_length=10, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('vigente', models.BooleanField(default=True)),
            ],
            options={
                'ordering': ['codigo'],
            },
        ),
        migrations.CreateModel(
            name='Materia',
            fields=[
                ('id', models.CharField(max_length=15, primary_key=True, serialize=False)),
                ('codigo', models.CharField(max_length=10)),
                ('name', models.CharField(max_length=100)),
                ('departamento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facultad.Departamento')),
            ],
            options={
                'ordering': ('departamento', 'codigo'),
            },
        ),
        migrations.CreateModel(
            name='PlanCarrera',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('pub_date', models.DateField()),
                ('orientacion', models.CharField(blank=True, max_length=255, null=True)),
                ('abbr_name', models.CharField(max_length=100)),
                ('short_name', models.CharField(max_length=20)),
                ('min_creditos', models.IntegerField(verbose_name='Créditos')),
                ('carrera', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facultad.Carrera')),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='PlanMateria',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creditos', models.IntegerField()),
                ('cuatrimestre', models.IntegerField()),
                ('caracter', models.CharField(max_length=3)),
                ('correlativas', models.CharField(blank=True, max_length=255, null=True)),
                ('vigente', models.BooleanField(default=True)),
                ('materia', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facultad.Materia')),
                ('plancarrera', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facultad.PlanCarrera')),
            ],
            options={
                'ordering': ['plancarrera', 'materia'],
            },
        ),
        migrations.AddField(
            model_name='correlativa',
            name='correlativa',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='correlativa', to='facultad.PlanMateria'),
        ),
        migrations.AddField(
            model_name='correlativa',
            name='materia',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='materia_p', to='facultad.PlanMateria'),
        ),
        migrations.AddField(
            model_name='carrera',
            name='plan_vigente',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='plan', to='facultad.PlanCarrera'),
        ),
        migrations.AddField(
            model_name='alumnomateria',
            name='materia',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facultad.Materia'),
        ),
        migrations.AddField(
            model_name='alumnomateria',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='alumno',
            name='carrera',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facultad.Carrera'),
        ),
        migrations.AddField(
            model_name='alumno',
            name='plancarrera',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facultad.PlanCarrera'),
        ),
        migrations.AddField(
            model_name='alumno',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterUniqueTogether(
            name='materia',
            unique_together={('departamento', 'codigo')},
        ),
        migrations.AlterUniqueTogether(
            name='alumnomateria',
            unique_together={('user', 'materia')},
        ),
        migrations.AlterUniqueTogether(
            name='alumno',
            unique_together={('user', 'plancarrera')},
        ),
    ]
|
<reponame>pierky/mrtparse
#!/usr/bin/env python
'''
slice.py - This script slices MRT format data.
Copyright (C) 2016 greenHippo, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
'''
from mrtparse import *
import argparse, time, gzip, bz2, re
from datetime import datetime
def parse_args():
    """Build the command-line interface of the slicer and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description='This script slices MRT format data.')
    parser.add_argument(
        'path_to_file',
        help='specify path to MRT format file')
    # Optional arguments, declared as (flags, keyword-arguments) pairs.
    optional_specs = [
        (('-s',), dict(type=str, metavar='START_TIME', dest='start_time',
                       help='specify start time in format YYYY-MM-DD HH:MM:SS')),
        (('-e',), dict(type=str, metavar='END_TIME', dest='end_time',
                       help='specify end time in format YYYY-MM-DD HH:MM:SS')),
        (('-i',), dict(type=int, metavar='INTERVAL', dest='interval',
                       help='specify interval in seconds')),
        (('-c',), dict(type=str, choices=['gz', 'bz2'], dest='compress_type',
                       help='specify compress type (gz, bz2)')),
    ]
    for flags, kwargs in optional_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def conv_unixtime(t):
    """Convert 'YYYY-MM-DD HH:MM:SS' to a Unix timestamp.

    Returns None when *t* is None (option not given); exits the script
    with status 1 when *t* does not match the expected format.
    """
    try:
        parsed = datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
    except TypeError:
        # No time supplied on the command line.
        return None
    except ValueError:
        print('error: invalid time \'%s\'' % t)
        exit(1)
    return int(time.mktime(parsed.timetuple()))
def file_open(f, t, c):
    """Open a writable output file for the slice starting at timestamp *t*.

    f -- path of the input MRT file; a trailing '.gz'/'.bz2' is stripped
    t -- Unix timestamp of the slice start (appended as YYYYMMDD-HHMMSS)
    c -- compression type: None, 'gz' or 'bz2'
    """
    # BUG FIX: the dots were previously unescaped (r'.gz$|.bz2$'), so the
    # regex also matched names like 'updates_bz2' and ate an extra character.
    f = re.sub(r'\.gz$|\.bz2$', '', f)
    t = datetime.fromtimestamp(t).strftime('%Y%m%d-%H%M%S')
    if c is None:
        return open('%s-%s' % (f, t), 'wb')
    elif c == 'gz':
        return gzip.GzipFile('%s-%s.%s' % (f, t, c), 'wb')
    elif c == 'bz2':
        return bz2.BZ2File('%s-%s.%s' % (f, t, c), 'wb')
def slice_mrt(args):
    """Split the MRT file into per-interval output files.

    args -- argparse namespace with path_to_file, start_time, end_time,
            interval and compress_type (see parse_args()).
    """
    t = start_time = conv_unixtime(args.start_time)
    end_time = conv_unixtime(args.end_time)
    interval = args.interval

    if t is None:
        # No -s option: use the timestamp of the first MRT record instead.
        # BUG FIX: use the builtin next() on an iterator rather than the
        # Python-2-only d.next() method, so this also works on Python 3
        # readers that implement __next__().
        d = Reader(args.path_to_file)
        m = next(iter(d))
        t = m.mrt.ts

    f = file_open(args.path_to_file, t, args.compress_type)
    d = Reader(args.path_to_file)
    for m in d:
        m = m.mrt
        if start_time and (m.ts < start_time):
            continue
        if end_time and (m.ts >= end_time):
            break
        if interval and (m.ts >= t + interval):
            # Current window is full: close it and open the next slice.
            # NOTE(review): only advances one interval per record, so a gap
            # longer than one interval still lands in the next single file.
            f.close()
            t += interval
            f = file_open(args.path_to_file, t, args.compress_type)
        f.write(m.buf)
    f.close()
def main():
    """Script entry point: parse the CLI options and run the slicer."""
    slice_mrt(parse_args())


if __name__ == '__main__':
    main()
|
<filename>memegen/domain/template.py
import os
import hashlib
import shutil
from pathlib import Path
from contextlib import suppress
import tempfile
import requests
from PIL import Image
import log
from .text import Text
# Headers sent with every outgoing HTTP request (link checks and image
# downloads). Presents a Googlebot User-Agent — presumably so hosts that
# reject generic script clients still respond; confirm before changing.
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': "Googlebot/2.1 (+http://www.googlebot.com/bot.html)",
}
class Template:
    """Blank image to generate a meme."""

    # Style name of the base image file (looked up as e.g. "default.png").
    DEFAULT = 'default'
    # Image extensions considered when resolving template/style files.
    EXTENSIONS = ('.png', '.jpg')
    # Caption used when a template defines no default lines of text.
    SAMPLE_LINES = ["YOUR TEXT", "GOES HERE"]
    # Marker file caching the last successfully validated source link.
    VALID_LINK_FLAG = '.valid_link.tmp'

    # Minimum acceptable image dimensions, in pixels (see validate_size).
    MIN_HEIGHT = 240
    MIN_WIDTH = 240

    def __init__(self, key, name=None, lines=None, aliases=None, link=None,
                 root=None):
        self.key = key
        self.name = name or ""
        self.lines = lines or [""]
        self.aliases = aliases or []
        self.link = link or ""
        # Directory containing all template directories; this template's
        # files live in root/key (see dirpath).
        self.root = root or ""

    def __str__(self):
        return self.key

    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable; fine as long as templates are never used as set members
    # or dict keys.
    def __eq__(self, other):
        return self.key == other.key

    def __ne__(self, other):
        return self.key != other.key

    def __lt__(self, other):
        # Templates sort by display name, not key.
        return self.name < other.name

    @property
    def dirpath(self):
        """Directory holding this template's image files."""
        return os.path.join(self.root, self.key)

    @property
    def path(self):
        """Path of the default background image, or None if missing."""
        return self.get_path()

    @property
    def default_text(self):
        """Text object built from the template's default lines."""
        return Text(self.lines)

    @property
    def default_path(self):
        return self.default_text.path or Text.EMPTY

    @property
    def sample_text(self):
        # Falls back to SAMPLE_LINES when the default Text is falsy —
        # presumably when the template has no real default text; Text's
        # truthiness is defined elsewhere (confirm against text.py).
        return self.default_text or Text(self.SAMPLE_LINES)

    @property
    def sample_path(self):
        return self.sample_text.path

    @property
    def aliases_lowercase(self):
        """Aliases normalized to lowercase slugs, punctuation kept."""
        return [self.strip(a, keep_special=True) for a in self.aliases]

    @property
    def aliases_stripped(self):
        """Aliases normalized to lowercase slugs with punctuation removed."""
        return [self.strip(a, keep_special=False) for a in self.aliases]

    @property
    def styles(self):
        """Sorted list of alternate style names available on disk."""
        return sorted(self._styles())

    def _styles(self):
        """Yield all template style names."""
        for filename in os.listdir(self.dirpath):
            name, ext = os.path.splitext(filename.lower())
            if ext in self.EXTENSIONS and name != self.DEFAULT:
                yield name

    @property
    def keywords(self):
        """Set of lowercase search words from key, name, aliases and lines."""
        words = set()
        for fields in [self.key, self.name] + self.aliases + self.lines:
            for word in fields.lower().replace(Text.SPACE, ' ').split(' '):
                if word:
                    words.add(word)
        return words

    @staticmethod
    def strip(text, keep_special=False):
        """Normalize *text* to a lowercase dash-joined slug.

        With keep_special=False, '-', '_', '!' and apostrophes are removed.
        """
        text = text.lower().strip().replace(' ', '-')
        if not keep_special:
            for char in ('-', '_', '!', "'"):
                text = text.replace(char, '')
        return text

    def get_path(self, style_or_url=None, *, download=True):
        """Resolve the image path for a style name or remote URL.

        URLs are fetched via download_image (skipped when download=False);
        a failed download falls back to the default style. Non-URL inputs
        are treated as style names, falling back to the default style.
        Returns None when no matching image file exists.
        """
        path = None
        if style_or_url and '://' in style_or_url:
            if download:
                path = download_image(style_or_url)
            if path is None:
                path = self._find_path_for_style(self.DEFAULT)
        else:
            names = [n.lower() for n in [style_or_url, self.DEFAULT] if n]
            path = self._find_path_for_style(*names)
        return path

    def _find_path_for_style(self, *names):
        """Return the first existing image file for *names*, else None."""
        for name in names:
            for extension in self.EXTENSIONS:
                path = Path(self.dirpath, name + extension)
                # OSError here (e.g. invalid characters in a path) simply
                # means "no such file" for our purposes.
                with suppress(OSError):
                    if path.is_file():
                        return path
        return None

    def search(self, query):
        """Count the number of times a query exists in relevant fields."""
        if query is None:
            return -1
        count = 0
        for field in [self.key, self.name] + self.aliases + self.lines:
            count += field.lower().count(query.lower())
        return count

    def validate(self, validators=None):
        """Run *validators* (default: meta, link, size); True if all pass."""
        if validators is None:
            validators = [
                self.validate_meta,
                self.validate_link,
                self.validate_size,
            ]
        for validator in validators:
            if not validator():
                return False
        return True

    def validate_meta(self):
        """Check required metadata: default lines, name, default image."""
        if not self.lines:
            self._error("has no default lines of text")
            return False
        if not self.name:
            self._error("has no name")
            return False
        if not self.name[0].isalnum():
            self._error(f"name '{self.name}' should start with alphanumeric")
            return False
        if not self.path:
            self._error("has no default image")
            return False
        return True

    def validate_link(self):
        """Verify the source link responds; cache successes in a flag file."""
        if not self.link:
            return True
        flag = Path(self.dirpath, self.VALID_LINK_FLAG)
        with suppress(IOError):
            # Skip the network round trip when this exact link was already
            # validated (the flag file stores the last good link).
            with flag.open() as f:
                if f.read() == self.link:
                    log.info(f"Link already checked: {self.link}")
                    return True
        log.info(f"Checking link {self.link}")
        try:
            response = requests.head(self.link, timeout=5,
                                     headers=DEFAULT_REQUEST_HEADERS)
        except requests.exceptions.ReadTimeout:
            log.warning("Connection timed out")
            return True  # assume URL is OK; it will be checked again
        # 403/429/503 are treated as temporary (warn only, still cache);
        # any other 4xx/5xx marks the link as broken.
        if response.status_code in [403, 429, 503]:
            self._warn(f"link is unavailable ({response.status_code})")
        elif response.status_code >= 400:
            self._error(f"link is invalid ({response.status_code})")
            return False
        with flag.open('w') as f:
            f.write(self.link)
        return True

    def validate_size(self):
        """Check the default image meets the minimum pixel dimensions."""
        im = Image.open(self.path)
        w, h = im.size
        if w < self.MIN_WIDTH or h < self.MIN_HEIGHT:
            log.error("Image must be at least "
                      f"{self.MIN_WIDTH}x{self.MIN_HEIGHT} (is {w}x{h})")
            return False
        return True

    def _warn(self, message):
        # Log a non-fatal template problem.
        log.warning(f"Template '{self}' " + message)

    def _error(self, message):
        # Log a fatal template problem (validation will fail).
        log.error(f"Template '{self}' " + message)
class Placeholder:
    """Default image for missing templates."""

    # Bundled fallback image, resolved relative to this package.
    FALLBACK_PATH = str(Path(__file__).parents[1]
                        / 'static' / 'images' / 'missing.png')

    path = None

    def __init__(self, key):
        self.key = key

    @classmethod
    def get_path(cls, url=None, download=True):
        """Return the image path for *url*, falling back to the bundled image.

        The download is attempted only when a URL is given and *download*
        is true; any failure yields the fallback path.
        """
        if not (url and download):
            return cls.FALLBACK_PATH
        downloaded = download_image(url)
        return cls.FALLBACK_PATH if downloaded is None else downloaded
def download_image(url):
    """Fetch *url* into a temp file named after its MD5; return Path or None.

    Raises ValueError for inputs that are not URLs. Returns None on
    connection problems or non-200 responses.
    """
    if not url or '://' not in url:
        raise ValueError(f"Not a URL: {url!r}")

    digest = hashlib.md5(url.encode('utf-8')).hexdigest()
    target = Path(tempfile.gettempdir(), digest)

    # The MD5-derived name doubles as a cache key across requests.
    if target.is_file():
        log.debug(f"Already downloaded: {url}")
        return target

    try:
        response = requests.get(url, stream=True, timeout=5,
                                headers=DEFAULT_REQUEST_HEADERS)
    except ValueError:
        log.error(f"Invalid link: {url}")
        return None
    except requests.exceptions.RequestException:
        log.error(f"Bad connection: {url}")
        return None

    if response.status_code != 200:
        log.error(f"Unable to download: {url}")
        return None

    log.info(f"Downloading {url}")
    response.raw.decode_content = True
    with open(str(target), 'wb') as outfile:
        shutil.copyfileobj(response.raw, outfile)
    return target
|
<reponame>archimarkGit/compas
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import itertools
import os
import sys
import compas_rhino
import compas._os
import compas.plugins
__all__ = ['install']
def install(version=None, packages=None):
    """Install COMPAS for Rhino.

    Parameters
    ----------
    version : {'5.0', '6.0', '7.0'}, optional
        The version number of Rhino.
        Default is ``'6.0'``.
    packages : list of str, optional
        List of packages to install or None to use default package list.
        Default is ``['compas', 'compas_rhino', 'compas_ghpython']``.

    Examples
    --------
    .. code-block:: python

        import compas_rhino
        compas_rhino.install('6.0')

    .. code-block:: bash

        python -m compas_rhino.install -v 6.0

    """
    # Any unrecognized version silently falls back to the default.
    if version not in ('5.0', '6.0', '7.0'):
        version = '6.0'

    packages = _filter_installable_packages(version, packages)

    # Target folders inside the Rhino installation.
    ipylib_path = compas_rhino._get_ironpython_lib_path(version)
    scripts_path = compas_rhino._get_scripts_path(version)

    print('Installing COMPAS packages to Rhino {0} scripts folder:'.format(version))
    print('Location scripts folder: {}'.format(scripts_path))
    print()

    # (package-name, status-message) pairs reported at the end.
    results = []
    symlinks_to_install = []
    symlinks_to_uninstall = []
    exit_code = 0

    # Plan the work: one symlink per package in the scripts folder, plus
    # removal of any copy left in the legacy IronPython lib location.
    for package in packages:
        package_path = compas_rhino._get_package_path(importlib.import_module(package))
        symlink_path = os.path.join(scripts_path, package)
        symlinks_to_install.append(dict(name=package, source_path=package_path, link=symlink_path))
        symlinks_to_uninstall.append(dict(name=package, link=symlink_path))

        # Handle legacy install location
        legacy_path = os.path.join(ipylib_path, package)
        if os.path.exists(legacy_path):
            symlinks_to_uninstall.append(dict(name=package, link=legacy_path))

    # First uninstall existing copies of packages requested for installation
    symlinks = [link['link'] for link in symlinks_to_uninstall]
    uninstall_results = compas._os.remove_symlinks(symlinks)

    # A failed removal is reported but does not abort the run.
    for uninstall_data, success in zip(symlinks_to_uninstall, uninstall_results):
        if not success:
            results.append((uninstall_data['name'], 'ERROR: Cannot remove symlink, try to run as administrator.'))

    # Handle legacy bootstrapper
    if not compas_rhino._try_remove_bootstrapper(ipylib_path):
        results.append(('compas_bootstrapper', 'ERROR: Cannot remove legacy compas_bootstrapper, try to run as administrator.'))

    # Ready to start installing
    symlinks = [(link['source_path'], link['link']) for link in symlinks_to_install]
    install_results = compas._os.create_symlinks(symlinks)

    for install_data, success in zip(symlinks_to_install, install_results):
        result = 'OK' if success else 'ERROR: Cannot create symlink, try to run as administrator.'
        results.append((install_data['name'], result))

    if not all(install_results):
        exit_code = -1

    if exit_code == -1:
        results.append(('compas_bootstrapper', 'WARNING: One or more packages failed, will not install bootstrapper, try uninstalling first'))
    else:
        # Record the active Python environment so Rhino can locate it later.
        try:
            _update_bootstrapper(scripts_path, packages)
            results.append(('compas_bootstrapper', 'OK'))
        except:  # noqa: E722
            results.append(('compas_bootstrapper', 'ERROR: Could not create compas_bootstrapper to auto-determine Python environment'))

    # Print the summary; any non-OK status makes the process exit non-zero.
    for package, status in results:
        print('   {} {}'.format(package.ljust(20), status))
        if status != 'OK':
            exit_code = -1

    print('\nCompleted.')
    if exit_code != 0:
        sys.exit(exit_code)
@compas.plugins.plugin(category='install', pluggable_name='installable_rhino_packages', tryfirst=True)
def default_installable_rhino_packages():
    """Default implementation of the ``installable_rhino_packages`` pluggable.

    Registered through the plugin system (instead of being hard-coded at
    the call site) so core packages and extension packages are collected
    through the exact same mechanism.
    """
    core_packages = ['compas', 'compas_rhino']
    return core_packages
@compas.plugins.pluggable(category='install', selector='collect_all')
def installable_rhino_packages():
    """Provide a list of packages to make available inside Rhino.

    Extensions providing Rhino or Grasshopper features
    can implement this pluggable interface to automatically
    have their packages made available inside Rhino when
    COMPAS is installed into it.

    Examples
    --------
    >>> import compas.plugins
    >>> @compas.plugins.plugin(category='install')
    ... def installable_rhino_packages():
    ...    return ['compas_fab']

    Returns
    -------
    :obj:`list` of :obj:`str`
        List of package names to make available inside Rhino.

    """
    # Intentionally empty: concrete implementations are supplied by plugins
    # and aggregated via the 'collect_all' selector.
    pass
def _update_bootstrapper(install_path, packages):
    """Write/refresh the compas_bootstrapper file describing the Python env.

    The bootstrapper records the interpreter location, the conda environment
    (if any) and the cumulative set of installed packages.
    """
    # Take either the CONDA environment directory or the current Python executable's directory
    python_directory = os.environ.get('CONDA_PREFIX', None) or os.path.dirname(sys.executable)
    environment_name = os.environ.get('CONDA_DEFAULT_ENV', '')
    conda_exe = os.environ.get('CONDA_EXE', '')

    compas_bootstrapper = compas_rhino._get_bootstrapper_path(install_path)

    # Merge the newly installed packages with whatever was recorded before.
    bootstrapper_data = compas_rhino._get_bootstrapper_data(compas_bootstrapper)
    recorded = set(bootstrapper_data.get('INSTALLED_PACKAGES', []))
    recorded.update(packages)
    installed_packages = list(recorded)

    with open(compas_bootstrapper, 'w') as f:
        f.write('ENVIRONMENT_NAME = r"{}"\n'.format(environment_name))
        f.write('PYTHON_DIRECTORY = r"{}"\n'.format(python_directory))
        f.write('CONDA_EXE = r"{}"\n'.format(conda_exe))
        f.write('INSTALLED_PACKAGES = {}'.format(repr(installed_packages)))
def _filter_installable_packages(version, packages):
    """Return the list of packages to install for the given Rhino *version*.

    When *packages* is falsy, the list is collected from the
    ``installable_rhino_packages`` pluggable. ``compas_ghpython`` is dropped
    on Rhino 5 for Mac, where Grasshopper is not supported.
    """
    # BUG FIX: *version* is a string ('5.0', '6.0', '7.0'); the previous
    # comparison against the float 5.0 could never be true, so the filter
    # never triggered on Mac.
    ghpython_incompatible = compas._os.system == 'darwin' and version == '5.0'

    if not packages:
        # Flatten list of results (resulting from collect_all pluggable)
        packages = list(itertools.chain.from_iterable(installable_rhino_packages()))
    elif 'compas_ghpython' in packages and ghpython_incompatible:
        print('Skipping installation of compas_ghpython since it\'s not supported for Rhino 5 for Mac')

    # BUG FIX: guard the removal — an unconditional remove() raised
    # ValueError whenever the caller's list did not contain compas_ghpython.
    if ghpython_incompatible and 'compas_ghpython' in packages:
        packages.remove('compas_ghpython')

    return packages
# ==============================================================================
# Main
# ==============================================================================

if __name__ == "__main__":

    import argparse

    # Command-line interface mirroring install()'s parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--version', choices=['5.0', '6.0', '7.0'], default='6.0', help="The version of Rhino to install the packages in.")
    parser.add_argument('-p', '--packages', nargs='+', help="The packages to install.")
    args = parser.parse_args()

    install(version=args.version, packages=args.packages)
|
<filename>Code/odooerp/odoo-8.0/openerp/addons/analytic/analytic.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
    """Hierarchical analytic (cost/revenue/contract) account.

    Python 2 / OpenERP 8 osv model. Amount fields (debit, credit, balance,
    quantity) are computed from analytic lines and rolled up the hierarchy.
    """
    _name = 'account.analytic.account'
    _inherit = ['mail.thread']
    _description = 'Analytic Account'
    # Post a tracked chatter message whenever 'state' reaches these values.
    _track = {
        'state': {
            'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
            'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
            'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
        },
    }

    def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
        """Roll up *field_names* from children into each account in *ids*.

        res maps account id -> dict of per-account values already computed;
        monetary amounts are converted into the parent's currency, while
        'quantity' is summed without conversion.
        """
        currency_obj = self.pool.get('res.currency')
        recres = {}
        def recursive_computation(account):
            # Start from the account's own figures, then add each child's.
            result2 = res[account.id].copy()
            for son in account.child_ids:
                result = recursive_computation(son)
                for field in field_names:
                    if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
                        result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
                    result2[field] += result[field]
            return result2
        for account in self.browse(cr, uid, ids, context=context):
            if account.id not in child_ids:
                continue
            recres[account.id] = recursive_computation(account)
        return recres

    def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
        """Function-field getter for debit/credit/balance/quantity.

        Aggregates analytic lines per account via SQL (optionally limited by
        'from_date'/'to_date' in the context), then rolls the figures up the
        account hierarchy with _compute_level_tree.
        """
        res = {}
        if context is None:
            context = {}
        # All accounts in the requested subtrees, initialized to zero.
        child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)], context=context))
        for i in child_ids:
            res[i] = {}
            for n in fields:
                res[i][n] = 0.0
        if not child_ids:
            return res
        where_date = ''
        where_clause_args = [tuple(child_ids)]
        if context.get('from_date', False):
            where_date += " AND l.date >= %s"
            where_clause_args += [context['from_date']]
        if context.get('to_date', False):
            where_date += " AND l.date <= %s"
            where_clause_args += [context['to_date']]
        cr.execute("""
              SELECT a.id,
                     sum(
                         CASE WHEN l.amount > 0
                         THEN l.amount
                         ELSE 0.0
                         END
                          ) as debit,
                     sum(
                         CASE WHEN l.amount < 0
                         THEN -l.amount
                         ELSE 0.0
                         END
                          ) as credit,
                     COALESCE(SUM(l.amount),0) AS balance,
                     COALESCE(SUM(l.unit_amount),0) AS quantity
              FROM account_analytic_account a
                  LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
              WHERE a.id IN %s
              """ + where_date + """
              GROUP BY a.id""", where_clause_args)
        for row in cr.dictfetchall():
            res[row['id']] = {}
            for field in fields:
                res[row['id']][field] = row[field]
        return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)

    def name_get(self, cr, uid, ids, context=None):
        """Return (id, full hierarchical name) pairs."""
        res = []
        if not ids:
            return res
        if isinstance(ids, (int, long)):
            ids = [ids]
        for id in ids:
            elmt = self.browse(cr, uid, id, context=context)
            res.append((id, self._get_one_full_name(elmt)))
        return res

    def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
        # Function-field getter for 'complete_name'.
        if context == None:
            context = {}
        res = {}
        for elmt in self.browse(cr, uid, ids, context=context):
            res[elmt.id] = self._get_one_full_name(elmt)
        return res

    def _get_one_full_name(self, elmt, level=6):
        """Build a 'parent / child / ...' name, truncated after *level* hops."""
        if level<=0:
            return '...'
        # Template accounts are displayed without their parent path.
        if elmt.parent_id and not elmt.type == 'template':
            parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
        else:
            parent_path = ''
        return parent_path + elmt.name

    def _child_compute(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter: child accounts excluding templates."""
        result = {}
        if context is None:
            context = {}
        for account in self.browse(cr, uid, ids, context=context):
            result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
        return result

    def _get_analytic_account(self, cr, uid, ids, context=None):
        # Store trigger: analytic accounts affected when a company's
        # currency changes (see the 'currency_id' store definition below).
        company_obj = self.pool.get('res.company')
        analytic_obj = self.pool.get('account.analytic.account')
        accounts = []
        for company in company_obj.browse(cr, uid, ids, context=context):
            accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
        return accounts

    def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
        """Inverse setter for 'currency_id'.

        Rejects a currency different from the company's; writes directly via
        SQL since the field is a function field.
        """
        if isinstance(ids, (int, long)):
            ids=[ids]
        for account in self.browse(cr, uid, ids, context=context):
            if account.company_id:
                if account.company_id.currency_id.id != value:
                    raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as it's currency. \nYou can remove the company belonging, and thus change the currency, only on analytic account of type 'view'. This can be really useful for consolidation purposes of several companies charts with different currencies, for example."))
            if value:
                cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
                self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)

    def _currency(self, cr, uid, ids, field_name, arg, context=None):
        """Getter: the company's currency when a company is set, else the
        account's stored currency."""
        result = {}
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.company_id:
                result[rec.id] = rec.company_id.currency_id.id
            else:
                result[rec.id] = rec.currency_id.id
        return result

    _columns = {
        'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
        'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
        'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
        'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
                                 help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
                                 "The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
                                 "If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
                                 "The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
        'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
        'description': fields.text('Description'),
        'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
        'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts', copy=True),
        'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
        'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries', copy=False),
        'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
        'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
        'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
        'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
        'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
        'partner_id': fields.many2one('res.partner', 'Customer'),
        'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
        'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
        'date_start': fields.date('Start Date'),
        'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
        'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
        'state': fields.selection([('template', 'Template'),
                                   ('draft','New'),
                                   ('open','In Progress'),
                                   ('pending','To Renew'),
                                   ('close','Closed'),
                                   ('cancelled', 'Cancelled')],
                                  'Status', required=True,
                                  track_visibility='onchange', copy=False),
        'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
            store = {
                'res.company': (_get_analytic_account, ['currency_id'], 10),
            }, string='Currency', type='many2one', relation='res.currency'),
    }

    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        """On-change handler: prefill contract fields from a template.

        Keeps the template's duration but shifts the start to today.
        """
        if not template_id:
            return {}
        res = {'value':{}}
        template = self.browse(cr, uid, template_id, context=context)
        if template.date_start and template.date:
            from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
            to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
            timedelta = to_dt - from_dt
            res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
        if not date_start:
            res['value']['date_start'] = fields.date.today()
        res['value']['quantity_max'] = template.quantity_max
        res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
        res['value']['description'] = template.description
        return res

    def on_change_partner_id(self, cr, uid, ids,partner_id, name, context=None):
        """On-change handler: prefill manager and name from the customer."""
        res={}
        if partner_id:
            partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
            if partner.user_id:
                res['manager_id'] = partner.user_id.id
            if not name:
                res['name'] = _('Contract: ') + partner.name
        return {'value': res}

    def _default_company(self, cr, uid, context=None):
        # Default: the user's company, else the first root company found.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        if user.company_id:
            return user.company_id.id
        return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]

    def _get_default_currency(self, cr, uid, context=None):
        # Default currency: the current user's company currency.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return user.company_id.currency_id.id

    _defaults = {
        'type': 'normal',
        'company_id': _default_company,
        'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account', context=context),
        'state': 'open',
        'user_id': lambda self, cr, uid, ctx: uid,
        'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
        'manager_id': lambda self, cr, uid, ctx: ctx.get('manager_id', False),
        'date_start': lambda *a: time.strftime('%Y-%m-%d'),
        'currency_id': _get_default_currency,
    }

    def check_recursion(self, cr, uid, ids, context=None, parent=None):
        # Thin wrapper so the constraint below can reference a bound method.
        return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)

    _order = 'code, name asc'
    _constraints = [
        (check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
    ]

    def name_create(self, cr, uid, name, context=None):
        # Quick-create from many2one widgets is deliberately disabled.
        raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))

    def copy(self, cr, uid, id, default=None, context=None):
        """ executed only on the toplevel copied object of the hierarchy.
        Subobject are actually copied with copy_data"""
        if not default:
            default = {}
        analytic = self.browse(cr, uid, id, context=context)
        default['name'] = _("%s (copy)") % analytic['name']
        return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)

    def on_change_company(self, cr, uid, id, company_id):
        """On-change handler: align the currency with the chosen company."""
        if not company_id:
            return {}
        currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
        return {'value': {'currency_id': currency}}

    def on_change_parent(self, cr, uid, id, parent_id):
        """On-change handler: inherit the partner from the parent account."""
        if not parent_id:
            return {}
        parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
        if parent['partner_id']:
            partner = parent['partner_id'][0]
        else:
            partner = False
        res = {'value': {}}
        if partner:
            res['value']['partner_id'] = partner
        return res

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Search by exact code first, then by (slash-separated) name path."""
        if not args:
            args=[]
        if context is None:
            context={}
        account_ids = []
        if name:
            account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
            if not account_ids:
                dom = []
                if '/' in name:
                    # 'parent / child' style lookup: narrow level by level.
                    for name2 in name.split('/'):
                        # intermediate search without limit and args - could be expensive for large tables if `name` is not selective
                        account_ids = self.search(cr, uid, dom + [('name', operator, name2.strip())], limit=None, context=context)
                        if not account_ids: break
                        dom = [('parent_id','in',account_ids)]
                if account_ids and args:
                    # final filtering according to domain (args)
                    account_ids = self.search(cr, uid, [('id', 'in', account_ids)] + args, limit=limit, context=context)
        if not account_ids:
            return super(account_analytic_account, self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
        return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
    """Single analytic entry (cost, revenue or quantity) on an account."""
    _name = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'name': fields.char('Description', required=True),
        'date': fields.date('Date', required=True, select=True),
        'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
        'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
        'user_id': fields.many2one('res.users', 'User'),
        'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }

    def _get_default_date(self, cr, uid, context=None):
        # Today's date, evaluated in the user's timezone.
        return fields.date.context_today(self, cr, uid, context=context)

    def __get_default_date(self, cr, uid, context=None):
        # Indirection so subclasses that override _get_default_date are
        # still honored by the _defaults entry below.
        return self._get_default_date(cr, uid, context=context)

    _defaults = {
        'date': __get_default_date,
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
        'amount': 0.00
    }

    _order = 'date desc'

    def _check_no_view(self, cr, uid, ids, context=None):
        # Constraint helper: 'view' accounts are aggregation-only and may
        # not receive analytic lines.
        analytic_lines = self.browse(cr, uid, ids, context=context)
        for line in analytic_lines:
            if line.account_id.type == 'view':
                return False
        return True

    _constraints = [
        (_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
'''
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations
under the License.
'''
import codecs
import os
import _pickle as cPickle
import numpy as np
from functools import reduce
class weekUtils:
    """Random-access iterator over pre-tokenized corpus files ("week1.txt", ...).

    For every corpus file a pickled numpy array of byte offsets (one offset
    per sentence) is loaded, so any sentence can be read in O(1) by seeking
    into the (still open) file handle.
    """

    def __init__(self, sCorpusDir=None, sSentencePositionsDir=None,
                 sName=None, bVerbose=False, wk=1):
        """Open corpus files and load their sentence-position indices.

        :param sCorpusDir: directory holding the weekN.txt corpus files
        :param sSentencePositionsDir: directory holding weekN.txt.pickle offsets
        :param sName: optional label for this corpus (stored, not used here)
        :param bVerbose: print progress information
        :param wk: number of week files (see NOTE below)
        """
        self.bVerbose = bVerbose
        self.sName = sName
        # NOTE(review): range(1, wk) excludes "week<wk>.txt" and yields NO
        # files at all for the default wk=1 — confirm whether
        # range(1, int(wk) + 1) was intended. Kept as-is to preserve the
        # caller-visible behavior.
        aFiles = ["week%i.txt" % i for i in range(1, int(wk))]
        self.aFiles = []
        self.iTotalNrOfSentences = 0
        for sFile in aFiles:
            sPickleFile = os.path.join(sSentencePositionsDir, "%s.pickle" % sFile)
            if self.bVerbose:
                print("Loading %s" % sPickleFile)
            # BUGFIX: use a context manager so the pickle handle is closed
            # even if cPickle.load raises.
            with open(sPickleFile, mode='rb') as fhSentencePositions:
                npaSentencePositions = cPickle.load(fhSentencePositions)
            sFileName = os.path.join(sCorpusDir, sFile)
            self.aFiles.append({
                "sFileName": sFileName,
                # The corpus handle stays open for the object's lifetime;
                # the yield* iterators below seek into it lazily.
                "fhFile": codecs.open(sFileName, mode="r", encoding="utf8"),
                "npaSentencePositions": npaSentencePositions,
                # Two independent index permutations so tuple sampling and
                # random-sentence sampling do not interfere.
                "npaIndicesForTuples":
                    np.array(range(npaSentencePositions.shape[0])),
                "npaIndicesForRandom":
                    np.array(range(npaSentencePositions.shape[0])),
                "iYieldedTuple": 0,
                "iYieldedRandom": 0,
            })
            self.iTotalNrOfSentences += npaSentencePositions.shape[0]
        # Punctuation-like tokens stripped from every sentence by toTokens().
        self.aNonTokens = ['.', "''", '``', ',', ':', ';', '?', '!', '-', '_']

    def __iter__(self):
        # Simple wrapper: default iteration yields random tuples.
        for t in self.yieldTuple():
            yield t

    def yieldSentence(self):
        """Yield every sentence of every file, in on-disk order."""
        for oFile in self.aFiles:
            for iSentencePosition in oFile["npaSentencePositions"]:
                # Seek to the sentence start and read it.
                oFile["fhFile"].seek(iSentencePosition)
                yield self.toTokens(oFile["fhFile"].readline())

    def yieldRandomSentence(self):
        """Yield random sentences FOREVER, reshuffling a file's permutation
        whenever it is exhausted."""
        # Every time this iterator is started, shuffle and reset all files.
        for oFile in self.aFiles:
            np.random.shuffle(oFile["npaIndicesForRandom"])
            oFile["iYieldedRandom"] = 0
        while True:
            # Choose a file uniformly at random.
            oFile = self.aFiles[np.random.randint(0, len(self.aFiles))]
            # If we yielded as many indices as there are, shuffle again.
            if oFile["iYieldedRandom"] == oFile["npaIndicesForRandom"].shape[0]:
                np.random.shuffle(oFile["npaIndicesForRandom"])
                oFile["iYieldedRandom"] = 0
            # Random index -> byte offset -> seek -> read.
            iSentencePositionIndex = \
                oFile["npaIndicesForRandom"][oFile["iYieldedRandom"]]
            iSentencePosition = \
                oFile["npaSentencePositions"][iSentencePositionIndex]
            oFile["fhFile"].seek(iSentencePosition)
            yield self.toTokens(oFile["fhFile"].readline())
            oFile["iYieldedRandom"] += 1

    def yieldTuple(self):
        """Yield (sentence_n, sentence_n-1, sentence_n+1) tuples in random
        order until every eligible tuple has been yielded once."""
        for oFile in self.aFiles:
            # BUGFIX: the original shuffled aFiles[0] on every pass of this
            # loop; each file's own permutation must be shuffled instead.
            np.random.shuffle(oFile["npaIndicesForTuples"])
            oFile["iYieldedTuple"] = 0

        def bHasRemaining(oFile):
            # True while this file still has unyielded tuple indices.
            return oFile["iYieldedTuple"] < oFile["npaIndicesForTuples"].shape[0]

        # BUGFIX: any() replaces the reduce()-over-lambdas condition, which
        # raised TypeError on an empty file list (reduce without initializer).
        while any(bHasRemaining(oFile) for oFile in self.aFiles):
            # Pick a random file that still has unyielded tuples.
            iFileIndex = np.random.randint(0, len(self.aFiles))
            while not bHasRemaining(self.aFiles[iFileIndex]):
                iFileIndex = np.random.randint(0, len(self.aFiles))
            oFile = self.aFiles[iFileIndex]
            if self.bVerbose:
                print("iFileIndex:%i" % iFileIndex)
            # Draw indices until we get one that is neither the first nor the
            # last sentence (those lack a predecessor/successor).
            bDone = False
            iSentencePositionIndex = 0
            while (iSentencePositionIndex == 0) or \
                    (iSentencePositionIndex ==
                     (oFile["npaIndicesForTuples"].shape[0] - 1)):
                if oFile["iYieldedTuple"] == oFile["npaIndicesForTuples"].shape[0]:
                    bDone = True
                    break
                iSentencePositionIndex = \
                    oFile["npaIndicesForTuples"][oFile["iYieldedTuple"]]
                oFile["iYieldedTuple"] += 1
            if bDone:
                continue
            # Seek to the sentence BEFORE the chosen one and read three
            # consecutive sentences.
            oFile["fhFile"].seek(
                oFile["npaSentencePositions"][iSentencePositionIndex - 1])
            aSentence_n_min_1 = self.toTokens(oFile["fhFile"].readline())
            aSentence_n = self.toTokens(oFile["fhFile"].readline())
            aSentence_n_plus_1 = self.toTokens(oFile["fhFile"].readline())
            # Yield the tuple, sentence n first.
            yield (aSentence_n, aSentence_n_min_1, aSentence_n_plus_1)

    def toTokens(self, sLine):
        """Split a space-tokenized line, dropping punctuation-like tokens."""
        return [x for x in sLine.strip().split(' ') if x not in self.aNonTokens]
# You can use the bit below to test something
if __name__ == "__main__":
    import argparse

    oArgsParser = \
        argparse.ArgumentParser(description='Toronto Book Corpus utils')
    oArgsParser.add_argument("TORONTO_BOOK_CORPUS_DIR")
    oArgsParser.add_argument("TORONTO_BOOK_CORPUS_SENTENCE_POSITIONS_DIR")
    oArgsParser.add_argument("-r", dest="bRandom", action="store_true")
    oArgsParser.add_argument("-d", dest="bDebug", action="store_true")
    oArgsParser.add_argument("-v", dest="bVerbose", action="store_true")
    oArgs = oArgsParser.parse_args()

    if oArgs.bDebug:
        import pdb
        pdb.set_trace()

    # BUGFIX: the original instantiated 'torontoBookCorpusIterator', a name
    # that does not exist in this module; the iterator class defined above
    # is 'weekUtils'.
    oToBoCo = weekUtils(oArgs.TORONTO_BOOK_CORPUS_DIR,
                        oArgs.TORONTO_BOOK_CORPUS_SENTENCE_POSITIONS_DIR,
                        bVerbose=oArgs.bVerbose)

    i = 0
    if oArgs.bRandom:
        # Smoke test: print ten random sentences.
        for s in oToBoCo.yieldRandomSentence():
            print(' '.join(s))
            i += 1
            if i == 10:
                break
    else:
        # Print each tuple together with two random contrast sentences.
        funcRandomIterator = oToBoCo.yieldRandomSentence()
        for t in oToBoCo.yieldTuple():
            aRandomTokens1 = next(funcRandomIterator)
            aRandomTokens2 = next(funcRandomIterator)
            print("s : %s\ns-1: %s\ns+1: %s\nr1 : %s\nr2 : %s\n" % (' '.join(t[0]),
                                                                    ' '.join(t[1]),
                                                                    ' '.join(t[2]),
                                                                    ' '.join(aRandomTokens1),
                                                                    ' '.join(aRandomTokens2)))
|
<reponame>SimonBiggs/platipy<filename>platipy/imaging/augment/defaug.py
from abc import ABC, abstractmethod
from collections.abc import Iterable
import random
import SimpleITK as sitk
from platipy.imaging.deformation_fields.deformation_field_operations import (
generate_field_shift,
generate_field_expand,
get_bone_mask,
)
from platipy.imaging.registration.registration import apply_field
def apply_augmentation(image, augmentation, masks=None):
    """Apply one or more deformable augmentations to an image (and masks).

    Args:
        image (SimpleITK.Image): The image to deform.
        augmentation (DeformableAugment or iterable of DeformableAugment):
            Augmentations whose transforms are composed and whose
            deformation fields are summed.
        masks (list, optional): Structure masks deformed with the same
            transform using nearest-neighbour interpolation.

    Returns:
        (image_deformed, masks_deformed, dvf) when masks are supplied,
        otherwise (image_deformed, dvf).

    Raises:
        AttributeError: If image or any augmentation has the wrong type.
    """
    # BUGFIX: replaced the mutable default argument `masks=[]` with None
    # (backward compatible — masks was only read, never mutated, but the
    # shared-default pattern is a latent hazard).
    if masks is None:
        masks = []

    if not isinstance(image, sitk.Image):
        raise AttributeError("image should be a SimpleITK.Image")

    if isinstance(augmentation, DeformableAugment):
        augmentation = [augmentation]

    if not isinstance(augmentation, Iterable):
        raise AttributeError(
            "augmentation must be a DeformableAugment or an iterable (such as list) of"
            "DeformableAugment's"
        )

    # Compose all transforms and accumulate the total deformation field.
    transforms = []
    dvf = None
    for aug in augmentation:
        if not isinstance(aug, DeformableAugment):
            raise AttributeError("Each augmentation must be of type DeformableAugment")
        tfm, field = aug.augment()
        transforms.append(tfm)
        if dvf is None:
            dvf = field
        else:
            dvf += field
    transform = sitk.CompositeTransform(transforms)
    del transforms

    # Pad with the image's minimum intensity so deformed borders look sane.
    image_deformed = apply_field(
        image,
        transform,
        structure=False,
        default_value=int(sitk.GetArrayViewFromImage(image).min()),
    )

    masks_deformed = []
    for mask in masks:
        masks_deformed.append(apply_field(mask, transform=transform, structure=True, interp=1))

    if masks:
        return image_deformed, masks_deformed, dvf
    return image_deformed, dvf
def generate_random_augmentation(ct_image, masks):
    """Build a randomised list of DeformableAugment objects, one per mask.

    Note: `masks` is shuffled in place. A list-valued sampling spec draws
    one integer per dimension; a tuple-valued spec draws a single integer
    from the [lo, hi] range; `bone_mask=True` is resolved to an actual bone
    mask derived from `ct_image`.
    """
    random.shuffle(masks)

    # Candidate augmentation classes with per-argument sampling specs.
    augmentation_types = [
        {
            "class": ShiftAugment,
            "args": {"vector_shift": [(-10, 10), (10, 10), (-10, 10)], "gaussian_smooth": (3, 5)},
        },
        {
            "class": ContractAugment,
            "args": {
                "vector_contract": [(0, 10), (0, 10), (0, 10)],
                "gaussian_smooth": (3, 5),
                "bone_mask": True,
            },
        },
        {
            "class": ExpandAugment,
            "args": {
                "vector_expand": [(0, 10), (0, 10), (0, 10)],
                "gaussian_smooth": (3, 5),
                "bone_mask": True,
            },
        },
    ]

    augmentation = []
    for mask in masks:
        chosen = random.choice(augmentation_types)
        kwargs = {}
        for name, spec in chosen["args"].items():
            if isinstance(spec, list):
                # One random sample per dimension.
                sampled = [random.randint(lo, hi) for (lo, hi) in spec]
            elif isinstance(spec, tuple):
                # A single scalar sampled from the range.
                sampled = random.randint(spec[0], spec[1])
            else:
                sampled = spec
            if name == "bone_mask" and spec:
                sampled = get_bone_mask(ct_image)
            kwargs[name] = sampled
        augmentation.append(chosen["class"](mask, **kwargs))

    return augmentation
class DeformableAugment(ABC):
    """Interface for a single mask-driven deformable augmentation."""

    @abstractmethod
    def augment(self):
        """Return a (transform, deformation_field) pair for this augment."""
class ShiftAugment(DeformableAugment):
    """Augmentation that rigidly shifts the region covered by a mask."""

    def __init__(self, mask, vector_shift=(10, 10, 10), gaussian_smooth=5):
        self.mask = mask
        self.vector_shift = vector_shift
        self.gaussian_smooth = gaussian_smooth

    def augment(self):
        """Build the shift transform and its deformation vector field."""
        shift_result = generate_field_shift(
            self.mask,
            self.vector_shift,
            self.gaussian_smooth,
        )
        _, transform, dvf = shift_result
        return transform, dvf
class ExpandAugment(DeformableAugment):
    """Augmentation that expands the region covered by a mask."""

    def __init__(self, mask, vector_expand=(10, 10, 10), gaussian_smooth=5, bone_mask=False):
        self.mask = mask
        self.vector_expand = vector_expand
        self.gaussian_smooth = gaussian_smooth
        self.bone_mask = bone_mask

    def augment(self):
        """Build the expansion transform and its deformation vector field."""
        expand_result = generate_field_expand(
            self.mask,
            bone_mask=self.bone_mask,
            expand=self.vector_expand,
            gaussian_smooth=self.gaussian_smooth,
        )
        _, transform, dvf = expand_result
        return transform, dvf
class ContractAugment(DeformableAugment):
    """Augmentation that contracts a masked region (negative expansion)."""

    def __init__(self, mask, vector_contract=(10, 10, 10), gaussian_smooth=5, bone_mask=False):
        self.mask = mask
        # Convert the physical contraction extents to negated voxel counts,
        # matching the sign convention expected by generate_field_expand.
        self.contract = [
            int(-extent / spacing)
            for extent, spacing in zip(vector_contract, mask.GetSpacing())
        ]
        self.gaussian_smooth = gaussian_smooth
        self.bone_mask = bone_mask

    def augment(self):
        """Build the contraction transform and its deformation vector field."""
        contract_result = generate_field_expand(
            self.mask,
            bone_mask=self.bone_mask,
            expand=self.contract,
            gaussian_smooth=self.gaussian_smooth,
        )
        _, transform, dvf = contract_result
        return transform, dvf
|
<gh_stars>10-100
# Copyright (c) Nanjing University, Vision Lab.
# Last update:
# 2020.11.26
# 2019.11.13
# 2019.10.27
# 2019.10.07
# 2019.10.08
import os
import argparse
import numpy as np
import tensorflow as tf
import time
import importlib
import subprocess
tf.enable_eager_execution()
import models.model_voxception as model
from models.entropy_model import EntropyBottleneck
from models.conditional_entropy_model import SymmetricConditional
################### Compression Network (with factorized entropy model) ###################
def compress_factorized(cubes, model, ckpt_dir):
    """Compress cubes to bitstream.

    Input: cubes with shape [batch size, length, width, height, channel(1)].
    Output: compressed bitstream.
    """
    print('===== Compress =====')
    # Build the encoder side of the network and restore its weights from the
    # latest checkpoint in ckpt_dir.
    analysis_transform = model.AnalysisTransform()
    entropy_bottleneck = EntropyBottleneck()
    checkpoint = tf.train.Checkpoint(analysis_transform=analysis_transform,
                                     estimator=entropy_bottleneck)
    status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))

    x = tf.convert_to_tensor(cubes, "float32")

    def loop_analysis(element):
        # Encode one cube at a time to keep the memory footprint small.
        return tf.squeeze(analysis_transform(tf.expand_dims(element, 0)))

    start = time.time()
    ys = tf.map_fn(loop_analysis, x, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Analysis Transform: {}s".format(round(time.time()-start, 4)))

    start = time.time()
    strings, min_v, max_v = entropy_bottleneck.compress(ys)
    shape = tf.shape(ys)[:]
    print("Entropy Encode: {}s".format(round(time.time()-start, 4)))

    return strings, min_v, max_v, shape
def decompress_factorized(strings, min_v, max_v, shape, model, ckpt_dir):
    """Decompress bitstream to cubes.

    Input: compressed bitstream.
    Output: cubes with shape [batch size, length, width, height, channel(1)]
    """
    print('===== Decompress =====')
    # Build the decoder side of the network and restore its weights from the
    # latest checkpoint in ckpt_dir.
    synthesis_transform = model.SynthesisTransform()
    entropy_bottleneck = EntropyBottleneck()
    checkpoint = tf.train.Checkpoint(synthesis_transform=synthesis_transform,
                                     estimator=entropy_bottleneck)
    status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))

    start = time.time()
    ys = entropy_bottleneck.decompress(strings, min_v, max_v, shape, shape[-1])
    print("Entropy Decode: {}s".format(round(time.time()-start, 4)))

    def loop_synthesis(latent):
        # Decode one latent cube at a time.
        return tf.squeeze(synthesis_transform(tf.expand_dims(latent, 0)), [0])

    start = time.time()
    xs = tf.map_fn(loop_synthesis, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Synthesis Transform: {}s".format(round(time.time()-start, 4)))

    return xs
################### Compression Network (with conditional entropy model) ###################
def compress_hyper(cubes, model, ckpt_dir, decompress=False):
    """Compress cubes to bitstream using the hyperprior (conditional) model.

    Input: cubes with shape [batch size, length, width, height, channel(1)].
    Output: y/z bitstreams plus their value ranges and shapes; when
    `decompress` is True, the reconstructed cubes are returned as well
    (useful for measuring end-to-end distortion without re-reading the
    bitstream).
    """
    print('===== Compress =====')
    # load model.
    #model = importlib.import_module(model)
    analysis_transform = model.AnalysisTransform()
    synthesis_transform = model.SynthesisTransform()
    hyper_encoder = model.HyperEncoder()
    hyper_decoder = model.HyperDecoder()
    entropy_bottleneck = EntropyBottleneck()
    conditional_entropy_model = SymmetricConditional()
    # All sub-networks are registered on one Checkpoint object so variable
    # names match the training-time object graph on restore.
    checkpoint = tf.train.Checkpoint(analysis_transform=analysis_transform,
                                     synthesis_transform=synthesis_transform,
                                     hyper_encoder=hyper_encoder,
                                     hyper_decoder=hyper_decoder,
                                     estimator=entropy_bottleneck)
    status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
    x = tf.convert_to_tensor(cubes, "float32")
    def loop_analysis(x):
        # Encode one cube at a time (map_fn) to bound memory usage.
        x = tf.expand_dims(x, 0)
        y = analysis_transform(x)
        return tf.squeeze(y)
    start = time.time()
    ys = tf.map_fn(loop_analysis, x, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Analysis Transform: {}s".format(round(time.time()-start, 4)))
    def loop_hyper_encoder(y):
        # Derive the hyperprior z from the latent y, one cube at a time.
        y = tf.expand_dims(y, 0)
        z = hyper_encoder(y)
        return tf.squeeze(z)
    start = time.time()
    zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Hyper Encoder: {}s".format(round(time.time()-start, 4)))
    # Quantize z (training=False selects rounding rather than noise).
    z_hats, _ = entropy_bottleneck(zs, False)
    print("Quantize hyperprior.")
    def loop_hyper_deocder(z):
        # (sic: 'deocder') Predict mean/scale of y's distribution from z.
        z = tf.expand_dims(z, 0)
        loc, scale = hyper_decoder(z)
        return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])
    start = time.time()
    locs, scales = tf.map_fn(loop_hyper_deocder, z_hats, dtype=(tf.float32, tf.float32),
                             parallel_iterations=1, back_prop=False)
    # Floor on the predicted scales to keep the conditional entropy model
    # numerically stable.  TODO: make this configurable.
    lower_bound = 1e-9
    scales = tf.maximum(scales, lower_bound)
    print("Hyper Decoder: {}s".format(round(time.time()-start, 4)))
    start = time.time()
    # z is coded with the factorized (hyper) entropy model.
    z_strings, z_min_v, z_max_v = entropy_bottleneck.compress(zs)
    z_shape = tf.shape(zs)[:]
    print("Entropy Encode (Hyper): {}s".format(round(time.time()-start, 4)))
    start = time.time()
    # y_strings, y_min_v, y_max_v = conditional_entropy_model.compress(ys, locs, scales)
    # y_shape = tf.shape(ys)[:]
    def loop_range_encode(args):
        # Arithmetic-encode one latent cube with its predicted distribution.
        y, loc, scale = args
        y = tf.expand_dims(y, 0)
        loc = tf.expand_dims(loc, 0)
        scale = tf.expand_dims(scale, 0)
        y_string, y_min_v, y_max_v = conditional_entropy_model.compress(y, loc, scale)
        return y_string, y_min_v, y_max_v
    args = (ys, locs, scales)
    y_strings, y_min_vs, y_max_vs = tf.map_fn(loop_range_encode, args,
                                              dtype=(tf.string, tf.int32, tf.int32),
                                              parallel_iterations=1, back_prop=False)
    # Per-cube shape: batch dimension of 1 prepended to the latent shape.
    y_shape = tf.convert_to_tensor(np.insert(tf.shape(ys)[1:].numpy(), 0, 1))
    print("Entropy Encode: {}s".format(round(time.time()-start, 4)))
    if decompress:
        # Optional round-trip: decode y and synthesize the cubes again so the
        # caller can evaluate reconstruction quality in one pass.
        start = time.time()
        def loop_range_decode(args):
            y_string, loc, scale, y_min_v, y_max_v = args
            loc = tf.expand_dims(loc, 0)
            scale = tf.expand_dims(scale, 0)
            y_decoded = conditional_entropy_model.decompress(y_string, loc, scale, y_min_v, y_max_v, y_shape)
            return tf.squeeze(y_decoded, 0)
        args = (y_strings, locs, scales, y_min_vs, y_max_vs)
        y_decodeds = tf.map_fn(loop_range_decode, args, dtype=tf.float32, parallel_iterations=1, back_prop=False)
        print("Entropy Decode: {}s".format(round(time.time()-start, 4)))
        def loop_synthesis(y):
            y = tf.expand_dims(y, 0)
            x = synthesis_transform(y)
            return tf.squeeze(x, [0])
        start = time.time()
        x_decodeds = tf.map_fn(loop_synthesis, y_decodeds, dtype=tf.float32, parallel_iterations=1, back_prop=False)
        print("Synthesis Transform: {}s".format(round(time.time()-start, 4)))
        return y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, x_decodeds
    return y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape
def decompress_hyper(y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, model, ckpt_dir):
    """Decompress bitstream to cubes.

    Input: compressed bitstream. latent representations (y) and hyper prior (z).
    Output: cubes with shape [batch size, length, width, height, channel(1)]
    """
    print('===== Decompress =====')
    # load model.
    #model = importlib.import_module(model)
    synthesis_transform = model.SynthesisTransform()
    hyper_encoder = model.HyperEncoder()
    hyper_decoder = model.HyperDecoder()
    entropy_bottleneck = EntropyBottleneck()
    conditional_entropy_model = SymmetricConditional()
    # hyper_encoder is not used for decoding, but it is registered on the
    # Checkpoint so the saved object graph matches on restore.
    checkpoint = tf.train.Checkpoint(synthesis_transform=synthesis_transform,
                                     hyper_encoder=hyper_encoder,
                                     hyper_decoder=hyper_decoder,
                                     estimator=entropy_bottleneck)
    status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
    start = time.time()
    # First recover the hyperprior z with the factorized entropy model.
    zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1])
    print("Entropy Decoder (Hyper): {}s".format(round(time.time()-start, 4)))
    def loop_hyper_deocder(z):
        # (sic: 'deocder') Predict mean/scale of y's distribution from z.
        z = tf.expand_dims(z, 0)
        loc, scale = hyper_decoder(z)
        return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])
    start = time.time()
    locs, scales = tf.map_fn(loop_hyper_deocder, zs, dtype=(tf.float32, tf.float32),
                             parallel_iterations=1, back_prop=False)
    # Floor on the predicted scales — must mirror the encoder-side bound so
    # both sides build identical probability tables.  TODO: deduplicate.
    lower_bound = 1e-9
    scales = tf.maximum(scales, lower_bound)
    print("Hyper Decoder: {}s".format(round(time.time()-start, 4)))
    start = time.time()
    # ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape)
    def loop_range_decode(args):
        # Arithmetic-decode one latent cube given its predicted distribution.
        y_string, loc, scale, y_min_v, y_max_v = args
        loc = tf.expand_dims(loc, 0)
        scale = tf.expand_dims(scale, 0)
        y_decoded = conditional_entropy_model.decompress(y_string, loc, scale, y_min_v, y_max_v, y_shape)
        return tf.squeeze(y_decoded, 0)
    args = (y_strings, locs, scales, y_min_vs, y_max_vs)
    ys = tf.map_fn(loop_range_decode, args, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Entropy Decoder: {}s".format(round(time.time()-start, 4)))
    def loop_synthesis(y):
        # Synthesize one cube at a time from its decoded latent.
        y = tf.expand_dims(y, 0)
        x = synthesis_transform(y)
        return tf.squeeze(x, [0])
    start = time.time()
    xs = tf.map_fn(loop_synthesis, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Synthesis Transform: {}s".format(round(time.time()-start, 4)))
    return xs
|
<filename>psydac/mapping/analytical.py
# coding: utf-8
#
# Copyright 2018 <NAME>
import numpy as np
import sympy as sym
from abc import ABCMeta
from psydac.mapping.basic import Mapping
__all__ = ['IdentityMapping','SymbolicMapping','AnalyticalMapping']
#==============================================================================
class SymbolicMapping:
    """ Coordinate transformation from parametric space (eta)
    to physical space (x).

    Object is purely symbolic; derived quantities (Jacobian, metric,
    metric determinant) are computed lazily and cached on first access.
    """
    def __init__( self, eta_symbols, map_expressions ):
        # BUGFIX: removed the redundant function-local 'import sympy as sym'
        # which shadowed the module-level import.
        self._eta = sym.Array( eta_symbols )
        self._map = sym.Array( map_expressions )

    #--------------------------------------------------------------------------
    def compute_derivatives( self, max_order=1 ):
        """Store derivative tensors of the mapping up to 'max_order'
        (tensors[0] is the mapping itself, tensors[k] its k-th derivative)."""
        tensors    = [None]*(max_order+1)
        tensors[0] = self._map
        for i in range( max_order ):
            tensors[i+1] = sym.derive_by_array( tensors[i], self._eta )
        self._derivs_tensors = tensors

    #--------------------------------------------------------------------------
    @property
    def eta( self ):
        """Symbols of the parametric (logical) coordinates."""
        return self._eta

    @property
    def map( self ):
        """Symbolic expressions of the physical coordinates."""
        return self._map

    @property
    def jac_mat( self ):
        """Jacobian matrix dx/deta (computed once, then cached)."""
        if not hasattr( self, '_jac_mat' ):
            self._jac_mat = sym.Matrix( self._map ).jacobian( self._eta )
        return self._jac_mat

    @property
    def metric( self ):
        """Metric tensor J^T J (computed once, then cached)."""
        # BUGFIX: only build the Jacobian on a cold cache; the original
        # accessed self.jac_mat on every call before checking the cache.
        if not hasattr( self, '_metric' ):
            jm = self.jac_mat
            self._metric = sym.simplify( jm.T * jm )
        return self._metric

    @property
    def metric_det( self ):
        """Determinant of the metric tensor (computed once, then cached)."""
        if not hasattr( self, '_metric_det' ):
            self._metric_det = self.metric.det().simplify()
        return self._metric_det

    @property
    def ldim( self ):
        """Number of logical (parametric) dimensions."""
        return len( self._eta )

    @property
    def pdim( self ):
        """Number of physical dimensions."""
        return len( self._map )
#==============================================================================
class AnalyticalMappingMeta( ABCMeta ):
    """Metaclass that turns declarative subclasses of 'AnalyticalMapping'
    (which provide 'eta_symbols', 'expressions' and 'default_params' as class
    attributes) into mappings backed by a shared SymbolicMapping."""
    #--------------------------------------------------------------------------
    # Overwrite class creation for any subclass of 'AnalyticalMapping'
    #--------------------------------------------------------------------------
    def __new__( meta, name, bases, dct ):

        if name != 'AnalyticalMapping':
            # Subclasses must derive directly from 'AnalyticalMapping' and
            # declare the three symbolic attributes checked below.
            assert bases == (AnalyticalMapping,)
            for key in ['eta_symbols', 'expressions', 'default_params']:
                if key not in dct.keys():
                    raise TypeError( "Missing attribute '{}' ".format( key ) +
                                     "when subclassing 'AnalyticalMapping'." )

            # Sympify the declarations and build the symbolic mapping ONCE
            # per subclass; all instances share it via the class.
            eta_symbols = sym.sympify( tuple( dct['eta_symbols'] ) )
            expressions = sym.sympify( tuple( dct['expressions'] ) )
            symbolic    = SymbolicMapping( eta_symbols, expressions )
            defaults    = dct['default_params']

            # The raw declarations are consumed here; remove them so they do
            # not linger as plain class attributes on the created class.
            del dct['eta_symbols']
            del dct['expressions']
            del dct['default_params']

            cls = super().__new__( meta, name, bases, dct )
            cls._symbolic       = symbolic
            cls._default_params = defaults
        else:
            # The abstract helper base itself is created unchanged.
            cls = super().__new__( meta, name, bases, dct )

        return cls

    #--------------------------------------------------------------------------
    # Add class properties to any subclass of 'AnalyticMapping'
    #--------------------------------------------------------------------------
    @property
    def symbolic( cls ):
        """Shared SymbolicMapping of the subclass (class-level property)."""
        return cls._symbolic

    # ...
    @property
    def default_params( cls ):
        """Default parameter values declared by the subclass."""
        return cls._default_params

    #--------------------------------------------------------------------------
    # Forbid instantiation of 'AnalyticMapping' base class
    #--------------------------------------------------------------------------
    def __call__( cls, *args, **kwargs ):
        if cls.__name__ == 'AnalyticalMapping':
            raise TypeError("Can't instantiate helper class 'AnalyticalMapping'")
        else:
            return super().__call__( *args, **kwargs )
#==============================================================================
class AnalyticalMapping( Mapping, metaclass=AnalyticalMappingMeta ):
    """Helper base class for analytical mappings: subclasses declare the
    symbols/expressions, and instances compile them (with concrete parameter
    values substituted) into fast numpy callables."""

    def __init__( self, **kwargs ):
        # Extract symbolic information stored on the class by the metaclass.
        cls = type( self )
        eta_symbols = tuple( cls.symbolic.eta )

        # Merge user-supplied parameters over the class defaults.
        params = cls.default_params.copy()
        params.update( kwargs )

        def make_callable( symbolic_expr ):
            # Substitute parameters, simplify, then compile to numpy.
            expr = sym.simplify( symbolic_expr.subs( params ) )
            return sym.lambdify( [eta_symbols], expr, 'numpy' )

        # Compile the mapping and its derived quantities.
        self._func_eval       = make_callable( cls.symbolic.map )
        self._func_jac_mat    = make_callable( cls.symbolic.jac_mat )
        self._func_metric     = make_callable( cls.symbolic.metric )
        self._func_metric_det = make_callable( cls.symbolic.metric_det )

        # Store mapping parameters
        self._params = params

    #--------------------------------------------------------------------------
    # Abstract interface
    #--------------------------------------------------------------------------
    def __call__( self, eta ):
        return self._func_eval( eta )

    def jac_mat( self, eta ):
        return self._func_jac_mat( eta )

    def metric( self, eta ):
        return self._func_metric( eta )

    def metric_det( self, eta ):
        return self._func_metric_det( eta )

    @property
    def ldim( self ):
        return type( self ).symbolic.ldim

    @property
    def pdim( self ):
        return type( self ).symbolic.pdim

    #--------------------------------------------------------------------------
    # Symbolic information
    #--------------------------------------------------------------------------
    @property
    def params( self ):
        return self._params
#==============================================================================
class IdentityMapping( Mapping ):
    """Identity map x = eta on an 'ndim'-dimensional space."""

    def __init__( self, ndim ):
        self._ndim = int( ndim )

    #--------------------------------------------------------------------------
    # Abstract interface
    #--------------------------------------------------------------------------
    def __call__( self, eta ):
        # The identity simply returns the logical coordinates.
        return eta

    def jac_mat( self, eta ):
        # Constant Jacobian: the identity matrix.
        return np.eye( self._ndim )

    def metric( self, eta ):
        # J^T J is also the identity.
        return np.eye( self._ndim )

    def metric_det( self, eta ):
        # det(I) = 1 everywhere.
        return 1.0

    @property
    def ldim( self ):
        return self._ndim

    @property
    def pdim( self ):
        return self._ndim
#==============================================================================
# class AffineMap( Mapping ):
# """ Linear transformation from parametric to physical space.
# """
# def __init__( self, x0, jac_mat ):
#
# x0 = np.asarray( x0 )
# jm = np.asarray( jac_mat )
#
# # Check input data
# assert x0.ndim == 1
# assert jm.ndim == 2
# assert jm.shape[0] == x0.shape[0]
# assert jm.shape[1] >= x0.shape[0]
#
# # Number of physical and logical dimensions
# pdim, ldim = jm.shape
#
# # Components of metric tensor and matrix determinant
# metric = np.dot( jm.T, jm )
# metric_det = np.linalg.det( metric )
#
# # Store data in object
# self._x0 = x0
# self._jm = jm
# self._ldim = ldim
# self._pdim = pdim
# self._metric = metric
# self._metric_det = metric_det
#
# #--------------------------------------------------------------------------
# def __call__( self, eta ):
# return self._x0 + np.dot( self._jm, eta )
#
# # ...
# def jac_mat( self, eta ):
# return self._jm
#
# # ...
# def metric( self, eta ):
# return self._metric
#
# # ...
# def metric_det( self, eta ):
# return self._metric_det
#
# # ...
# @property
# def ldim( self ):
# return self._ldim
#
# # ...
# @property
# def pdim( self ):
# return self._pdim
|
<reponame>Stilwell-Git/Randomized-Return-Decomposition
import copy
import numpy as np
class Episode_FrameStack:
    """One episode stored as a flat list of single frames.

    Observations are rebuilt on demand by stacking `frames` consecutive raw
    frames along the last axis (see get_obs), so every frame is kept in
    memory exactly once instead of `frames` times.
    """

    def __init__(self, info):
        # Keys handled explicitly below; any other key in `info` gets its
        # own per-step list.
        self.common_info = [
            'obs', 'obs_next', 'frame_next',
            'acts', 'rews', 'done'
        ]
        self.ep = {
            'obs': [],
            'acts': [],
            'rews': [],
            'done': []
        }
        for key in info.keys():
            if key not in self.common_info:
                self.ep[key] = []
        self.ep_len = 0
        self.sum_rews = 0.0
        # Number of frames stacked into one observation (last axis of obs).
        self.frames = info['obs'].shape[-1]
        # Seed the frame list with the frames of the initial observation.
        # NOTE(review): assumes 2-D (H, W) frames stacked on the last axis.
        for i in range(self.frames):
            self.ep['obs'].append(copy.deepcopy(info['obs'][:, :, i]))

    def insert(self, info):
        """Append one transition; only the newest raw frame is stored."""
        self.ep_len += 1
        self.sum_rews += info['rews']
        self.ep['obs'].append(copy.deepcopy(info['frame_next']))
        self.ep['acts'].append(copy.deepcopy(info['acts']))
        self.ep['rews'].append(copy.deepcopy(info['rews']))
        self.ep['done'].append(copy.deepcopy(info['done']))
        for key in info.keys():
            if key not in self.common_info:
                self.ep[key].append(copy.deepcopy(info[key]))

    def get_obs(self, idx):
        """Rebuild the stacked observation after step `idx`.

        Returns a float32 array scaled to [0, 1] (frames are stored uint8).
        """
        idx += 1
        obs = np.stack(self.ep['obs'][idx:idx+self.frames], axis=-1)
        return obs.astype(np.float32)/255.0

    def sample(self):
        """Sample one uniformly random transition with its true reward."""
        idx = np.random.randint(self.ep_len)
        info = {
            'obs': self.get_obs(idx-1),
            'obs_next': self.get_obs(idx),
            'acts': copy.deepcopy(self.ep['acts'][idx]),
            'rews': [copy.deepcopy(self.ep['rews'][idx])],
            'done': [copy.deepcopy(self.ep['done'][idx])]
        }
        for key in self.ep.keys():
            if (key not in self.common_info) and (key not in info.keys()):
                info[key] = copy.deepcopy(self.ep[key][idx])
        return info

    def sample_ircr(self):
        """Sample one transition with the episode return as its reward."""
        idx = np.random.randint(self.ep_len)
        info = {
            'obs': self.get_obs(idx-1),
            'obs_next': self.get_obs(idx),
            'acts': copy.deepcopy(self.ep['acts'][idx]),
            'rews': [self.sum_rews],  # critical step of IRCR
            'done': [copy.deepcopy(self.ep['done'][idx])]
        }
        for key in self.ep.keys():
            if (key not in self.common_info) and (key not in info.keys()):
                info[key] = copy.deepcopy(self.ep[key][idx])
        return info

    def sample_rrd(self, sample_size, store_coef=False):
        """Sample `sample_size` transitions for randomized return
        decomposition; 'rrd_rews' is the per-step average episode return.
        """
        # BUGFIX: removed a dead `idx = np.random.choice(...)` draw whose
        # result was overwritten on every loop iteration below.
        # NOTE(review): sampling is therefore always WITH replacement —
        # confirm whether the dead draw's `replace=` logic was intended.
        info = {
            'rrd_obs': [],
            'rrd_obs_next': [],
            'rrd_acts': [],
            'rrd_rews': [self.sum_rews/self.ep_len]
        }
        for _ in range(sample_size):
            idx = np.random.randint(self.ep_len)
            info['rrd_obs'].append(self.get_obs(idx-1))
            info['rrd_obs_next'].append(self.get_obs(idx))
            info['rrd_acts'].append(copy.deepcopy(self.ep['acts'][idx]))
        if store_coef:
            # Variance-correction coefficient for the unbiased estimator.
            if (sample_size <= self.ep_len) and (self.ep_len > 1):
                info['rrd_var_coef'] = [1.0 - float(sample_size)/self.ep_len]
            else:
                info['rrd_var_coef'] = [1.0 if self.ep_len > 1 else 0.0]
        return info
class ReplayBuffer_FrameStack:
    """Episode-based replay buffer for frame-stacked observations.

    Transitions are stored per-episode (Episode_FrameStack); sampling picks
    a transition uniformly over all stored transitions via `ram_idx`, which
    maps each transition to its episode id.
    """
    def __init__(self, args):
        self.args = args
        # True when the next store_transition call starts a new episode.
        self.in_head = True
        self.ep_counter = 0      # total episodes ever started
        self.step_counter = 0    # total transitions ever stored
        self.buffer_size = self.args.buffer_size
        self.ep = []             # live Episode_FrameStack objects, oldest first
        self.length = 0          # number of transitions currently stored
        self.head_idx = 0        # episode id of self.ep[0] (eviction offset)
        self.ram_idx = []        # per-transition episode id, for uniform sampling
        # The sampling strategy is fixed up-front by the algorithm choice.
        self.sample_batch = {
            'dqn': self.sample_batch_dqn,
            'ircr': self.sample_batch_ircr,
            'rrd': self.sample_batch_rrd,
        }[args.alg]

    def store_transition(self, info):
        """Append one transition, starting a new episode when needed and
        evicting the oldest episode once the buffer overflows."""
        if self.in_head:
            # Previous transition ended an episode (or this is the first):
            # open a new episode seeded with the initial observation.
            new_ep = Episode_FrameStack(info)
            self.ep.append(new_ep)
        self.ep[-1].insert(info)
        self.ram_idx.append(self.ep_counter)
        self.length += 1
        if self.length>self.buffer_size:
            # Evict the whole oldest episode and shift the bookkeeping.
            del_len = self.ep[0].ep_len
            self.ep.pop(0)
            self.head_idx += 1
            self.length -= del_len
            self.ram_idx = self.ram_idx[del_len:]
        self.step_counter += 1
        self.in_head = info['done']
        if info['done']:
            self.ep_counter += 1

    def sample_batch_dqn(self, batch_size=-1):
        """Uniformly sample a batch of single transitions (true rewards)."""
        if batch_size==-1: batch_size = self.args.batch_size
        batch = dict(obs=[], obs_next=[], acts=[], rews=[], done=[])
        for i in range(batch_size):
            # ram_idx gives the episode id; subtract head_idx to index self.ep.
            idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx
            info = self.ep[idx].sample()
            for key in info.keys():
                batch[key].append(info[key])
        return batch

    def sample_batch_ircr(self, batch_size=-1):
        """Sample a batch where each reward is the full episode return."""
        if batch_size==-1: batch_size = self.args.batch_size
        batch = dict(obs=[], obs_next=[], acts=[], rews=[], done=[])
        for i in range(batch_size):
            idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx
            info = self.ep[idx].sample_ircr() # critical step of IRCR
            for key in info.keys():
                batch[key].append(info[key])
        return batch

    def sample_batch_rrd(self, batch_size=-1, rrd_batch_size=-1, rrd_sample_size=-1):
        """Sample a DQN-style batch plus grouped RRD sub-batches of
        `rrd_sample_size` transitions sharing one average-return target."""
        if batch_size==-1: batch_size = self.args.batch_size
        if rrd_batch_size==-1: rrd_batch_size = self.args.rrd_batch_size
        if rrd_sample_size==-1: rrd_sample_size = self.args.rrd_sample_size
        batch = dict(obs=[], obs_next=[], acts=[], rews=[], done=[], rrd_obs=[], rrd_obs_next=[], rrd_acts=[], rrd_rews=[])
        if self.args.rrd_bias_correction:
            batch['rrd_var_coef'] = []
        for i in range(batch_size):
            idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx
            info = self.ep[idx].sample()
            for key in info.keys():
                batch[key].append(info[key])
        # One RRD group per iteration, all drawn from the same episode.
        for i in range(rrd_batch_size//rrd_sample_size):
            idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx
            info = self.ep[idx].sample_rrd(rrd_sample_size, store_coef=self.args.rrd_bias_correction)
            for key in info.keys():
                batch[key].append(info[key])
        return batch
|
<reponame>Nowasky/PerfKitBenchmarker<filename>tests/linux_benchmarks/cuda_memcpy_benchmark_test.py
# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nccl_benchmark."""
import os
import unittest
from absl import flags
import mock
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import cuda_memcpy_benchmark
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import nvidia_driver
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class CudaMemcpyBenchmarkTest(pkb_common_test_case.PkbCommonTestCase,
                              test_util.SamplesTestMixin):
  """Unit tests for the cuda_memcpy benchmark's Run() phase."""

  def setUp(self) -> None:
    super(CudaMemcpyBenchmarkTest, self).setUp()
    # Stub out GPU discovery and CUDA metadata collection.
    self.enter_context(mock.patch.object(
        nvidia_driver, 'QueryNumberOfGpus', return_value=1))
    self.enter_context(mock.patch.object(
        cuda_toolkit, 'GetMetadata', return_value={}))

  def CudaOutput(self) -> str:
    """Read the canned bandwidthTest output fixture from the data dir."""
    fixture = os.path.join(os.path.dirname(__file__), '..', 'data',
                           'cuda_memcpy_output.txt')
    with open(fixture) as reader:
      return reader.read()

  def MockVm(self) -> mock.Mock:
    """Build a VM mock whose remote command returns the fixture output."""
    mock_vm = mock.Mock()
    mock_vm.RemoteCommandWithReturnCode.return_value = self.CudaOutput(), '', 0
    return mock_vm

  @mock.patch.object(nvidia_driver, 'CheckNvidiaSmiExists', return_value=True)
  def testCmd(self, check_nvidia_smi_exists: mock.Mock) -> None:
    mock_vm = self.MockVm()

    cuda_memcpy_benchmark.Run(mock.Mock(vms=[mock_vm]))

    # The benchmark must invoke bandwidthTest with the expected flags.
    mock_vm.RemoteCommandWithReturnCode.assert_called_with(
        '/usr/local/cuda/extras/demo_suite/bandwidthTest --csv --memory=pinned '
        '--mode=quick --htod --dtoh --dtod --device=0', ignore_failure=True)

  @mock.patch.object(nvidia_driver, 'CheckNvidiaSmiExists', return_value=True)
  def testSample(self, check_nvidia_smi_exists: mock.Mock) -> None:
    actual = cuda_memcpy_benchmark.Run(mock.Mock(vms=[self.MockVm()]))

    # First parsed sample: pinned host-to-device bandwidth with metadata.
    expected = sample.Sample(
        'H2D-Pinned', 8494.3, 'MB/s',
        {
            'time': 0.00377,
            'size': 33554432,
            'NumDevsUsed': '1',
            'device': 0,
            'command':
                '/usr/local/cuda/extras/demo_suite/bandwidthTest --csv '
                '--memory=pinned --mode=quick --htod --dtoh --dtod --device=0',
            'memory': 'pinned',
            'mode': 'quick',
            'htod': True,
            'dtoh': True,
            'dtod': True,
            'wc': False,
        }
    )
    self.assertSamplesEqualUpToTimestamp(expected, actual[0])

  @mock.patch.object(nvidia_driver, 'CheckNvidiaSmiExists', return_value=False)
  def testEmptySample(self, check_nvidia_smi_exists: mock.Mock) -> None:
    # Without nvidia-smi the benchmark should produce no samples at all.
    actual = cuda_memcpy_benchmark.Run(mock.Mock(vms=[self.MockVm()]))

    self.assertLen(actual, 0)
# Allow running this test module directly (test runners discover it otherwise).
if __name__ == '__main__':
    unittest.main()
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import pickle
import math
import copy
import numpy as np
import pandas as pd
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scripts.load_animals import load_animals
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from sklearn.metrics.pairwise import rbf_kernel
from influence.inceptionModel import BinaryInceptionModel
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.dataset_poisoning import generate_inception_features
def run_rbf_comparison():
    """Compare influence estimates of an RBF-kernel smooth-hinge model with
    its kernel weights on the dog/fish image dataset.

    Trains a hard hinge model (temp=0) on Cholesky-factored RBF kernel
    features, copies its weights into a smoothed hinge model (temp=0.001),
    computes per-test-point influence scores, and pickles the results to
    output/rbf_results.p.

    Fixes vs. the original:
    - pickle output file is now closed via a with-statement (handle leaked)
    - removed dead x_test/y_test list builds that were immediately shadowed
      inside the distance loop, and the unused dataset_name local
    - removed a stray non-Python artifact trailing the final statement
    """
    num_classes = 2
    num_train_ex_per_class = 900
    num_test_ex_per_class = 300

    image_data_sets = load_animals(
        num_train_ex_per_class=num_train_ex_per_class,
        num_test_ex_per_class=num_test_ex_per_class,
        classes=['dog', 'fish'])

    ### Generate kernelized feature vectors
    X_train = image_data_sets.train.x
    X_test = image_data_sets.test.x

    # Map {0, 1} labels to {-1, +1} for the hinge models.
    Y_train = np.copy(image_data_sets.train.labels) * 2 - 1
    Y_test = np.copy(image_data_sets.test.labels) * 2 - 1

    num_train = X_train.shape[0]
    num_test = X_test.shape[0]

    X_stacked = np.vstack((X_train, X_test))

    gamma = 0.05
    K = rbf_kernel(X_stacked, gamma=gamma / num_train)
    # Cholesky factor of the kernel matrix acts as an explicit feature map.
    L = slin.cholesky(K, lower=True)
    L_train = L[:num_train, :num_train]
    L_test = L[num_train:, :num_train]

    ### Compare top 5 influential examples from each network
    test_idxs = range(num_test)

    ## RBF model hyperparameters.
    input_channels = 1
    # NOTE: the original assigned weight_decay = 0.0001 and then overrode it
    # with 0.001; only the effective value is kept here.
    weight_decay = 0.001
    batch_size = num_train
    initial_learning_rate = 0.001
    keep_probs = None
    max_lbfgs_iter = 1000
    use_bias = False
    decay_epochs = [1000, 10000]

    tf.reset_default_graph()

    train = DataSet(L_train, Y_train)
    test = DataSet(L_test, Y_test)
    data_sets = base.Datasets(train=train, validation=None, test=test)
    input_dim = data_sets.train.x.shape[1]

    # Train with hard hinge (temp=0).
    rbf_model = SmoothHinge(
        temp=0,
        use_bias=use_bias,
        input_dim=input_dim,
        weight_decay=weight_decay,
        num_classes=num_classes,
        batch_size=batch_size,
        data_sets=data_sets,
        initial_learning_rate=initial_learning_rate,
        keep_probs=keep_probs,
        decay_epochs=decay_epochs,
        mini_batch=False,
        train_dir='output',
        log_dir='log',
        model_name='dogfish_rbf_hinge_t-0')
    rbf_model.train()
    hinge_W = rbf_model.sess.run(rbf_model.params)[0]

    # Then load weights into smoothed version.
    tf.reset_default_graph()
    rbf_model = SmoothHinge(
        temp=0.001,
        use_bias=use_bias,
        input_dim=input_dim,
        weight_decay=weight_decay,
        num_classes=num_classes,
        batch_size=batch_size,
        data_sets=data_sets,
        initial_learning_rate=initial_learning_rate,
        keep_probs=keep_probs,
        decay_epochs=decay_epochs,
        mini_batch=False,
        train_dir='output',
        log_dir='log',
        model_name='dogfish_rbf_hinge_t-0.001')
    params_feed_dict = {}
    params_feed_dict[rbf_model.W_placeholder] = hinge_W
    rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict)

    abs_weights = [abs(w) for w in hinge_W]

    # Per-test-point distances to all training points, and which training
    # labels disagree with the test label.
    distances, flipped_idxs = {}, {}
    for test_idx in test_idxs:
        x_test = X_test[test_idx, :]
        y_test = Y_test[test_idx]
        distances[test_idx] = dataset.find_distances(x_test, X_train)
        flipped_idxs[test_idx] = Y_train != y_test

    rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
    rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)

    influences = {}
    correlation_list, margin_list = [], []
    for i, test_idx in enumerate(test_idxs):
        rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
            [test_idx],
            np.arange(len(rbf_model.data_sets.train.labels)),
            force_refresh=True)
        influences[test_idx] = rbf_predicted_loss_diffs
        correlation_list.append(np.corrcoef(abs_weights, rbf_predicted_loss_diffs)[0, 1])
        margin_list.append(abs(rbf_margins_test[test_idx]))

    result = {
        'test_idxs': test_idxs,
        'distances': distances,
        'flipped_idxs': flipped_idxs,
        'rbf_margins_test': rbf_margins_test,
        'rbf_margins_train': rbf_margins_train,
        'influences': influences,
        'hinge_W': hinge_W
    }
    # Close the output file deterministically (the original leaked the handle).
    with open('output/rbf_results.p', 'wb') as out_f:
        pickle.dump((result, correlation_list, margin_list), out_f)
#!/usr/bin/env python3
from distutils.spawn import find_executable
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import plotly.express as px
import seaborn as sns
from math import log, ceil, floor
import pandas as pd
import numpy as np
import statistics
import subprocess
import logomaker
import random
import torch
import gzip
import uuid
import sys
import re
import os
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~ OPEN FOR BUSINESS ~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Author: uhlm [at] informatik [dot] uni-freiburg [dot] de
~~~~~~~~~~~~~
Run doctests
~~~~~~~~~~~~~
python3 -m doctest rplib.py
python3 -m doctest -v rplib.py
"""
################################################################################
def read_fasta_into_dic(fasta_file,
                        seqs_dic=False,
                        ids_dic=False,
                        dna=False,
                        report=1,
                        all_uc=False,
                        skip_data_id="set",
                        skip_n_seqs=True):
    """
    Read in FASTA sequences, store in dictionary and return dictionary.
    FASTA file can be plain text or gzipped (watch out for .gz ending).

    seqs_dic:
        If given, add sequences to this existing dictionary.
    ids_dic:
        If given, only read in sequences whose header ID is in ids_dic.
    dna:
        If True convert sequences to DNA (U -> T); otherwise to RNA (T -> U).
    report:
        1 = print a warning for each skipped N-containing sequence,
        2 = print only the total count of skipped sequences.
    all_uc:
        Convert all sequences to uppercase.
    skip_data_id:
        Label used in the skipped-sequences report.
    skip_n_seqs:
        Discard sequences containing N nucleotides.

    >>> test_fasta = "test_data/test.fa"
    >>> read_fasta_into_dic(test_fasta)
    {'seq1': 'acguACGUacgu', 'seq2': 'ugcaUGCAugcaACGUacgu'}
    """
    if not seqs_dic:
        seqs_dic = {}
    seq_id = ""
    # Open FASTA either as .gz or as plain text file. Raw string avoids the
    # invalid-escape warning for "\."; the with-statement guarantees the
    # handle is closed even if parsing raises.
    opener = gzip.open if re.search(r".+\.gz$", fasta_file) else open
    with opener(fasta_file, "rt") as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                seq_id = m.group(1)
                assert seq_id not in seqs_dic, "non-unique FASTA header \"%s\" in \"%s\"" % (seq_id, fasta_file)
                # Record the ID unless an ID filter is given and it fails.
                if not ids_dic or seq_id in ids_dic:
                    seqs_dic[seq_id] = ""
            elif re.search("[ACGTUN]+", line, re.I):
                m = re.search("([ACGTUN]+)", line, re.I)
                seq = m.group(1)
                if seq_id in seqs_dic:
                    if dna:
                        # Convert to DNA, concatenate sequence.
                        seq = seq.replace("U","T").replace("u","t")
                    else:
                        # Convert to RNA, concatenate sequence.
                        seq = seq.replace("T","U").replace("t","u")
                    if all_uc:
                        seq = seq.upper()
                    seqs_dic[seq_id] += seq
    # Check if sequences read in.
    assert seqs_dic, "no sequences read in (input FASTA file \"%s\" empty or mal-formatted?)" %(fasta_file)
    # If sequences with N nucleotides should be skipped.
    c_skipped_n_ids = 0
    if skip_n_seqs:
        del_ids = []
        for seq_id in seqs_dic:
            seq = seqs_dic[seq_id]
            if re.search("N", seq, re.I):
                if report == 1:
                    print ("WARNING: sequence with seq_id \"%s\" in file \"%s\" contains N nucleotides. Discarding sequence ... " % (seq_id, fasta_file))
                c_skipped_n_ids += 1
                del_ids.append(seq_id)
        for seq_id in del_ids:
            del seqs_dic[seq_id]
        assert seqs_dic, "no sequences remaining after deleting N containing sequences (input FASTA file \"%s\")" %(fasta_file)
        if c_skipped_n_ids:
            if report == 2:
                print("# of N-containing %s regions discarded: %i" %(skip_data_id, c_skipped_n_ids))
    return seqs_dic
################################################################################
def read_scores_into_list(scores_file):
    """
    Read in scores file, where scores are stored in 2nd column
    (tab-separated), and return them as a list of floats.

    Raises AssertionError if no scores were read in.
    """
    scores_list = []
    with open(scores_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            scores_list.append(float(cols[1]))
    # The original evaluated the no-op expression "f.closed" here; the
    # with-statement already closes the file, so it was removed.
    assert scores_list, "no scores read in (scores_list empty)"
    return scores_list
################################################################################
def read_cat_feat_into_dic(feat_file,
                           feat_dic=False,
                           ids_dic=False,
                           all_uc=False):
    """
    Read in categorical features formatted like FASTA sequences, e.g.
    >site1
    EEEEEIIIIIIIIII
    IIIIIIIIIIIIIII
    EEEEE
    ...
    Into feat_dic and return.

    feat_dic:
        If given, add entries to this existing dictionary.
    ids_dic:
        If given, only read in entries whose header ID is in ids_dic.
    all_uc:
        Convert feature strings to uppercase.
        BUGFIX: previously this flag had no effect because both branches
        called .upper(); now lowercase input is preserved unless all_uc is set.

    >>> feat_file = "test_data/new_format.eia"
    >>> read_cat_feat_into_dic(feat_file)
    {'site1': 'EEIIIIIIIIIIIIIIEEEE', 'site2': 'EEEEIIII'}
    """
    if not feat_dic:
        feat_dic = {}
    feat_id = ""
    # Extract feature sequences.
    with open(feat_file) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                feat_id = m.group(1)
                assert feat_id not in feat_dic, "non-unique header \"%s\" in \"%s\"" % (feat_id, feat_file)
                # Record the ID unless an ID filter is given and it fails.
                if not ids_dic or feat_id in ids_dic:
                    feat_dic[feat_id] = ""
            else:
                if feat_id in feat_dic:
                    feat_seq = line.strip()
                    if all_uc:
                        feat_seq = feat_seq.upper()
                    feat_dic[feat_id] += feat_seq
    # Check if sequences read in.
    assert feat_dic, "no feature sequences read in (input file \"%s\" empty or mal-formatted?)" %(feat_file)
    return feat_dic
################################################################################
def string_vectorizer(seq,
                      empty_vectors=False,
                      embed_numbers=False,
                      embed_one_vec=False,
                      custom_alphabet=False):
    """
    Take a string sequence and convert each letter into a one-hot-encoded
    vector, returning the list of vectors.

    empty_vectors:
        Return one empty vector per letter instead.
    embed_numbers:
        Return 1-based alphabet indices instead of one-hot vectors,
        e.g. ACGU becomes [[1], [2], [3], [4]].
    embed_one_vec:
        With embed_numbers, return a flat list of indices.
    custom_alphabet:
        Supply a custom alphabet list; RNA alphabet is used by default.

    >>> string_vectorizer("ACGU")
    [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
    >>> string_vectorizer("")
    []
    >>> string_vectorizer("XX")
    [[0, 0, 0, 0], [0, 0, 0, 0]]
    >>> string_vectorizer("ABC", empty_vectors=True)
    [[], [], []]
    >>> string_vectorizer("ACGU", embed_numbers=True)
    [[1], [2], [3], [4]]
    >>> string_vectorizer("ACGU", embed_numbers=True, embed_one_vec=True)
    [1, 2, 3, 4]
    """
    alphabet = custom_alphabet if custom_alphabet else ['A', 'C', 'G', 'U']
    if empty_vectors:
        return [[] for _ in seq]
    if embed_numbers:
        # 1-based index of each alphabet character.
        char2num = {c: n for n, c in enumerate(alphabet, start=1)}
        if embed_one_vec:
            return [char2num[letter] for letter in seq]
        return [[char2num[letter]] for letter in seq]
    # One-hot encoding; letters outside the alphabet yield all-zero vectors.
    return [[1 if char == letter else 0 for char in alphabet] for letter in seq]
################################################################################
def char_vectorizer(char,
                    custom_alphabet=False):
    """
    Vectorize given nucleotide character. Convert to uppercase before
    vectorizing.

    custom_alphabet:
        Supply a custom alphabet list; RNA alphabet is used by default.

    >>> char_vectorizer("C")
    [0, 1, 0, 0]
    >>> char_vectorizer("g")
    [0, 0, 1, 0]
    >>> char_vectorizer("M", ['E', 'H', 'I', 'M', 'S'])
    [0, 0, 0, 1, 0]
    """
    alphabet = custom_alphabet if custom_alphabet else ['A', 'C', 'G', 'U']
    char = char.upper()
    # BUGFIX: the error message previously interpolated the length instead of
    # the offending character it claims to show.
    assert len(char) == 1, "given char length != 1 (given char: \"%s\")" % (char)
    return [1 if c == char else 0 for c in alphabet]
################################################################################
def update_sequence_viewpoint(seq, vp_s, vp_e):
    """
    Update sequence viewpoint: the region delimited by vp_s (start) and
    vp_e (end) becomes uppercase, everything outside it lowercase.
    NOTE that vp_s and vp_e are expected to be 1-based index.

    >>> seq = "acgtACGTacgt"
    >>> update_sequence_viewpoint(seq, 4, 9)
    'acgTACGTAcgt'
    >>> seq = "acgtacgtACGTac"
    >>> update_sequence_viewpoint(seq, 5, 16)
    'acgtACGTACGTAC'
    """
    assert seq, "seq empty"
    assert vp_s <= vp_e, "vp_s > vp_e"
    # Split into upstream / viewpoint / downstream parts and recase each.
    upstream = seq[:vp_s - 1]
    viewpoint = seq[vp_s - 1:vp_e]
    downstream = seq[vp_e:]
    return upstream.lower() + viewpoint.upper() + downstream.lower()
################################################################################
def read_str_elem_p_into_dic(str_elem_p_file,
                             p_to_str=False,
                             str_elem_p_dic=False):
    """
    Read in structural element probabilities for each sequence position
    (one tab-separated row of probabilities per position, grouped under
    FASTA-style ">sequence_id" headers) and return a dictionary mapping
    sequence ID -> 2d list of row values.

    p_to_str:
        Keep probabilities as strings instead of converting to float.
    str_elem_p_dic:
        If given, add entries to this existing dictionary.

    >>> str_elem_up_test = "test_data/test.elem_p.str"
    >>> read_str_elem_p_into_dic(str_elem_up_test)
    {'CLIP_01': [[0.1, 0.2, 0.4, 0.2, 0.1], [0.2, 0.3, 0.2, 0.1, 0.2]]}
    """
    if not str_elem_p_dic:
        str_elem_p_dic = {}
    cur_id = ""
    with open(str_elem_p_file) as f:
        for line in f:
            header = re.search(">(.+)", line)
            if header:
                cur_id = header.group(1)
                str_elem_p_dic[cur_id] = []
                continue
            row = line.strip().split("\t")
            if not p_to_str:
                row = [float(p) for p in row]
            str_elem_p_dic[cur_id].append(row)
    return str_elem_p_dic
################################################################################
def read_con_into_dic(con_file,
                      sc_to_str=False,
                      con_dic=False):
    """
    Read in conservation scores (phastCons or phyloP), one score per line,
    grouped under FASTA-style ">sequence_id" headers, and return a
    dictionary mapping sequence ID -> scores vector.

    sc_to_str:
        Keep scores as strings instead of converting to numbers.
    con_dic:
        If given, add entries to existing dictionary.

    >>> con_test = "test_data/test.pp.con"
    >>> read_con_into_dic(con_test)
    {'CLIP_01': [0.1, 0.2], 'CLIP_02': [0.4, 0.5]}
    """
    if not con_dic:
        con_dic = {}
    cur_id = ""
    with open(con_file) as f:
        for line in f:
            header = re.search(">(.+)", line)
            if header:
                cur_id = header.group(1)
                con_dic[cur_id] = []
                continue
            value = line.strip()
            if not sc_to_str:
                value = float(value)
                # Keep integral scores as int (e.g. "2" or "2.0" -> 2).
                if not value % 1:
                    value = int(value)
            con_dic[cur_id].append(value)
    assert con_dic, "con_dic empty"
    return con_dic
################################################################################
def mean_normalize(x, mean_x, max_x, min_x):
    """
    Mean normalization of input x, given dataset mean, max, and min.
    Returns x unchanged if max equals min (all values identical).

    >>> mean_normalize(10, 10, 15, 5)
    0.0
    >>> mean_normalize(15, 20, 30, 10)
    -0.25

    Formula from:
    https://en.wikipedia.org/wiki/Feature_scaling
    """
    spread = max_x - min_x
    if spread == 0:
        # All values identical; normalization is undefined, return x as is.
        return x
    return (x - mean_x) / spread
################################################################################
def min_max_normalize(x, max_x, min_x,
                      borders=False):
    """
    Min-max normalization of input x, given dataset max and min.
    Returns x unchanged if max equals min (all values identical).

    borders:
        Optional [a, b] pair rescaling the result into the range [a, b].

    >>> min_max_normalize(20, 30, 10)
    0.5
    >>> min_max_normalize(30, 30, 10)
    1.0
    >>> min_max_normalize(10, 30, 10)
    0.0
    >>> min_max_normalize(0.5, 1, 0, borders=[-1, 1])
    0.0

    Formula from:
    https://en.wikipedia.org/wiki/Feature_scaling
    """
    spread = max_x - min_x
    if spread == 0:
        # All values identical; normalization is undefined, return x as is.
        return x
    if not borders:
        return (x - min_x) / spread
    assert len(borders) == 2, "list of 2 values expected"
    lo, hi = borders
    assert lo < hi, "a should be < b"
    return lo + (x - min_x) * (hi - lo) / spread
################################################################################
def convert_graph_to_string(g):
    """
    Convert graph into string of graph edges for string comparisons.
    E.g. "1-2,2-3,3-5,,4-5,4-8,5-6,6-7,7-8,"
    This graph has backbone edges from 1 to 8 plus one basepair edge from 4-8.

    g:
        Graph object exposing an .edges iterable of (start, end) pairs
        (e.g. a networkx graph).

    NOTE: the original docstring contained a large block of garbled
    ASCII-art residue, which has been removed.
    """
    return "".join("%i-%i," % (s, e) for s, e in g.edges)
################################################################################
def read_ids_into_dic(ids_file,
                      check_dic=True,
                      ids_dic=False):
    """
    Read in IDs file, where each line stores one ID, and return a
    dictionary mapping each ID to 1.

    check_dic:
        Assert that at least one ID was read in.
    ids_dic:
        If given, add IDs to this existing dictionary.

    >>> test_ids_file = "test_data/test.ids"
    >>> ids_dic = read_ids_into_dic(test_ids_file)
    >>> print(ids_dic)
    {'clip1': 1, 'clip2': 1, 'clip3': 1}
    """
    if not ids_dic:
        ids_dic = {}
    with open(ids_file) as f:
        for line in f:
            ids_dic[line.strip()] = 1
    if check_dic:
        assert ids_dic, "IDs dictionary ids_dic empty"
    return ids_dic
################################################################################
def bed_convert_coords(reg_s, reg_e, ref_s, ref_e, pol):
    """
    Convert BED coordinates of a region inside a subsequence of the
    reference (chromosome or transcript) into reference coordinates,
    returning (start, end) in BED format.

    reg_s, reg_e:
        Region start/end within the subsequence (0 .. len(subsequence)).
    ref_s, ref_e:
        Subsequence start/end on the reference sequence.
    pol:
        Polarity on reference ("+" for transcripts, "+" or "-" for
        chromosomal regions).

    >>> bed_convert_coords(10, 20, 1000, 2000, "+")
    (1010, 1020)
    >>> bed_convert_coords(10, 20, 1000, 2000, "-")
    (1980, 1990)
    """
    assert pol in ("+", "-"), "invalid polarity given"
    assert reg_s < reg_e, "Invalid BED coordinates given: reg_s >= reg_e"
    assert ref_s < ref_e, "Invalid BED coordinates given: ref_s >= ref_e"
    if pol == "-":
        # Minus strand: offsets count backwards from the subsequence end.
        return ref_e - reg_e, ref_e - reg_s
    return ref_s + reg_s, ref_s + reg_e
################################################################################
def list_extract_peaks(in_list,
                       max_merge_dist=0,
                       coords="list",
                       sc_thr=0):
    """
    Extract peak regions from list.
    Peak region is defined as region >= score threshold.

    max_merge_dist:
        Merge peaks whose gap is <= max_merge_dist (repeated until no
        further merges happen).
    coords=list : peak start 0-based, peak end 0-based.
    peak position (pr_top_pos) 0-based.
    coords=bed : peak start 0-based, peak end 1-based.
    peak position (pr_top_pos) also 1-based.

    Return list of lists with format:
    [pr_s, pr_e, pr_top_pos, pr_top_sc]

    >>> test_list = [-1, 0, 2, 4.5, 1, -1, 5, 6.5]
    >>> list_extract_peaks(test_list)
    [[1, 4, 3, 4.5], [6, 7, 7, 6.5]]
    >>> list_extract_peaks(test_list, sc_thr=2)
    [[2, 3, 3, 4.5], [6, 7, 7, 6.5]]
    >>> list_extract_peaks(test_list, sc_thr=2, coords="bed")
    [[2, 4, 4, 4.5], [6, 8, 8, 6.5]]
    >>> list_extract_peaks(test_list, sc_thr=10)
    []
    >>> test_list = [2, -1, 3, -1, 4, -1, -1, 6, 9]
    >>> list_extract_peaks(test_list, max_merge_dist=2)
    [[0, 4, 4, 4], [7, 8, 8, 9]]
    >>> list_extract_peaks(test_list, max_merge_dist=3)
    [[0, 8, 8, 9]]
    """
    # Check.
    assert len(in_list), "Given list is empty"
    # Peak regions list.
    peak_list = []
    # State while scanning: are we inside a peak, and its current bounds/top.
    inside = False
    pr_s = 0
    pr_e = 0
    pr_top_pos = 0
    pr_top_sc = -100000
    # Single left-to-right scan: open a peak when score >= threshold,
    # close it (and record it) when score drops below.
    for i, sc in enumerate(in_list):
        # Part of peak region?
        if sc >= sc_thr:
            # At peak start.
            if not inside:
                pr_s = i
                pr_e = i
                inside = True
            else:
                # Inside peak region.
                pr_e = i
            # Store top position.
            if sc > pr_top_sc:
                pr_top_sc = sc
                pr_top_pos = i
        else:
            # Before was peak region?
            if inside:
                # Store peak region.
                #peak_infos = "%i,%i,%i,%f" %(pr_s, pr_e, pr_top_pos, pr_top_sc)
                peak_infos = [pr_s, pr_e, pr_top_pos, pr_top_sc]
                peak_list.append(peak_infos)
                inside = False
                pr_top_pos = 0
                pr_top_sc = -100000
    # If peak at the end, also report.
    if inside:
        # Store peak region.
        peak_infos = [pr_s, pr_e, pr_top_pos, pr_top_sc]
        peak_list.append(peak_infos)
    # Merge peaks. Repeatedly merge adjacent pairs whose gap is small
    # enough; each pass merges non-overlapping pairs, iterating until a
    # full pass makes no merge.
    if max_merge_dist and len(peak_list) > 1:
        iterate = True
        while iterate:
            merged_peak_list = []
            added_peaks_dic = {}
            peaks_merged = False
            for i, l in enumerate(peak_list):
                if i in added_peaks_dic:
                    continue
                j = i + 1
                # Last element.
                if j == len(peak_list):
                    if i not in added_peaks_dic:
                        merged_peak_list.append(peak_list[i])
                    break
                # Compare two elements.
                new_peak = []
                if (peak_list[j][0] - peak_list[i][1]) <= max_merge_dist:
                    peaks_merged = True
                    # Merged peak keeps the higher-scoring top position.
                    new_top_pos = peak_list[i][2]
                    new_top_sc = peak_list[i][3]
                    if peak_list[i][3] < peak_list[j][3]:
                        new_top_pos = peak_list[j][2]
                        new_top_sc = peak_list[j][3]
                    new_peak = [peak_list[i][0], peak_list[j][1], new_top_pos, new_top_sc]
                # If two peaks were merged.
                if new_peak:
                    merged_peak_list.append(new_peak)
                    added_peaks_dic[i] = 1
                    added_peaks_dic[j] = 1
                else:
                    merged_peak_list.append(peak_list[i])
                    added_peaks_dic[i] = 1
            if not peaks_merged:
                iterate = False
            peak_list = merged_peak_list
            peaks_merged = False
    # If peak coordinates should be in .bed format, make peak ends 1-based.
    if coords == "bed":
        for i in range(len(peak_list)):
            peak_list[i][1] += 1
            peak_list[i][2] += 1 # 1-base best score position too.
    return peak_list
################################################################################
def is_tool(name):
    """Check whether tool "name" is in PATH.

    Uses shutil.which instead of the deprecated
    distutils.spawn.find_executable: distutils was removed in Python 3.12
    (PEP 632) and shutil.which is its documented replacement.
    """
    import shutil  # local import keeps the file-level import block untouched
    return shutil.which(name) is not None
################################################################################
def bed_check_six_col_format(bed_file,
                             nr_cols=6):
    """
    Check whether given .bed file has 6 columns (tab-separated); return
    True as soon as one row with nr_cols columns is found, else False.

    >>> test_bed = "test_data/test1.bed"
    >>> bed_check_six_col_format(test_bed)
    True
    >>> test_bed = "test_data/empty_file"
    >>> bed_check_six_col_format(test_bed)
    False
    """
    with open(bed_file) as f:
        for line in f:
            if len(line.strip().split("\t")) == nr_cols:
                return True
    return False
################################################################################
def count_file_rows(in_file,
                    nr_cols=False):
    """
    Count number of file rows. If nr_cols set, demand certain (nr_cols)
    number of tab-separated columns for a row to be counted.

    >>> test_file = "test_data/test1.bed"
    >>> count_file_rows(test_file)
    7
    >>> test_file = "test_data/empty_file"
    >>> count_file_rows(test_file)
    0
    """
    total = 0
    with open(in_file) as f:
        for line in f:
            if nr_cols:
                # Only count rows with the demanded column count.
                if len(line.strip().split("\t")) == nr_cols:
                    total += 1
            else:
                total += 1
    return total
################################################################################
def bpp_callback(v, v_size, i, maxsize, what, data=None):
    """
    RNAplfold base-pair probability callback: collect pairs (i, j) with
    probability >= 0.01 into the data list.
    This uses the Python3 API (RNA.py) of ViennaRNA (tested with v 2.4.13).
    So RNA.py needs to be in PYTHONPATH (it is if installed via conda).
    """
    import RNA
    if what & RNA.PROBS_WINDOW_BPP:
        for j, p in enumerate(v):
            # Skip empty slots and negligible probabilities.
            if p is not None and p >= 0.01:
                data.append({'i': i, 'j': j, 'p': p})
################################################################################
def up_split_callback(v, v_size, i, maxsize, what, data):
    """
    RNAplfold unpaired-probability callback: append the probabilities v for
    position i to the list matching the reported loop context.
    This uses the Python3 API (RNA.py) of ViennaRNA (tested with v 2.4.13).
    So RNA.py needs to be in PYTHONPATH (it is if installed via conda).
    """
    import RNA
    if not (what & RNA.PROBS_WINDOW_UP):
        return
    # Remaining bits identify the loop context of this callback invocation.
    loop_type = what & ~RNA.PROBS_WINDOW_UP
    if loop_type == RNA.ANY_LOOP:
        # Non-split case: probabilities go straight into data.
        target = data
    elif loop_type == RNA.EXT_LOOP:
        target = data['ext']
    elif loop_type == RNA.HP_LOOP:
        target = data['hp']
    elif loop_type == RNA.INT_LOOP:
        target = data['int']
    elif loop_type == RNA.MB_LOOP:
        target = data['mb']
    else:
        # Unknown context: discard (matches original behavior of appending
        # to a throwaway list).
        target = []
    target.append({'i': i, 'up': v})
################################################################################
def calc_str_elem_p(in_fasta, out_str,
                    report=True,
                    stats_dic=None,
                    id2ucr_dic=False,
                    plfold_u=3,
                    plfold_l=100,
                    plfold_w=150):
    """
    Calculate structural elements probabilities (different loop contexts),
    using ViennaRNA's RNAplfold.
    This uses the Python3 API (RNA.py) of ViennaRNA (tested with v 2.4.17).
    So RNA.py needs to be in PYTHONPATH, which it is,
    if e.g. installed via:
    conda install -c bioconda viennarna=2.4.17
    NOTE that there is still a memory bug in the Python API as for 2.4.17.
    The more sequences get processed, the more memory is consumed. Bug
    has been reported and should be gone in the next release.

    in_fasta:
        Input FASTA file
    out_str:
        Output position-wise structural elements probabilities file
        (tab-separated: p_external, p_hairpin, p_internal, p_multiloop,
        p_paired per position, grouped under ">seq_id" headers)
    stats_dic:
        If not None, extract statistics from structure data and store
        in stats_dic.
    id2ucr_dic:
        NOTE(review): currently unused inside this function.
    plfold_u:
        RNAplfold -u parameter value
    plfold_l:
        RNAplfold -L parameter value
    plfold_w:
        RNAplfold -W parameter value
    """
    # ViennaRNA lib.
    try:
        import RNA
    except:
        assert False, "ViennaRNA Python3 API library RNA.py not in PYTHONPATH"
    # For real.
    import RNA
    # Check input.
    assert os.path.isfile(in_fasta), "cannot open target FASTA file \"%s\"" % (in_fasta)
    # Read in FASTA file.
    seqs_dic = read_fasta_into_dic(in_fasta)
    # If stats dictionary given, compute statistics during run.
    if stats_dic is not None:
        stats_dic["seqlen_sum"] = 0
        stats_dic["seq_c"] = len(seqs_dic)
        pu_list = []
        ps_list = []
        pe_list = []
        ph_list = []
        pi_list = []
        pm_list = []
        pbp_list = []
    # Output files.
    OUTSTR = open(out_str,"w")
    # Floor float, for centering probabilities (important when u > 1).
    i_add = int(plfold_u/2)
    # Sequence counter.
    c_seq = 0
    # Calculate base pair and structural elements probabilities.
    if report:
        print("Calculate structural elements probabilities ... ")
    for seq_id, seq in sorted(seqs_dic.items()):
        # Per-sequence RNAplfold model details (window + max basepair span).
        md = RNA.md()
        md.max_bp_span = plfold_l
        md.window_size = plfold_w
        # Different loop context probabilities.
        data_split = {'ext': [], 'hp': [], 'int': [], 'mb': [] }
        fc = RNA.fold_compound(seq, md, RNA.OPTION_WINDOW)
        # Get different loop context probabilities.
        fc.probs_window(plfold_u, RNA.PROBS_WINDOW_UP | RNA.PROBS_WINDOW_UP_SPLIT, up_split_callback, data_split)
        # Store individual probs for sequence in lists.
        ups = []
        ups_e = []
        ups_h = []
        ups_i = []
        ups_m = []
        ups_s = []
        for i,e in enumerate(seq):
            data_i = i + 1
            # Default each loop-context probability to 0 when the callback
            # delivered no value for this position.
            p_e = 0
            p_h = 0
            p_i = 0
            p_m = 0
            if data_split['ext'][i]['up'][plfold_u]:
                p_e = data_split['ext'][i]['up'][plfold_u]
            if data_split['hp'][i]['up'][plfold_u]:
                p_h = data_split['hp'][i]['up'][plfold_u]
            if data_split['int'][i]['up'][plfold_u]:
                p_i = data_split['int'][i]['up'][plfold_u]
            if data_split['mb'][i]['up'][plfold_u]:
                p_m = data_split['mb'][i]['up'][plfold_u]
            # Total unpaired prob = sum of different loop context probs.
            p_u = p_e + p_h + p_i + p_m
            if p_u > 1:
                p_u = 1
            # Paired prob (stacked prob).
            p_s = 1 - p_u
            ups.append(p_u)
            ups_e.append(p_e)
            ups_h.append(p_h)
            ups_i.append(p_i)
            ups_m.append(p_m)
            ups_s.append(p_s)
        # Center the values and output for each sequence position.
        OUTSTR.write(">%s\n" %(seq_id))
        l_seq = len(seq)
        if stats_dic is not None:
            stats_dic["seqlen_sum"] += l_seq
        for i, c in enumerate(seq):
            # At start, end, and middle: clamp the centered index so that
            # positions near the sequence edges reuse the nearest valid
            # u-window value.
            if i < i_add:
                p_u = ups[plfold_u-1]
                p_e = ups_e[plfold_u-1]
                p_h = ups_h[plfold_u-1]
                p_i = ups_i[plfold_u-1]
                p_m = ups_m[plfold_u-1]
                p_s = ups_s[plfold_u-1]
            elif i >= (l_seq - i_add):
                p_u = ups[l_seq-1]
                p_e = ups_e[l_seq-1]
                p_h = ups_h[l_seq-1]
                p_i = ups_i[l_seq-1]
                p_m = ups_m[l_seq-1]
                p_s = ups_s[l_seq-1]
            else:
                p_u = ups[i+i_add]
                p_e = ups_e[i+i_add]
                p_h = ups_h[i+i_add]
                p_i = ups_i[i+i_add]
                p_m = ups_m[i+i_add]
                p_s = ups_s[i+i_add]
            # Output centered values.
            pos = i+1 # one-based sequence position.
            #OUTSTR.write("%i\t%f\t%f\t%f\t%f\t%f\t%f\n" %(pos,p_u,p_e,p_h,p_i,p_m,p_s))
            OUTSTR.write("%f\t%f\t%f\t%f\t%f\n" %(p_e,p_h,p_i,p_m,p_s))
            if stats_dic:
                pu_list.append(p_u)
                ps_list.append(p_s)
                pe_list.append(p_e)
                ph_list.append(p_h)
                pi_list.append(p_i)
                pm_list.append(p_m)
        c_seq += 1
        if report:
            if not c_seq % 100:
                print("%i sequences processed" %(c_seq))
    OUTSTR.close()
    # Calculate stats if stats_dic set.
    if stats_dic:
        # Mean values.
        stats_dic["U"] = [statistics.mean(pu_list)]
        stats_dic["S"] = [statistics.mean(ps_list)]
        stats_dic["E"] = [statistics.mean(pe_list)]
        stats_dic["H"] = [statistics.mean(ph_list)]
        stats_dic["I"] = [statistics.mean(pi_list)]
        stats_dic["M"] = [statistics.mean(pm_list)]
        # Standard deviations.
        stats_dic["U"] += [statistics.stdev(pu_list)]
        stats_dic["S"] += [statistics.stdev(ps_list)]
        stats_dic["E"] += [statistics.stdev(pe_list)]
        stats_dic["H"] += [statistics.stdev(ph_list)]
        stats_dic["I"] += [statistics.stdev(pi_list)]
        stats_dic["M"] += [statistics.stdev(pm_list)]
################################################################################
def bed_read_rows_into_dic(in_bed,
                           exon_regions=False,
                           id2exonc_dic=None,
                           id2len_dic=None,
                           two_ids_dic=False):
    """
    Read in .bed file rows into dictionary.
    Mapping is region ID -> bed row.
    If exon_regions=True, a set of regions defines a transcript if column 4
    ID is the same for all these regions. Then by start,end + polarity,
    exon IDs are added. E.g. "id" -> "id_e1", "id_e2" ...

    exon_regions:
        Allow column 4 IDs to be non-unique, and treated as exon regions.
        This results in new IDs for these regions (_e1, _e2 ...)
    id2exonc_dic:
        Site ID to exon count dictionary (filled if not None).
    id2len_dic:
        Site ID to total region length dictionary (summed over all
        regions sharing the ID; filled if not None).
    two_ids_dic:
        NOTE(review): currently unused; kept for interface compatibility.
        Intended per original docs: site ID -> sequence ID dictionary
        used for filtering sites — confirm before relying on it.

    >>> id2exonc_dic = {}
    >>> id2len_dic = {}
    >>> id2row_dic = {}
    >>> test_bed = "test_data/test2.bed"
    >>> id2row_dic = bed_read_rows_into_dic(test_bed, id2exonc_dic=id2exonc_dic, exon_regions=True, id2len_dic=id2len_dic)
    >>> id2exonc_dic
    {'reg1': 4, 'reg2': 3, 'reg3': 1}
    >>> id2len_dic
    {'reg1': 2200, 'reg2': 2100, 'reg3': 60000}

    """
    newid2row_dic = {}
    id2pol_dic = {}
    id2sc_dic = {}
    id2starts_dic = {}
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            seq_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_l = site_e - site_s
            site_id = cols[3]
            site_sc = cols[4]
            site_pol = cols[5]
            # Unless regions are exon parts, column 4 IDs must be unique.
            if not exon_regions:
                assert site_id not in id2pol_dic, "non-unique site ID (\"%s\") found in \"%s\"" %(site_id, in_bed)
            id2pol_dic[site_id] = site_pol
            id2sc_dic[site_id] = site_sc
            # Row prefix without ID/score/strand; these are re-appended
            # after exon sorting below.
            new_row = "%s\t%i\t%i" %(seq_id, site_s, site_e)
            # Column 4 ID + start position uniquely identifies a region.
            new_site_id = "%s,%i" %(site_id, site_s)
            assert new_site_id not in newid2row_dic, "non-unique site ID + start position combination (\"%s\") found in \"%s\"" %(new_site_id, in_bed)
            newid2row_dic[new_site_id] = new_row
            # Record start positions per site ID (used for exon ordering).
            if site_id not in id2starts_dic:
                id2starts_dic[site_id] = [site_s]
            else:
                id2starts_dic[site_id].append(site_s)
            # Accumulate total length per site ID.
            if id2len_dic is not None:
                if site_id in id2len_dic:
                    id2len_dic[site_id] += site_l
                else:
                    id2len_dic[site_id] = site_l
    # Store sites with new site IDs.
    id2row_dic = {}
    for site_id in id2starts_dic:
        site_pol = id2pol_dic[site_id]
        # If site ID only has one .bed region, keep ID and original score.
        if len(id2starts_dic[site_id]) == 1:
            site_s = id2starts_dic[site_id][0]
            new_site_id = "%s,%i" %(site_id, site_s)
            id2row_dic[site_id] = newid2row_dic[new_site_id] + "\t%s\t%s\t%s" %(site_id, id2sc_dic[site_id], site_pol)
            if id2exonc_dic is not None:
                id2exonc_dic[site_id] = 1
        else:
            # Sort start positions ascending (+ strand) or descending (- strand)
            # so exon numbering follows transcript orientation.
            if site_pol == "+":
                id2starts_dic[site_id].sort()
            else:
                id2starts_dic[site_id].sort(reverse=True)
            exon_c = 0
            for site_s in id2starts_dic[site_id]:
                new_site_id = "%s,%i" %(site_id, site_s)
                exon_c += 1
                exon_id = site_id + "_e%i" %(exon_c)
                # Exon rows get score 0 (original per-region score dropped).
                id2row_dic[exon_id] = newid2row_dic[new_site_id] + "\t%s\t0\t%s" %(exon_id, site_pol)
                if id2exonc_dic is not None:
                    if site_id in id2exonc_dic:
                        id2exonc_dic[site_id] += 1
                    else:
                        id2exonc_dic[site_id] = 1
    return id2row_dic
################################################################################
def bed_write_row_dic_into_file(id2row_dic, out_bed,
                                id2out_dic=None):
    """
    Write .bed row dictionary (column 4 ID as key, .bed row as string)
    into .bed file.

    Example dictionary:
    {'reg1_e1': 'chr1\t1000\t1100\treg1_e1\t0\t+', ... }

    id2out_dic:
        IDs dictionary for which to output regions. If None, all rows
        are written.

    Raises AssertionError if id2row_dic is empty or no row passes the
    id2out_dic filter.
    """
    assert id2row_dic, "given id2row_dic empty"
    c_out = 0
    # Context manager guarantees the handle is closed even on errors.
    with open(out_bed, "w") as OUTBED:
        for site_id in id2row_dic:
            # Optional ID filter.
            if id2out_dic is not None:
                if site_id not in id2out_dic:
                    continue
            c_out += 1
            OUTBED.write(id2row_dic[site_id] + "\n")
    assert c_out, "nothing was output"
################################################################################
def bed_extract_sequences_from_2bit(in_bed, out_fa, in_2bit,
                                    lc_repeats=False,
                                    convert_to_rna=False):
    """
    Extract sequences from genome (provide genome .2bit file).
    twoBitToFa executable needs to be in PATH. Store extracted
    sequences in out_fa.

    convert_to_rna:
        If true, read in extracted sequences and convert to RNA.
    lc_repeats:
        If True, do not convert repeat regions to uppercase and output.

    >>> in_bed = "test_data/test_seq_extr.sites.bed"
    >>> tmp_2bit_fa = "test_data/test_seq_extr.sites.2bit.tmp.fa"
    >>> tmp_seq_fa = "test_data/test_seq_extr.sites.seq.tmp.fa"
    >>> exp_fa = "test_data/test_seq_extr.sites.exp.fa"
    >>> in_fa = "test_data/test_seq_extr.sequences.fa"
    >>> in_2bit = "test_data/test_seq_extr.sequences.2bit"
    >>> id2row_dic = bed_read_rows_into_dic(in_bed)
    >>> seqs_dic = read_fasta_into_dic(in_fa, dna=True)
    >>> id2seq_dic = extract_transcript_sequences(id2row_dic, seqs_dic, revcom=True)
    >>> fasta_output_dic(id2seq_dic, tmp_seq_fa)
    >>> bed_extract_sequences_from_2bit(in_bed, tmp_2bit_fa, in_2bit)
    >>> diff_two_files_identical(tmp_seq_fa, exp_fa)
    True
    >>> diff_two_files_identical(tmp_2bit_fa, exp_fa)
    True

    """
    # twoBitToFa must be callable.
    assert is_tool("twoBitToFa"), "twoBitToFa not in PATH"
    # Assemble the twoBitToFa call (uppercase repeats unless lc_repeats).
    mask_opt = "" if lc_repeats else " -noMask"
    check_cmd = "twoBitToFa" + mask_opt + " -bed=" + in_bed + " " + in_2bit + " " + out_fa
    output = subprocess.getoutput(check_cmd)
    # twoBitToFa is silent on success; any output indicates a problem.
    assert not output, "twoBitToFa is complaining:\n%s\n%s" %(check_cmd, output)
    if convert_to_rna:
        # Reading the FASTA back in converts the sequences to RNA.
        seqs_dic = read_fasta_into_dic(out_fa)
        # Overwrite out_fa with the RNA sequences.
        fasta_output_dic(seqs_dic, out_fa,
                         split=True)
################################################################################
def generate_random_fn(file_ending):
    """
    Generate a random file name for temporary files.

    file_ending:
        File name extension appended after ".tmp." (e.g. "bed").

    Returns a string of the form "<uuid>.tmp.<file_ending>".
    """
    # Bug fix: the original used "." (attribute access) instead of "+"
    # for string concatenation, which raised AttributeError at runtime.
    random_id = uuid.uuid1()
    random_fn = str(random_id) + ".tmp." + file_ending
    return random_fn
################################################################################
def bed_check_unique_ids(bed_file):
    """
    Check whether .bed file (6 column format with IDs in column 4)
    has unique column 4 IDs.

    Returns True if all column 4 IDs are unique, False otherwise.

    >>> test_bed = "test_data/test1.bed"
    >>> bed_check_unique_ids(test_bed)
    True
    >>> test_bed = "test_data/test2.bed"
    >>> bed_check_unique_ids(test_bed)
    False

    """
    # Pure Python check replaces the former "cut | sort | uniq -d" shell
    # pipeline: portable, no shell, safe for paths containing spaces.
    seen_ids = set()
    with open(bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            # Skip malformed rows without a column 4.
            if len(cols) < 4:
                continue
            site_id = cols[3]
            if site_id in seen_ids:
                return False
            seen_ids.add(site_id)
    return True
################################################################################
def bed_check_unique_ids_two_files(bed_file1, bed_file2):
    """
    Check whether the combined set of column 4 IDs from two .bed files
    (6 column format) is unique.

    Returns True if no ID occurs more than once across both files
    (passing the same file twice therefore returns False).

    >>> test_bed1 = "test_data/test1.bed"
    >>> test_bed2 = "test_data/test4.bed"
    >>> bed_check_unique_ids_two_files(test_bed1, test_bed2)
    True
    >>> bed_check_unique_ids_two_files(test_bed1, test_bed1)
    False

    """
    # Pure Python check replaces the former "cut | sort | uniq -d" shell
    # pipeline: portable, no shell, safe for paths containing spaces.
    id_counts = {}
    for bed_file in (bed_file1, bed_file2):
        with open(bed_file) as f:
            for line in f:
                cols = line.strip().split("\t")
                # Skip malformed rows without a column 4.
                if len(cols) < 4:
                    continue
                site_id = cols[3]
                id_counts[site_id] = id_counts.get(site_id, 0) + 1
    for c in id_counts.values():
        if c > 1:
            return False
    return True
################################################################################
def fasta_check_fasta_file(fasta_file):
    """
    Check whether given FASTA file is in FASTA format.

    Looks at the first FASTA entries (via grep on header lines plus the
    line following each header) and requires at least one header line
    and one nucleotide-looking sequence line.

    >>> test_bed = "test_data/test1.bed"
    >>> fasta_check_fasta_file(test_bed)
    False
    >>> test_fa = "test_data/test.fa"
    >>> fasta_check_fasta_file(test_fa)
    True

    """
    assert os.path.isfile(fasta_file), "cannot open fasta_file \"%s\"" % (fasta_file)
    # Grab headers + the line after each, limited to the first 100 lines.
    check_cmd = 'grep -A 1 ">" ' + fasta_file + ' | head -100 '
    output = subprocess.getoutput(check_cmd)
    # No grep hits at all: definitely not FASTA.
    if not output:
        return False
    # Count header lines and sequence-looking lines among the hits.
    n_headers = 0
    n_seqs = 0
    for entry in output.split("\n"):
        if re.search(">.+", entry):
            n_headers += 1
        elif re.search("[ACGTUN]+", entry, re.I):
            n_seqs += 1
    return bool(n_headers and n_seqs)
################################################################################
def bed_get_region_lengths(bed_file,
                           id2pol_dic=None):
    """
    Read in .bed file, store and return region lengths in dictionary.
    key : region ID (.bed col4)
    value : region length (.bed col3-col2)
    Additionally, also store polarities in id2pol_dic (if given).

    >>> test_file = "test_data/test3.bed"
    >>> bed_get_region_lengths(test_file)
    {'CLIP1': 10, 'CLIP2': 15}

    """
    id2len_dic = {}
    with open(bed_file) as f:
        for line in f:
            fields = line.strip().split("\t")
            reg_start = int(fields[1])
            reg_end = int(fields[2])
            reg_id = fields[3]
            reg_pol = fields[5]
            # Region IDs must be unique.
            assert reg_id not in id2len_dic, "column 4 IDs not unique in given .bed file \"%s\"" %(bed_file)
            if id2pol_dic is not None:
                id2pol_dic[reg_id] = reg_pol
            id2len_dic[reg_id] = reg_end - reg_start
    assert id2len_dic, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (bed_file)
    return id2len_dic
################################################################################
def get_core_id_to_part_counts_dic(ids_dic,
                                   label_dic=None):
    """
    Get core ID to part count dictionary.
    E.g. for
    id1_p1, id1_p2
    we get:
    id1 -> 2
    Works for _p (split region parts) and _e (exon regions) ID extensions.

    label_dic:
        If not None, store core ID -> part label ("p" or "e").

    >>> ids_dic = {'id1_p1': 1, 'id1_p2': 1, 'id2': 1, 'id3_e1': 1, 'id3_e2': 1}
    >>> get_core_id_to_part_counts_dic(ids_dic)
    {'id1': 2, 'id2': 1, 'id3': 2}
    >>> in_bed = "test_data/test_con.bed"
    >>> ids_dic = bed_get_region_ids(in_bed)
    >>> get_core_id_to_part_counts_dic(ids_dic)
    {'site1': 1, 'site2': 1, 'site3': 2, 'site4': 2}

    """
    # Checker.
    assert ids_dic, "given ids_dic empty"
    id2c_dic = {}
    for site_id in ids_dic:
        # Single regex pass (raw string avoids invalid "\d" escape);
        # matches split-site IDs ending in _p<N> or _e<N>.
        m = re.search(r'(.+)_([pe])\d+$', site_id)
        if m:
            core_id = m.group(1)
            if label_dic is not None:
                label_dic[core_id] = m.group(2)
            if core_id in id2c_dic:
                id2c_dic[core_id] += 1
            else:
                id2c_dic[core_id] = 1
        else:
            # Plain IDs must be unique.
            assert site_id not in id2c_dic, "non-unique site ID \"%s\" in ids_dic" %(site_id)
            id2c_dic[site_id] = 1
    # Check and litter.
    assert id2c_dic, "nothing read into id2c_dic"
    return id2c_dic
################################################################################
def bed_core_id_to_part_counts_dic(in_bed):
    """
    Get core ID to part count dictionary from a BED file (column 4 IDs).
    E.g. for
    id1_p1, id1_p2
    we get:
    id1 -> 2
    Works for _p (split region parts) and _e (exon regions) ID extensions.

    >>> test_bed = "test_data/test8.bed"
    >>> bed_core_id_to_part_counts_dic(test_bed)
    {'reg1': 1, 'reg2': 2, 'reg3': 1}

    """
    id2c_dic = {}
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            # Single regex pass (raw string avoids invalid "\d" escape);
            # matches split-site IDs ending in _p<N> or _e<N>.
            m = re.search(r'(.+)_[pe]\d+$', site_id)
            if m:
                core_id = m.group(1)
                if core_id in id2c_dic:
                    id2c_dic[core_id] += 1
                else:
                    id2c_dic[core_id] = 1
            else:
                # Plain IDs must be unique.
                assert site_id not in id2c_dic, "non-unique site ID \"%s\" in BED file \"%s\"" %(site_id, in_bed)
                id2c_dic[site_id] = 1
    assert id2c_dic, "id2c_dic empty"
    return id2c_dic
################################################################################
def bed_check_for_part_ids(in_bed):
    """
    Check for part IDs in given BED file in_bed.
    E.g. IDs like
    id1_p1, id1_p2
    Works for _p (split region parts) and _e (exon regions) ID extensions.
    Return True if part IDs found (i.e. at least two parts share a core
    ID), else False.

    >>> test_bed = "test_data/test8.bed"
    >>> bed_check_for_part_ids(test_bed)
    True

    """
    id2c_dic = {}
    part_ids_found = False
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            # Single regex pass (raw string avoids invalid "\d" escape).
            m = re.search(r'(.+)_[pe]\d+$', site_id)
            if m:
                core_id = m.group(1)
                if core_id in id2c_dic:
                    # Second part with the same core ID found; done.
                    part_ids_found = True
                    break
                id2c_dic[core_id] = 1
            else:
                # Plain IDs must be unique.
                assert site_id not in id2c_dic, "non-unique site ID \"%s\" in BED file \"%s\"" %(site_id, in_bed)
                id2c_dic[site_id] = 1
    return part_ids_found
################################################################################
def extract_conservation_scores(in_bed, out_con, con_bw,
                                stats_dic=None,
                                merge_split_regions=True,
                                report=False):
    """
    Extract conservation scores for genomic regions given as in_bed BED file.
    Scores are extracted from .bigWig con_bw file, tested for phastCons
    and phyloP .bigWig files, downloaded from:
    http://hgdownload.cse.ucsc.edu/goldenpath/hg38/phyloP100way/hg38.phyloP100way.bw
    http://hgdownload.cse.ucsc.edu/goldenpath/hg38/phastCons100way/hg38.phastCons100way.bw
    Output conservation scores for each region position to out_con with format:
    >site_id
    0.01
    0.03
    0
    ...

    stats_dic:
        If not None, extract statistics on conservation scores and store
        in stats_dic.
    merge_split_regions:
        If True, merge regions with IDs id1_p1, id1_p2 .. or id1_e1, id1_e2 ..
        The function thus looks for IDs with _e or _p attached to core ID,
        and combines the regions with incrementing numbers (p1 p2 ... ).
        This should be used for split sites like transcript sites over exon
        borders, or exon regions of a transcript, to get one contiguous
        list of conservation scores for the site / transcript.
    report:
        If True, output some logging information.

    This function uses:
    bedtools makewindows, bigWigAverageOverBed

    bedtools makewindows
    ====================
    Split in_bed regions into 1-nt regions.
    Given test.bed with region:
    chr1 100 105 site1 0 +
    bedtools makewindows (bedtools makewindows -b test.bed -w 1 -i srcwinnum)
    outputs:
    chr1 100 101 site1_1
    chr1 101 102 site1_2
    chr1 102 103 site1_3
    chr1 103 104 site1_4
    chr1 104 105 site1_5

    >>> in_bed = "test_data/test_con.bed"
    >>> out_con = "test_data/test_con.tmp.con"
    >>> exp_con = "test_data/test_con.exp.con"
    >>> out2_con = "test_data/test_con2.tmp.con"
    >>> exp2_con = "test_data/test_con2.exp.con"
    >>> con_bw = "test_data/test_con.bw"
    >>> extract_conservation_scores(in_bed, out_con, con_bw, merge_split_regions=True)
    >>> diff_two_files_identical(out_con, exp_con)
    True
    >>> extract_conservation_scores(in_bed, out2_con, con_bw, merge_split_regions=False)
    >>> diff_two_files_identical(out2_con, exp2_con)
    True

    """
    # Checks.
    assert is_tool("bedtools"), "bedtools not in PATH"
    assert is_tool("bigWigAverageOverBed"), "bigWigAverageOverBed not in PATH"
    assert os.path.isfile(in_bed), "cannot open in_bed file \"%s\"" % (in_bed)
    assert os.path.isfile(con_bw), "cannot open con_bw file \"%s\"" % (con_bw)
    assert bed_check_unique_ids(in_bed), "in_bed \"%s\" column 4 IDs not unique" % (in_bed)
    # Get region IDs, lengths, strand polarities.
    id2pol_dic = {}
    id2len_dic = bed_get_region_lengths(in_bed, id2pol_dic=id2pol_dic)
    # Get part counts for IDs (>1 for id_p or id_e IDs) to get exons / split sites.
    label_dic = {}  # Split site core ID to used label (e, p).
    id2parts_dic = get_core_id_to_part_counts_dic(id2len_dic, label_dic=label_dic)
    # Generate .tmp files with unique names.
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    random_id = uuid.uuid1()
    tmp_tab = str(random_id) + ".tmp.tab"
    if stats_dic is not None:
        stats_dic["mean"] = 0
        stats_dic["stdev"] = 0
        stats_dic["min"] = 0
        stats_dic["max"] = 0
        stats_dic["zero_pos"] = 0
        stats_dic["total_pos"] = 0
        sc_list = []
    if report:
        print("bedtools makewindows from input BED ... ")
    # Make 1-pos bed from in_bed.
    makewin_cmd = "bedtools makewindows -b %s -w 1 -i srcwinnum > %s" % (in_bed, tmp_bed)
    os.system(makewin_cmd)
    if report:
        print("Extract conservation scores from bigWig ... ")
    # Extract conservation scores from .bw.
    bw_cmd = "bigWigAverageOverBed %s %s %s" %(con_bw, tmp_bed, tmp_tab)
    os.system(bw_cmd)
    # Region ID to score list.
    id2sc_dic = {}
    with open(tmp_tab) as f:
        for line in f:
            cols = line.strip().split("\t")
            pos_id = cols[0]
            sc = float(cols[3])
            # pos_id is "<region ID>_<1-based window number>".
            m = re.search(r"(.+)_(\d+)", pos_id)
            reg_id = m.group(1)
            pol = id2pol_dic[reg_id]
            if reg_id not in id2sc_dic:
                id2sc_dic[reg_id] = [sc]
            else:
                # Prepend scores for "-" strand so lists run 5'->3'.
                if pol == "+":
                    id2sc_dic[reg_id] = id2sc_dic[reg_id] + [sc]
                else:
                    id2sc_dic[reg_id] = [sc] + id2sc_dic[reg_id]
    if report:
        print("Write scores to output file ... ")
    # Write output file.
    OUTCON = open(out_con,"w")
    """
    For each ID output conservation scores.
    In case the ID has several _p or _e, assemble the parts and output full
    region scores list.
    """
    # Assemble split regions.
    if merge_split_regions:
        for core_id in label_dic:
            part_c = id2parts_dic[core_id]
            label = label_dic[core_id]
            merged_sc = []
            for i in range(part_c):
                i += 1
                # Get part region ID (parts are numbered from 1).
                region_id = core_id + "_%s%i" %(label, i)
                # Bug fix: assert message referenced undefined "region_i".
                assert region_id in id2sc_dic, "split site ID \"%s\" missing in id2sc_dic" %(region_id)
                merged_sc += id2sc_dic[region_id]
                del id2sc_dic[region_id]
            assert merged_sc, "merged_sc list empty"
            id2sc_dic[core_id] = merged_sc
    # Output regions.
    for reg_id in id2sc_dic:
        OUTCON.write(">%s\n" %(reg_id))
        for sc in id2sc_dic[reg_id]:
            OUTCON.write("%s\n" %(sc))
        # Store conservation score stats.
        if stats_dic:
            for sc in id2sc_dic[reg_id]:
                if sc == 0:
                    stats_dic["zero_pos"] += 1
                sc_list.append(sc)
    OUTCON.close()
    if stats_dic:
        assert sc_list, "no scores stored in score list"
        stats_dic["mean"] = statistics.mean(sc_list)
        stats_dic["stdev"] = statistics.stdev(sc_list)
        stats_dic["total_pos"] = len(sc_list)
        stats_dic["min"] = min(sc_list)
        stats_dic["max"] = max(sc_list)
    # Remove tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    if os.path.exists(tmp_tab):
        os.remove(tmp_tab)
################################################################################
def bed_get_exon_intron_annotations_from_gtf(tr_ids_dic, in_bed,
                                             in_gtf, eia_out,
                                             stats_dic=None,
                                             own_exon_bed=False,
                                             split_size=60,
                                             n_labels=False,
                                             intron_border_labels=False):
    """
    By default get exon (E) and intron (I) labels for each position in
    given in_bed BED file. Get labels from in_gtf GTF, for transcripts
    with IDs stored in tr_ids_dic. Output site labels to eia_out.

    tr_ids_dic:
        Transcript IDs for which to extract exon+intron regions for labelling.
    in_bed:
        BED file with regions to label.
    in_gtf:
        GTF file for extracting exon/intron regions.
    eia_out:
        Output file with labels.
    stats_dic:
        If not None, extract exon-intron annotation statistics and store
        in stats_dic.
    own_exon_bed:
        Supply own exon BED file. This disables n_labels and
        intron_border_labels annotations. Also tr_ids_dic is not used anymore
        for defining transcript / exon regions.
    split_size:
        Split size for outputting labels (FASTA style row width).
    n_labels:
        If True, label all positions not covered by intron or exon regions
        with "N".
    intron_border_labels:
        If True, label intron 5' and 3' end positions (labels "T" and "F").

    >>> in_bed = "test_data/test_eia.bed"
    >>> in_gtf = "test_data/test_eia.gtf"
    >>> out_exp1_bed = "test_data/test_eia.exp1.eia"
    >>> out_exp2_bed = "test_data/test_eia.exp2.eia"
    >>> out_exp3_bed = "test_data/test_eia.exp3.eia"
    >>> out_exp4_bed = "test_data/test_eia.exp4.eia"
    >>> out_tmp1_bed = "test_data/test_eia.tmp1.eia"
    >>> out_tmp2_bed = "test_data/test_eia.tmp2.eia"
    >>> out_tmp3_bed = "test_data/test_eia.tmp3.eia"
    >>> out_tmp4_bed = "test_data/test_eia.tmp4.eia"
    >>> tr_ids_dic = {'tr1': 1, 'tr2': 1}
    >>> bed_get_exon_intron_annotations_from_gtf(tr_ids_dic, in_bed, in_gtf, out_tmp1_bed)
    >>> diff_two_files_identical(out_tmp1_bed, out_exp1_bed)
    True
    >>> bed_get_exon_intron_annotations_from_gtf(tr_ids_dic, in_bed, in_gtf, out_tmp2_bed, n_labels=True)
    >>> diff_two_files_identical(out_tmp2_bed, out_exp2_bed)
    True
    >>> bed_get_exon_intron_annotations_from_gtf(tr_ids_dic, in_bed, in_gtf, out_tmp3_bed, intron_border_labels=True)
    >>> diff_two_files_identical(out_tmp3_bed, out_exp3_bed)
    True
    >>> bed_get_exon_intron_annotations_from_gtf(tr_ids_dic, in_bed, in_gtf, out_tmp4_bed, n_labels=True, intron_border_labels=True)
    >>> diff_two_files_identical(out_tmp4_bed, out_exp4_bed)
    True

    """
    # User-supplied exon BED disables border/N labelling modes.
    if own_exon_bed:
        intron_border_labels = False
        n_labels = False
        exon_bed = own_exon_bed
    else:
        # Checker.
        assert tr_ids_dic, "given dictionary tr_ids_dic empty"
        random_id = uuid.uuid1()
        exon_bed = str(random_id) + ".tmp.bed"
    # Intron regions only needed for N or border labelling.
    intron_bed = False
    if intron_border_labels or n_labels:
        random_id = uuid.uuid1()
        intron_bed = str(random_id) + ".intron.tmp.bed"
    # Further temporary files (unique names via UUIDs).
    random_id = uuid.uuid1()
    border_bed = str(random_id) + ".border.tmp.bed"
    random_id = uuid.uuid1()
    merged_bed = str(random_id) + ".merged.tmp.bed"
    random_id = uuid.uuid1()
    tmp_out = str(random_id) + ".tmp.out"
    # Initialize per-label statistics counters.
    if stats_dic is not None:
        stats_dic["E"] = 0
        stats_dic["I"] = 0
        stats_dic["total_pos"] = 0
        if n_labels:
            stats_dic["N"] = 0
        if intron_border_labels:
            stats_dic["F"] = 0
            stats_dic["T"] = 0
    # Get exon (+ intron) regions from GTF.
    if not own_exon_bed:
        gtf_extract_exon_bed(in_gtf, exon_bed,
                             out_intron_bed=intron_bed,
                             use_ei_labels=True,
                             tr_ids_dic=tr_ids_dic)
    # Extract intron border positions to BED.
    if intron_border_labels:
        bed_extract_start_end_pos(intron_bed, border_bed)
    # Merge label region files for overlapping.
    merge_list = []
    merge_list.append(exon_bed)
    if intron_bed:
        merge_list.append(intron_bed)
    if intron_border_labels:
        merge_list.append(border_bed)
    merge_files(merge_list, merged_bed)
    # Get start + end for each site ID.
    id2s_dic = {}
    id2e_dic = {}
    # Dictionary of lists, store position labels, init with "I" or "N".
    id2labels_dic = {}
    with open(in_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            site_l = site_e - site_s
            assert site_l, "invalid site length for row \"%s\" in in_bed \"%s\"" %(row, in_bed)
            # Default label: "N" (uncovered) if n_labels, else "I".
            if n_labels:
                id2labels_dic[site_id] = ["N"]*site_l
            else:
                id2labels_dic[site_id] = ["I"]*site_l
    f.closed
    assert id2s_dic, "nothing got read in. Given BED file in_bed \"%s\" empty?" %(in_bed)
    # Preferred labels, i.e. do not overwrite these if present at position.
    pref_labels_dic = {}
    if intron_border_labels:
        pref_labels_dic["F"] = 1
        pref_labels_dic["T"] = 1
    # Run overlap calculation to get exon overlapping regions.
    intersect_params = "-s -wb"
    intersect_bed_files(in_bed, merged_bed, intersect_params, tmp_out)
    """
    Example output:
    $ intersectBed -a sites.bed -b annot.bed -s -wb
    chr1 1000 1020 site1 0 + chr1 980 1020 F 0 +
    chr1 1020 1023 site1 0 + chr1 1020 1023 S 0 +
    chr1 1020 1050 site1 0 + chr1 1020 1500 C 0 +
    """
    with open(tmp_out) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            s = int(cols[1]) + 1 # Make one-based.
            e = int(cols[2])
            site_id = cols[3]
            site_s = id2s_dic[site_id] + 1 # Make one-based.
            site_e = id2e_dic[site_id]
            site_pol = cols[5]
            label = cols[9]
            # + case.
            if site_pol == "+":
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index (offset from site start).
                        li = i - site_s
                        # Only overwrite non-preferred labels.
                        if id2labels_dic[site_id][li] not in pref_labels_dic:
                            id2labels_dic[site_id][li] = label
            else:
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index (reversed for minus strand).
                        li = site_e - i
                        if id2labels_dic[site_id][li] not in pref_labels_dic:
                            id2labels_dic[site_id][li] = label
    f.closed
    # Output transcript region annotations to .eia file.
    OUTEIA = open(eia_out,"w")
    for site_id in id2labels_dic:
        # List to string.
        label_str = "".join(id2labels_dic[site_id])
        OUTEIA.write(">%s\n" %(site_id))
        # FASTA-style wrapping at split_size characters per row.
        for i in range(0, len(label_str), split_size):
            OUTEIA.write("%s\n" %((label_str[i:i+split_size])))
        # Get label statistics.
        if stats_dic:
            stats_dic["total_pos"] += len(label_str)
            # F/T counted once per site (occurrence), other labels per position.
            occ_labels = ["F", "T"]
            for ocl in occ_labels:
                if re.search("%s" %(ocl), label_str):
                    stats_dic[ocl] += 1
            for l in label_str:
                if l not in occ_labels:
                    stats_dic[l] += 1
    OUTEIA.close()
    # Remove tmp files (keep a user-supplied exon BED).
    if os.path.exists(exon_bed):
        if not own_exon_bed:
            os.remove(exon_bed)
    if intron_bed:
        if os.path.exists(intron_bed):
            os.remove(intron_bed)
        if os.path.exists(border_bed):
            os.remove(border_bed)
    if os.path.exists(merged_bed):
        os.remove(merged_bed)
    if os.path.exists(tmp_out):
        os.remove(tmp_out)
################################################################################
def extract_exon_intron_labels(in_bed, exon_bed, out_labels):
    """
    Overlap genomic regions .bed with exon regions .bed, and mark region
    positions with "I" (intron) or "E" (exon) labels based on the overlap
    with exon regions.

    >>> region_bed = "test_data/test3.bed"
    >>> exon_bed = "test_data/test4.bed"
    >>> exp_lab = "test_data/test.exon_intron_labels"
    >>> out_lab = "test_data/test.tmp.exon_intron_labels"
    >>> extract_exon_intron_labels(region_bed, exon_bed, out_lab)
    >>> diff_two_files_identical(out_lab, exp_lab)
    True

    """
    # Check.
    assert is_tool("bedtools"), "bedtools not in PATH"
    assert os.path.isfile(in_bed), "cannot open in_bed BED file \"%s\"" % (in_bed)
    assert os.path.isfile(exon_bed), "cannot open exon_bed BED file \"%s\"" % (exon_bed)
    assert bed_check_unique_ids(in_bed), "in_bed \"%s\" column 4 IDs not unique" % (in_bed)
    # Generate .tmp files.
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    # Get polarity, start, end for each site ID.
    id2pol_dic = {}
    id2s_dic = {}
    id2e_dic = {}
    # Dictionary of lists, store position labels, init with "I".
    id2labels_dic = {}
    with open(in_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_pol = cols[5]
            id2pol_dic[site_id] = site_pol
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            site_l = site_e - site_s
            assert site_l, "invalid site length for row \"%s\" in in_bed \"%s\"" %(row, in_bed)
            # Every position starts as intron; exon overlaps overwrite below.
            id2labels_dic[site_id] = ["I"]*site_l
    f.closed
    assert id2pol_dic, "No entries read into dictionary (input file \"%s\" empty or malformatted?)" % (in_bed)
    # Run overlap calculation to get exon overlapping regions.
    intersect_params = "-s"
    intersect_bed_files(in_bed, exon_bed, intersect_params, tmp_bed)
    with open(tmp_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            s = int(cols[1]) + 1 # Make one-based.
            e = int(cols[2])
            site_id = cols[3]
            site_s = id2s_dic[site_id] + 1 # Make one-based.
            site_e = id2e_dic[site_id]
            site_pol = id2pol_dic[site_id]
            # + case.
            if site_pol == "+":
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index (offset from site start).
                        li = i - site_s
                        id2labels_dic[site_id][li] = "E"
            else:
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index (reversed for minus strand).
                        li = site_e - i
                        id2labels_dic[site_id][li] = "E"
    f.closed
    # Write labels to file.
    OUTLAB = open(out_labels,"w")
    for site_id in id2labels_dic:
        # List to string.
        label_str = "".join(id2labels_dic[site_id])
        OUTLAB.write("%s\t%s\n" %(site_id, label_str))
    OUTLAB.close()
    # Remove tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
################################################################################
def bed_extract_start_end_pos(in_bed, out_bed):
    """
    Extract region start and end positions from given in_bed BED.
    Output start and end position regions to out_bed BED.
    Output column 4 IDs will be F for 5' end position
    (strand info considered) and T for 3' end position.

    >>> in_bed = "test_data/test_start_end.bed"
    >>> out_exp_bed = "test_data/test_start_end.exp.bed"
    >>> out_tmp_bed = "test_data/test_start_end.tmp.bed"
    >>> bed_extract_start_end_pos(in_bed, out_tmp_bed)
    >>> diff_two_files_identical(out_exp_bed, out_tmp_bed)
    True

    """
    # Context managers guarantee both handles are closed on errors too.
    with open(out_bed, "w") as OUTPOS, open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            chr_id = cols[0]
            s = int(cols[1])
            e = int(cols[2])
            pol = cols[5]
            # Default (+ strand): F = first nt, T = last nt.
            s_f, e_f = s, s + 1
            s_t, e_t = e - 1, e
            if pol == "-":
                # Minus strand: 5' end is the region end, 3' end the start.
                s_f, e_f = e - 1, e
                s_t, e_t = s, s + 1
            # Output border positions.
            OUTPOS.write("%s\t%i\t%i\tF\t0\t%s\n" %(chr_id, s_f, e_f, pol))
            OUTPOS.write("%s\t%i\t%i\tT\t0\t%s\n" %(chr_id, s_t, e_t, pol))
################################################################################
def intersect_bed_files(a_file, b_file, params, out_file,
                        sorted_out=False):
    """
    Intersect two .bed files, using intersectBed.

    params:
        Additional intersectBed command line parameters string.
    sorted_out:
        If True, sort the intersection output by chromosome and start.
    """
    intersect_part = "intersectBed -a " + a_file + " -b " + b_file + " " + params
    if sorted_out:
        check_cmd = intersect_part + " | " + "sort -k1,1 -k2,2n > " + out_file
    else:
        check_cmd = intersect_part + " > " + out_file
    output = subprocess.getoutput(check_cmd)
    # intersectBed prints nothing on success; any output means trouble.
    assert not output, "intersectBed has problems with your input:\n%s\n%s" %(check_cmd, output)
################################################################################
def bed_intersect_count_region_overlaps(a_file, b_file,
                                        b_f=0.75):
    """
    Intersect two .bed files, count how often -a regions overlap with -b
    regions. Return count dictionary (-a col4 ID -> overlap count)

    intersectBed -a genes.bed -b sites.bed -s -F 0.75
    chr1 1000 1050 ENSG1 0 +
    chr1 1500 1550 ENSG1 0 +

    >>> a_file = "test_data/test_intersect.genes.bed"
    >>> b_file = "test_data/test_intersect.sites.bed"
    >>> bed_intersect_count_region_overlaps(a_file, b_file)
    {'ENSG1': 2, 'ENSG2': 1}

    """
    params = "-s -F %.2f" %(b_f)
    # Temporary output file with a unique name.
    tmp_out = str(uuid.uuid1()) + ".intersect.tmp.out"
    check_cmd = "intersectBed -a " + a_file + " -b " + b_file + " " + params + " > " + tmp_out
    output = subprocess.getoutput(check_cmd)
    # intersectBed prints nothing on success; any output means trouble.
    assert not output, "intersectBed has problems with your input:\n%s\n%s" %(check_cmd, output)
    # Count overlap rows per -a region ID (column 4).
    count_dic = {}
    with open(tmp_out) as f:
        for line in f:
            reg_id = line.strip().split("\t")[3]
            count_dic[reg_id] = count_dic.get(reg_id, 0) + 1
    if os.path.exists(tmp_out):
        os.remove(tmp_out)
    assert count_dic, "no region counts read in. Possibly no overlaps?"
    return count_dic
################################################################################
def diff_two_files_identical(file1, file2):
    """
    Check whether two files are identical. Return True if the files are
    byte-identical (equivalent to diff reporting no differences).

    >>> file1 = "test_data/file1"
    >>> file2 = "test_data/file2"
    >>> diff_two_files_identical(file1, file2)
    True
    >>> file1 = "test_data/test1.bed"
    >>> diff_two_files_identical(file1, file2)
    False

    """
    # Pure Python chunked byte comparison replaces the former shell
    # "diff" call: portable, no shell, safe for paths containing spaces.
    with open(file1, "rb") as f1, open(file2, "rb") as f2:
        while True:
            b1 = f1.read(65536)
            b2 = f2.read(65536)
            if b1 != b2:
                return False
            if not b1:
                # Both streams exhausted with no mismatch.
                return True
################################################################################
def fasta_read_in_ids(fasta_file,
                      return_type="list"):
    """
    Given a .fa file, read in header IDs in order appearing in file,
    and store in list (or dictionary, see return_type).
    Also works with other files containing headers like ">id"

    return_type:
        "list" returns IDs as ordered list, "dictionary" as ID -> 1 dict.

    >>> test_file = "test_data/test.fa"
    >>> fasta_read_in_ids(test_file)
    ['seq1', 'seq2']
    >>> fasta_read_in_ids(test_file, return_type="dictionary")
    {'seq1': 1, 'seq2': 1}

    """
    as_list = return_type == "list"
    if not as_list:
        assert return_type == "dictionary", "invalid return_type set"
    ids = [] if as_list else {}
    with open(fasta_file) as f:
        for line in f:
            # Single regex pass: matching implies a non-empty capture.
            m = re.search(">(.+)", line)
            if m:
                seq_id = m.group(1)
                if as_list:
                    ids.append(seq_id)
                else:
                    ids[seq_id] = 1
    return ids
################################################################################
def filter_bed_row_dic_output(id2row_dic, keep_ids_dic, out_bed):
    """
    Filter bed rows dictionary, with key = site ID and value = bed row string.

    id2row_dic      Dictionary to filter with site ID -> bed row string
    keep_ids_dic    Dictionary storing site IDs to keep
    out_bed         Output .bed file

    Raises AssertionError if keep_ids_dic is empty or no row survives
    the filter.
    """
    # Check.
    assert keep_ids_dic, "given keep_ids_dic empty"
    c_out = 0
    # Context manager guarantees the handle is closed even on errors.
    with open(out_bed, "w") as OUTBED:
        for site_id in id2row_dic:
            if site_id in keep_ids_dic:
                c_out += 1
                OUTBED.write("%s\n" %(id2row_dic[site_id]))
    assert c_out, "no remaining BED rows after filtering"
################################################################################
def bed_get_region_ids(bed_file,
                       check_dic=True):
    """
    Read in .bed file, return region/site IDs (column 4 IDs).
    Fix: docstring previously said "column 5 IDs" although cols[3]
    (column 4) is read, consistent with the uniqueness assertion.

    check_dic:
        If True, assert at least one ID was read in.

    >>> test_file = "test_data/test3.bed"
    >>> bed_get_region_ids(test_file)
    {'CLIP1': 1, 'CLIP2': 1}

    """
    ids_dic = {}
    with open(bed_file) as f:
        for line in f:
            site_id = line.strip().split("\t")[3]
            # Column 4 IDs must be unique.
            assert site_id not in ids_dic, "column 4 IDs not unique in given .bed file \"%s\"" %(bed_file)
            ids_dic[site_id] = 1
    if check_dic:
        assert ids_dic, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (bed_file)
    return ids_dic
################################################################################
def fasta_filter_entries(in_fasta, keep_ids_dic, out_fasta,
                         dna=False):
    """
    Filter FASTA file, keeping entries with IDs in keep_ids_dic.

    dna:
        Passed through to read_fasta_into_dic() (convert sequences to DNA).

    >>> in_fasta = "test_data/test.fa"
    >>> out_fasta = "test_data/test.tmp.fa"
    >>> exp_fasta = "test_data/test3.fa"
    >>> keep_ids_dic = {'seq2'}
    >>> fasta_filter_entries(in_fasta, keep_ids_dic, out_fasta, dna=True)
    >>> diff_two_files_identical(out_fasta, exp_fasta)
    True

    """
    # Check.
    assert keep_ids_dic, "given keep_ids_dic empty"
    # Read in FASTA file, restricted to the IDs to keep.
    fasta_dic = read_fasta_into_dic(in_fasta, ids_dic=keep_ids_dic, dna=dna)
    assert fasta_dic, "fasta_dic empty after filtering"
    # Write filtered FASTA file (context manager guarantees close).
    with open(out_fasta, "w") as OUTFA:
        for fa_id in fasta_dic:
            OUTFA.write(">%s\n%s\n" %(fa_id, fasta_dic[fa_id]))
################################################################################
def make_file_copy(in_file, out_file,
                   delete_in=False):
    """
    Make a file copy by copying in_file to out_file.

    delete_in:
        If True, delete in_file after copying.

    """
    import shutil
    # shutil.copyfile avoids spawning a shell (portable, no injection risk).
    assert in_file != out_file, "cannot copy file into itself (%s)" %(in_file)
    assert os.path.isfile(in_file), "in_file \"%s\" not found" %(in_file)
    shutil.copyfile(in_file, out_file)
    # Delete in_file.
    if delete_in:
        if os.path.exists(in_file):
            os.remove(in_file)
def move_rename_file(in_file, out_file):
    """
    Move / rename in_file to out_file.
    """
    import shutil
    assert in_file != out_file, "cannot move file into itself (%s)" %(in_file)
    assert os.path.exists(in_file), "in_file \"%s\" not found" %(in_file)
    # shutil.move works across filesystems and avoids spawning a shell.
    shutil.move(in_file, out_file)
################################################################################
def con_merge_exon_regions(in_con, id2exonc_dic, out_con,
                           id2len_dic=False):
    """
    Take a conservation scores (.con) file, and merge exon regions
    identified by id2exonc_dic. Output merged regions to out_con.
    id2exonc_dic format: site_id (key) -> exon_count (value)
    Only counts > 1 need to be merged, site_id counts == 1 are output
    unchanged.
    .con file format:
    >site_id1
    1	0	-0.101
    2	0	-0.303
    3	0	0.909
    ....
    >site_id2
    For exon regions we expect ID : site_id_e1, site_id_e2 ...
    If id2len_dic given, compare the (concatenated) list lengths
    with the lengths in id2len_dic for sanity checking.

    >>> in_con = "test_data/test2.con"
    >>> out_con = "test_data/test2.tmp.con"
    >>> exp_con = "test_data/test2.exp.con"
    >>> id2exonc_dic = {'CLIP1': 1, 'CLIP2': 3}
    >>> id2len_dic = {'CLIP1': 10, 'CLIP2': 15}
    >>> con_merge_exon_regions(in_con, id2exonc_dic, out_con, id2len_dic=id2len_dic)
    >>> diff_two_files_identical(out_con, exp_con)
    True

    """
    # Check.
    assert id2exonc_dic, "given dictionary id2exonc_dic empty"
    assert os.path.isfile(in_con), "cannot open in_con \"%s\"" % (in_con)
    # Read in scores: seq_id -> list of [col2, col3] score pairs per position.
    con_dic = {}
    seq_id = ""
    with open(in_con) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                seq_id = m.group(1)
                assert seq_id not in con_dic, "non-unique ID \"%s\" in in_con \"%s\"" %(seq_id, in_con)
                con_dic[seq_id] = []
            else:
                cols = line.strip().split("\t")
                con_dic[seq_id].append([cols[1], cols[2]])
    assert con_dic, "con_dic empty"
    # Merge entries and write to out_con.
    with open(out_con, "w") as OUTCON:
        for site_id in id2exonc_dic:
            ex_c = id2exonc_dic[site_id]
            assert ex_c >= 1, "invalid ex_c (%i) given for site_id \"%s\"" %(ex_c, site_id)
            if ex_c > 1:
                # Concatenate the score lists of all exon parts, in order.
                new_list = []
                for i in range(1, ex_c + 1):
                    exon_id = site_id + "_e%i" %(i)
                    # Fixed: use %s and report the missing exon ID itself
                    # (original "%" format string raised ValueError here).
                    assert exon_id in con_dic, "exon_id \"%s\" missing in con_dic" %(exon_id)
                    new_list += con_dic[exon_id]
            else:
                assert site_id in con_dic, "site_id \"%s\" has ex_c = 1 but is missing in con_dic" %(site_id)
                new_list = con_dic[site_id]
            # Optional sanity check: merged length must match expected length.
            if id2len_dic:
                assert site_id in id2len_dic, "missing site_id \"%s\" in id2len_dic" %(site_id)
                ll = len(new_list)
                sl = id2len_dic[site_id]
                assert sl == ll, "lengths not identical for site_id \"%s\" (%i != %i)" %(site_id, sl, ll)
            # Output merged region with renumbered positions (1-based).
            OUTCON.write(">%s\n" %(site_id))
            for pos, l in enumerate(new_list, 1):
                OUTCON.write("%i\t%s\t%s\n" %(pos, l[0], l[1]))
################################################################################
def fasta_merge_exon_regions(in_fa, id2exonc_dic, out_fa,
                             id2len_dic=False):
    """
    Take a FASTA file, and merge exon regions identified by
    id2exonc_dic. Output merged regions to out_fa.
    id2exonc_dic format: site_id (key) -> exon_count (value)
    Only counts > 1 need to be merged, site_id counts == 1 are output
    unchanged.
    For exon regions we expect ID : site_id_e1, site_id_e2 ...
    If id2len_dic given, compare the (concatenated) sequence lengths
    with the lengths in id2len_dic for sanity checking.

    >>> in_fa = "test_data/test4.fa"
    >>> out_fa = "test_data/test4.tmp.fa"
    >>> exp_fa = "test_data/test4.exp.fa"
    >>> id2exonc_dic = {'CLIP1': 1, 'CLIP2': 3}
    >>> id2len_dic = {'CLIP1': 10, 'CLIP2': 15}
    >>> fasta_merge_exon_regions(in_fa, id2exonc_dic, out_fa, id2len_dic=id2len_dic)
    >>> diff_two_files_identical(out_fa, exp_fa)
    True

    """
    # Check.
    assert id2exonc_dic, "given id2exonc_dic empty"
    assert os.path.isfile(in_fa), "cannot open in_fa \"%s\"" % (in_fa)
    # Read in FASTA file.
    fasta_dic = read_fasta_into_dic(in_fa)
    assert fasta_dic, "fasta_dic empty"
    # Merge sequences and write to out_fa.
    with open(out_fa, "w") as OUTFA:
        for site_id in id2exonc_dic:
            ex_c = id2exonc_dic[site_id]
            assert ex_c >= 1, "invalid ex_c (%i) given for site_id \"%s\"" %(ex_c, site_id)
            if ex_c > 1:
                # Concatenate the sequences of all exon parts, in order.
                new_seq = ""
                for i in range(1, ex_c + 1):
                    exon_id = site_id + "_e%i" %(i)
                    # Fixed: use %s and report the missing exon ID itself
                    # (original "%" format string raised ValueError here).
                    assert exon_id in fasta_dic, "exon_id \"%s\" missing in fasta_dic (possibly sequence extraction from .2bit failed for this region)" %(exon_id)
                    new_seq += fasta_dic[exon_id]
            else:
                assert site_id in fasta_dic, "site_id \"%s\" has ex_c = 1 but is missing in fasta_dic (possibly sequence extraction from .2bit failed for this region)" %(site_id)
                new_seq = fasta_dic[site_id]
            # Optional sanity check: merged length must match expected length.
            if id2len_dic:
                assert site_id in id2len_dic, "missing site_id \"%s\" in id2len_dic" %(site_id)
                ll = len(new_seq)
                sl = id2len_dic[site_id]
                assert sl == ll, "lengths not identical for site_id \"%s\" (%i != %i)" %(site_id, sl, ll)
            OUTFA.write(">%s\n%s\n" %(site_id, new_seq))
################################################################################
def exon_intron_labels_merge_exon_regions(in_file, id2exonc_dic, out_file,
                                          id2len_dic=False):
    """
    Take a .exon_intron_labels file, and merge exon regions identified by
    id2exonc_dic. Output merged regions to out_file.
    id2exonc_dic format: site_id (key) -> exon_count (value)
    Only counts > 1 need to be merged, site_id counts == 1 are output
    unchanged.
    .exon_intron_labels file format:
    CLIP1	IIIIIIEEEE
    CLIP2	EEEEEIIIIIIIIII
    ....
    For exon regions we expect ID : site_id_e1, site_id_e2 ...
    If id2len_dic given, compare the (concatenated) label string lengths
    with the lengths in id2len_dic for sanity checking.

    >>> in_file = "test_data/test2.exon_intron_labels"
    >>> out_file = "test_data/test2.tmp.exon_intron_labels"
    >>> exp_file = "test_data/test2.exp.exon_intron_labels"
    >>> id2exonc_dic = {'CLIP1': 1, 'CLIP2': 3}
    >>> id2len_dic = {'CLIP1': 10, 'CLIP2': 15}
    >>> exon_intron_labels_merge_exon_regions(in_file, id2exonc_dic, out_file, id2len_dic=id2len_dic)
    >>> diff_two_files_identical(out_file, exp_file)
    True

    """
    # Check.
    assert id2exonc_dic, "given id2exonc_dic empty"
    assert os.path.isfile(in_file), "ERROR: Cannot open in_file \"%s\"" % (in_file)
    # Read in .exon_intron_labels file into dictionary (site ID -> labels).
    id2labels_dic = {}
    with open(in_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[0]
            labels = cols[1]
            assert site_id not in id2labels_dic, "column 1 IDs not unique in given .exon_intron_labels file \"%s\"" %(in_file)
            id2labels_dic[site_id] = labels
    # Merge label strings and output one row per site ID.
    with open(out_file, "w") as OUTLABELS:
        for site_id in id2exonc_dic:
            ex_c = id2exonc_dic[site_id]
            assert ex_c >= 1, "invalid ex_c (%i) given for site_id \"%s\"" %(ex_c, site_id)
            if ex_c > 1:
                # Concatenate the labels of all exon parts, in order.
                new_labels = ""
                for i in range(1, ex_c + 1):
                    exon_id = site_id + "_e%i" %(i)
                    # Fixed: use %s and report the missing exon ID itself
                    # (original "%" format string raised ValueError here).
                    assert exon_id in id2labels_dic, "exon_id \"%s\" missing in id2labels_dic" %(exon_id)
                    new_labels += id2labels_dic[exon_id]
            else:
                assert site_id in id2labels_dic, "site_id \"%s\" has ex_c = 1 but is missing in id2labels_dic" %(site_id)
                new_labels = id2labels_dic[site_id]
            # Optional sanity check: merged length must match expected length.
            if id2len_dic:
                assert site_id in id2len_dic, "missing site_id \"%s\" in id2len_dic" %(site_id)
                ll = len(new_labels)
                sl = id2len_dic[site_id]
                assert sl == ll, "lengths not identical for site_id \"%s\" (%i != %i)" %(site_id, sl, ll)
            OUTLABELS.write("%s\t%s\n" %(site_id, new_labels))
################################################################################
def exon_intron_labels_read_in_ids(in_file):
    """
    Given a .exon_intron_labels file, read in header IDs in dictionary.

    >>> test_file = "test_data/test.exon_intron_labels"
    >>> exon_intron_labels_read_in_ids(test_file)
    {'CLIP1': 1, 'CLIP2': 1}

    """
    # Check.
    assert os.path.isfile(in_file), "cannot open in_file \"%s\"" % (in_file)
    # Read in IDs (column 1 of tab-separated file).
    ids_dic = {}
    with open(in_file) as f:
        for line in f:
            site_id = line.strip().split("\t")[0]
            assert site_id not in ids_dic, "column 1 IDs not unique in given .exon_intron_labels file \"%s\"" %(in_file)
            ids_dic[site_id] = 1
    return ids_dic
################################################################################
def get_chromosome_lengths_from_2bit(in_2bit, out_lengths,
                                     std_chr_filter=False):
    """
    Get chromosome lengths from in_2bit .2bit file. Write lengths
    to out_lengths, with format:
    chr1	248956422
    chr10	133797422
    chr11	135086622
    ...
    Also return a dictionary with key=chr_id and value=chr_length.
    std_chr_filter:
        Filter / convert chromosome IDs with function check_convert_chr_id(),
        removing non-standard chromosomes, and convert IDs like 1,2,X,MT ..
        to chr1, chr2, chrX, chrM.
    """
    # Check for twoBitInfo.
    assert is_tool("twoBitInfo"), "twoBitInfo not in PATH"
    # Run twoBitInfo and check.
    # twoBitInfo writes one "chr_id<TAB>length" row per sequence to out_lengths;
    # any stdout/stderr output from the tool indicates an error.
    check_cmd = "twoBitInfo " + in_2bit + " " + out_lengths
    output = subprocess.getoutput(check_cmd)
    error = False
    if output:
        error = True
    assert error == False, "twoBitInfo is complaining:\n%s\n%s" %(check_cmd, output)
    # Read in lengths into dictionary.
    chr_len_dic = {}
    with open(out_lengths) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            chr_id = cols[0]
            chr_l = int(cols[1])
            # Check ID.
            if std_chr_filter:
                # check_convert_chr_id() returns the (possibly converted)
                # standard chromosome ID, or a falsy value otherwise.
                new_chr_id = check_convert_chr_id(chr_id)
                # If not standard chromosome ID or conversion failed, skip.
                if not new_chr_id:
                    continue
                else:
                    chr_id = new_chr_id
            assert chr_id not in chr_len_dic, "non-unique chromosome ID \"%s\" encountered in \"%s\"" %(chr_id, out_lengths)
            chr_len_dic[chr_id] = chr_l
    f.closed
    assert chr_len_dic, "chr_len_dic empty (\"%s\" empty? Chromosome IDs filter activated?)" %(out_lengths)
    return chr_len_dic
################################################################################
def get_center_position(start, end):
    """
    Return the center position (1-based) of a region given by a (genomic)
    0-based start and 1-based end coordinate. For a length-1 region, the
    end position itself is returned.

    >>> get_center_position(10, 11)
    11
    >>> get_center_position(1000,2000)
    1501
    >>> get_center_position(11, 20)
    17
    """
    region_length = end - start
    if region_length == 1:
        # Single-position region: center is simply the end coordinate.
        return end
    return round(region_length / 2 + start) + 1
################################################################################
def bed_merge_file(in_bed, out_bed,
                   custom_params_str=False):
    """
    Use mergeBed from bedtools to merge overlapping .bed entries, storing
    the region IDs to later pick one region for each set of overlapping
    regions.
    custom_params_str:
        Optional parameter string handed to mergeBed instead of the default
        parameters below.
    >>> in_bed = "test_data/test.sorted.bed"
    >>> out_bed = "test_data/test.sorted.merged.tmp.bed"
    >>> out_exp_bed = "test_data/test.sorted.merged.exp.bed"
    >>> bed_merge_file(in_bed, out_bed)
    >>> diff_two_files_identical(out_bed, out_exp_bed)
    True
    """
    # Check for bedtools.
    assert is_tool("bedtools"), "bedtools not in PATH"
    # Parameter string.
    # -s: strand-specific merge; -c 4 -o distinct -delim ";": report the
    # distinct column 4 IDs of each merged set, ";"-separated, in column 4.
    params_str = '-s -c 4 -o distinct -delim ";"'
    if custom_params_str:
        params_str = custom_params_str
    # mergeBed expects coordinate-sorted input; any output indicates an error.
    check_cmd = "mergeBed -i " + in_bed + " " + params_str + " > " + out_bed
    output = subprocess.getoutput(check_cmd)
    error = False
    if output:
        error = True
    assert error == False, "mergeBed is complaining:\n%s\n%s" %(check_cmd, output)
################################################################################
def bed_sort_file(in_bed, out_bed,
                  custom_params_str=False):
    """
    Sort the in_bed .bed file with the command line sort utility and write
    the sorted rows to out_bed.

    custom_params_str:
        Optional sort parameter string to use instead of the default
        chromosome + numeric start coordinate sorting.

    """
    # Default: sort by chromosome (col 1), then numerically by start (col 2).
    sort_params = custom_params_str if custom_params_str else '-k1,1 -k2,2n'
    sort_cmd = "sort " + sort_params + " " + in_bed + " > " + out_bed
    output = subprocess.getoutput(sort_cmd)
    # Any output from sort indicates a problem.
    assert not output, "sort is complaining:\n%s\n%s" %(sort_cmd, output)
################################################################################
def bed_sort_merge_output_top_entries(in_bed, out_bed,
                                      rev_filter=False):
    """
    Sort in_bed file, use mergeBed from bedtools to merge overlapping entries,
    then select for each overlapping set the entry with highest score and
    output it to out_bed.
    rev_filter:
        If True, select the entry with the lowest (instead of highest)
        score from each overlapping set.
    >>> in_bed = "test_data/test5.bed"
    >>> out_bed = "test_data/test5.tmp.bed"
    >>> exp_bed = "test_data/test5.exp.bed"
    >>> bed_sort_merge_output_top_entries(in_bed, out_bed)
    >>> diff_two_files_identical(out_bed, exp_bed)
    True
    """
    assert os.path.isfile(in_bed), "cannot open in_bed \"%s\"" % (in_bed)
    # Generate .tmp files.
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    # Read in_bed rows into dictionary (site ID -> full BED row string).
    id2row_dic = bed_read_rows_into_dic(in_bed)
    # Get region scores (site ID -> column 5 score).
    id2sc_dic = bed_get_region_id_scores(in_bed)
    # Sort file (out_bed used as scratch file here, overwritten below).
    bed_sort_file(in_bed, out_bed)
    # Merge .bed (merged sets with ";"-separated ID lists go to tmp_bed).
    bed_merge_file(out_bed, tmp_bed)
    # Output file.
    OUTBED = open(out_bed,"w")
    # Open merged .bed file, and select top entry for each overlap set.
    with open(tmp_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            ids = cols[3].split(";")
            best_id = "-"
            # Sentinel scores; real scores are assumed to lie within range.
            best_sc = -666666
            if rev_filter:
                best_sc = 666666
            for site_id in ids:
                assert site_id in id2sc_dic, "site ID \"%s\" not found in id2sc_dic" % (site_id)
                site_sc = id2sc_dic[site_id]
                if rev_filter:
                    if site_sc < best_sc:
                        best_sc = site_sc
                        best_id = site_id
                else:
                    if site_sc > best_sc:
                        best_sc = site_sc
                        best_id = site_id
            # Write the original (unmerged) BED row of the winning site.
            assert best_id in id2row_dic, "site ID \"%s\" not found in id2row_dic" % (best_id)
            OUTBED.write(id2row_dic[best_id] + "\n")
    f.closed
    OUTBED.close()
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
################################################################################
def bed_get_score_to_count_dic(in_bed):
    """
    Given an .bed file in_bed, store scores (column 5, kept as strings)
    and count how many times each score appears. Return dictionary with
    score -> count mapping.

    >>> in_bed = "test_data/test1.bed"
    >>> bed_get_score_to_count_dic(in_bed)
    {'1': 2, '0': 2, '2': 1, '3': 2}

    """
    assert os.path.isfile(in_bed), "cannot open in_bed \"%s\"" % (in_bed)
    # Count occurrences of each column 5 score string.
    sc2c_dic = {}
    with open(in_bed) as f:
        for line in f:
            site_sc = line.strip().split("\t")[4]
            sc2c_dic[site_sc] = sc2c_dic.get(site_sc, 0) + 1
    return sc2c_dic
################################################################################
def bed_get_region_id_scores(in_bed, no_float=False):
    """
    Read in .bed file, and store scores for each region in dictionary
    (unique column 4 ID and column 5 score have to be present).
    Return dictionary with mappings region ID -> region score

    no_float:
        If True, keep scores as strings instead of converting to float.

    >>> test_bed = "test_data/test5.bed"
    >>> bed_get_region_id_scores(test_bed)
    {'CLIP2': 2.57, 'CLIP1': 1.58, 'CLIP3': 3.11}

    """
    id2sc_dic = {}
    # Open input .bed file.
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            # Only convert when requested, so non-numeric score strings do
            # not crash the no_float=True case.
            site_sc = cols[4] if no_float else float(cols[4])
            id2sc_dic[site_id] = site_sc
    assert id2sc_dic, "nothing read in for in_bed \"%s\"" %(in_bed)
    return id2sc_dic
################################################################################
def bed_process_bed_file(in_bed_file, out_bed_file,
                         new2oldid_dic=None,
                         score_thr=None,
                         min_len=False,
                         max_len=False,
                         generate_unique_ids=False,
                         center_sites=False,
                         ext_lr=False,
                         seq_len_dic=False,
                         siteids2keep_dic=False,
                         seqids2keep_dic=False,
                         siteseqids2keep_dic=False,
                         zero_scores=False,
                         int_whole_nr=True,
                         rev_filter=False,
                         id_prefix="CLIP"):
    """
    Process .bed file in various ways:
    - Filter by region length (min_len, max_len) or region score (column 5)
      (score_thr). By default no score or length filtering is applied.
    - Option to reverse-filter scores (rev_filter, lower score = better)
    - Center regions (center_sites=True)
    - Extend sites up- downstream (ext_lr=value)
    - Generate new region IDs (column 4, generate_unique_ids=True),
      optionally providing an id_prefix; if new2oldid_dic is given (as
      dict), it gets filled with new ID -> old ID mappings
    - Filter by given dictionary of region IDs (siteids2keep_dic),
      sequence IDs (seqids2keep_dic), or expected site ID -> sequence ID
      pairs (siteseqids2keep_dic)
    - Print "0" scores to column 5 (zero_scores)
    - Print whole-number scores without decimal point (int_whole_nr)
    - Trim region ends to the sequence lengths given in seq_len_dic
    Output processed .bed file (in_bed_file) to new bed file (out_bed_file)

    >>> in_bed = "test_data/test1.bed"
    >>> out_bed = "test_data/out.tmp.bed"
    >>> bed_process_bed_file(in_bed, out_bed, score_thr=1)
    >>> count_file_rows(out_bed)
    5
    >>> bed_process_bed_file(in_bed, out_bed, rev_filter=True, score_thr=1)
    >>> count_file_rows(out_bed)
    4
    >>> in_bed = "test_data/test5.bed"
    >>> out_bed = "test_data/out.tmp.bed"
    >>> out_bed_exp = "test_data/test5.centered_zero_sc.bed"
    >>> bed_process_bed_file(in_bed, out_bed, zero_scores=True, center_sites=True)
    >>> diff_two_files_identical(out_bed, out_bed_exp)
    True

    """
    # Output .bed file.
    OUTBED = open(out_bed_file,"w")
    # New site IDs.
    site_id_pref = id_prefix
    c_sites = 0
    # Open input .bed file.
    with open(in_bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            seq_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_sc = float(cols[4])
            site_pol = cols[5]
            site_l = site_e - site_s
            # Sanity checking .bed file.
            assert site_s < site_e, "invalid region coordinates in .bed file \"%s\" (start >= end: %i >= %i)" % (in_bed_file, site_s, site_e)
            assert site_s >= 0 and site_e >= 1, "invalid region coordinates in .bed file \"%s\" (start < 0 or end < 1)" % (in_bed_file)
            # Filter by IDs to keep dictionary.
            if siteids2keep_dic:
                if not site_id in siteids2keep_dic:
                    continue
            # Filter by sequence (chromosome) IDs to keep.
            if seqids2keep_dic:
                if not seq_id in seqids2keep_dic:
                    continue
            # Keep site only if it sits on its expected sequence ID.
            if siteseqids2keep_dic:
                if site_id in siteseqids2keep_dic:
                    if not seq_id == siteseqids2keep_dic[site_id]:
                        continue
            # Filter by score.
            if score_thr is not None:
                if rev_filter:
                    # Reverse filtering: lower score = better.
                    if site_sc > score_thr:
                        continue
                else:
                    if site_sc < score_thr:
                        continue
            # Check whether score is whole number.
            if int_whole_nr:
                if not site_sc % 1:
                    site_sc = int(site_sc)
            # Filter by minimum site length.
            if min_len:
                if min_len > site_l:
                    continue
            # Filter by maximum site length.
            if max_len:
                if max_len < site_l:
                    continue
            # Update start + end positions.
            new_s = site_s
            new_e = site_e
            # Center sites (get center position).
            if center_sites:
                new_e = get_center_position(site_s, site_e)
                new_s = new_e - 1
            # If site extension is specified.
            if ext_lr:
                new_s = new_s - ext_lr
                new_e = new_e + ext_lr
                # Clip extension at sequence start.
                if new_s < 0:
                    new_s = 0
            # New site ID.
            if generate_unique_ids:
                c_sites += 1
                new_site_id = "%s_%i" % (site_id_pref, c_sites)
                # If new ID to old ID mapping should be generated.
                if new2oldid_dic is not None:
                    new2oldid_dic[new_site_id] = site_id
                site_id = new_site_id
            new_sc = str(site_sc)
            if zero_scores:
                new_sc = "0"
            # Clip region end at the sequence length (if lengths given).
            if seq_len_dic:
                assert seq_id in seq_len_dic, "sequence ID \"%s\" missing in given sequence lengths dictionary" %(seq_id)
                if new_e > seq_len_dic[seq_id]:
                    new_e = seq_len_dic[seq_id]
            # Output to new file.
            OUTBED.write("%s\t%i\t%i\t%s\t%s\t%s\n" % (seq_id,new_s,new_e,site_id,new_sc,site_pol))
    f.closed
    OUTBED.close()
################################################################################
def add_bed_output_ids_to_dic(output, ids_dic):
    """
    Add column 4 IDs from a BED output on command line (e.g. from
    cat test.bed) to ids_dic.

    >>> check_cmd = "cat test_data/test3.bed"
    >>> output = subprocess.getoutput(check_cmd)
    >>> ids_dic = {}
    >>> add_bed_output_ids_to_dic(output, ids_dic)
    >>> ids_dic
    {'CLIP1': 1, 'CLIP2': 1}

    """
    assert output, "no output given"
    for r in output.strip().split("\n"):
        # Column 4 = text between the 3rd and 4th tab.
        m = re.search(r'.+?\t\d+\t\d+\t(.+?)\t', r)
        # Fail loudly on malformed rows instead of AttributeError on None.
        assert m, "invalid BED row encountered: \"%s\"" %(r)
        ids_dic[m.group(1)] = 1
################################################################################
def check_random_negatives(in_bed, incl_bed, excl_bed, chr_lengths_file,
                           trouble_ids_dic=None,
                           report=False):
    """
    Check whether bedtools shuffle works as described.
    I.e., we demand the random negatives to fully overlap with incl_bed and with
    chromosomes / reference regions (chr_lengths_file). For regions where this is
    not the case, print a warning + output the regions.
    trouble_ids_dic:
        If given (as dict), gets filled with the column 4 IDs of
        problematic regions.
    report:
        If True, print a warning with the problematic regions.
    Returns True if problematic regions were found, False otherwise.
    Overlap with excl_bed regions is treated as fatal (assertion error).
    """
    # Return warning.
    warning = False
    # tmp files.
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    # Read in chromosome lengths.
    chr_len_dic = {}
    with open(chr_lengths_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            chr_id = cols[0]
            chr_l = int(cols[1])
            chr_len_dic[chr_id] = chr_l
    f.closed
    assert chr_len_dic, "no chromsome lengths read in from chr_lengths_file"
    # Generate BED from chromosome lengths.
    bed_sequence_lengths_to_bed(chr_len_dic, tmp_bed)
    # Get sites that overlap partially or not at all.
    # -f 1: demand 100% overlap of -a regions; -v: report non-overlapping ones.
    params = "-f 1 -v"
    # Check reference regions.
    check_cmd = "intersectBed -a " + in_bed + " -b " + tmp_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    if output:
        if report:
            print("WARNING: random negative regions encountered that do not fully overlap with reference regions:\n%s" %(output))
        if trouble_ids_dic is not None:
            add_bed_output_ids_to_dic(output, trouble_ids_dic)
        warning = True
    # Check inclusion regions.
    # -s: additionally demand matching strands.
    params = "-s -f 1 -v"
    check_cmd = "intersectBed -a " + in_bed + " -b " + incl_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    if output:
        if report:
            print("WARNING: random negative regions encountered that do not fully overlap with -incl regions:\n%s" %(output))
        if trouble_ids_dic is not None:
            add_bed_output_ids_to_dic(output, trouble_ids_dic)
        warning = True
    # Check exclusion regions, throw error here (not tolerable).
    params = "-s"
    check_cmd = "intersectBed -a " + in_bed + " -b " + excl_bed + " " + params
    output = subprocess.getoutput(check_cmd)
    if output:
        assert False, "random negative regions encountered that overlap with -excl regions!"
    #assert False, "ERROR: random negative regions encountered that overlap with -excl regions!\n%s\n%s" %(check_cmd, output)
    # Delete tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    # Return True if warning occured.
    return warning
################################################################################
def bed_generate_random_negatives(in_bed, chr_sizes_file, out_bed,
                                  incl_bed=False,
                                  excl_bed=False,
                                  allow_overlaps=False):
    """
    Shuffle given in_bed, generating random negative regions. Optionally,
    the regions to extract negatives from can be controlled by incl_bed
    and excl_bed.
    in_bed:
        .bed file containing regions to shuffle, i.e., generate same number
        of random negatives (with same size distribution too)
    chr_sizes_file:
        File that stores chromosome IDs and their sizes
    out_bed:
        Output random negative regions in out_bed
    incl_bed:
        Regions from which to extract random negatives
    excl_bed:
        Regions from which no random negatives should be extracted
    allow_overlaps:
        Allow random negatives to overlap with each other
    Returns:
    Function returns True if no error occured.
    If loci error occured, function returns False.
    Any other error will throw an assertion error.
    If it is not possible to get the number of random negatives with the given
    restrictions, bedtools shuffle will throw the following error:
    Error, line 3: tried 1000 potential loci for entry, but could not avoid
    excluded regions.  Ignoring entry and moving on.
    This error will be thrown for every failed attempt to find a random
    negative for a certain positive instance.

    Tool:    bedtools shuffle (aka shuffleBed)
    Version: v2.29.0
    Summary: Randomly permute the locations of a feature file among a genome.

    Usage:   bedtools shuffle [OPTIONS] -i <bed/gff/vcf> -g <genome>

    Options:
        -excl	A BED/GFF/VCF file of coordinates in which features in -i
            should not be placed (e.g. gaps.bed).
        -incl	Instead of randomly placing features in a genome, the -incl
            options defines a BED/GFF/VCF file of coordinates in which
            features in -i should be randomly placed (e.g. genes.bed).
            Larger -incl intervals will contain more shuffled regions.
            This method DISABLES -chromFirst.
        -chrom	Keep features in -i on the same chromosome.
            - By default, the chrom and position are randomly chosen.
            - NOTE: Forces use of -chromFirst (see below).
        -seed	Supply an integer seed for the shuffling.
            - By default, the seed is chosen automatically.
            - (INTEGER)
        -f	Maximum overlap (as a fraction of the -i feature) with an -excl
            feature that is tolerated before searching for a new,
            randomized locus. For example, -f 0.10 allows up to 10%
            of a randomized feature to overlap with a given feature
            in the -excl file. **Cannot be used with -incl file.**
            - Default is 1E-9 (i.e., 1bp).
            - FLOAT (e.g. 0.50)
        -chromFirst
            Instead of choosing a position randomly among the entire
            genome (the default), first choose a chrom randomly, and then
            choose a random start coordinate on that chrom.  This leads
            to features being ~uniformly distributed among the chroms,
            as opposed to features being distribute as a function of chrom size.
        -bedpe	Indicate that the A file is in BEDPE format.
        -maxTries
            Max. number of attempts to find a home for a shuffled interval
            in the presence of -incl or -excl.
            Default = 1000.
        -noOverlapping
            Don't allow shuffled intervals to overlap.
        -allowBeyondChromEnd
            Allow shuffled intervals to be relocated to a position
            in which the entire original interval cannot fit w/o exceeding
            the end of the chromosome.  In this case, the end coordinate of the
            shuffled interval will be set to the chromosome's length.
            By default, an interval's original length must be fully-contained
            within the chromosome.

    """
    # Check for bedtools.
    assert is_tool("bedtools"), "bedtools not in PATH"
    # Construct call (optional flags first, then input / genome / output).
    check_cmd = "bedtools shuffle "
    if excl_bed:
        check_cmd = check_cmd + "-excl " + excl_bed + " "
    if incl_bed:
        check_cmd = check_cmd + "-incl " + incl_bed + " "
    if not allow_overlaps:
        check_cmd = check_cmd + "-noOverlapping "
    check_cmd = check_cmd + "-i " + in_bed + " -g " + chr_sizes_file + " > " + out_bed
    output = subprocess.getoutput(check_cmd)
    error = False
    if output:
        error = True
    # Look for "tried 1000 potential loci" error.
    if error:
        if re.search("potential loci", output):
            # Tolerable: fewer negatives than requested were produced.
            print("WARNING: number of extracted random negatives < requested number")
            return False
        else:
            assert False, "bedtools shuffle is complaining:\n%s\n%s" %(check_cmd, output)
    else:
        return True
################################################################################
def merge_files(files_list, out_file):
    """
    Merge list of files into one output file.

    files_list:
        List of input file paths, concatenated in list order.
    out_file:
        Output file (overwritten if it exists).

    """
    import shutil
    assert files_list, "given files_list is empty"
    # Validate inputs first (original code referenced check_cmd before
    # assignment here, raising UnboundLocalError on the failure path).
    for f in files_list:
        assert os.path.isfile(f), "list file \"%s\" not found" % (f)
        assert f != out_file, "cannot merge file \"%s\" into itself" % (f)
    # Concatenate in Python (no shell), overwriting any existing out_file.
    with open(out_file, "wb") as OUT:
        for f in files_list:
            with open(f, "rb") as IN:
                shutil.copyfileobj(IN, OUT)
################################################################################
def head_file_to_new(in_file, out_file, head_c):
    """
    Select top head_c rows from in_file and copy to out_file.

    >>> in_file = "test_data/test2.bed"
    >>> exp_file = "test_data/test2.exp.bed"
    >>> out_file = "test_data/test2.tmp.bed"
    >>> head_file_to_new(in_file, out_file, head_c=4)
    >>> diff_two_files_identical(out_file, exp_file)
    True

    """
    from itertools import islice
    assert os.path.isfile(in_file), "in_file \"%s\" not found" % (in_file)
    assert head_c > 0, "# top rows to select should be > 0"
    assert in_file != out_file, "cannot head file into itself (%s)" %(in_file)
    # Copy the first head_c lines in pure Python (portable, no shell needed).
    with open(in_file) as IN, open(out_file, "w") as OUT:
        for line in islice(IN, head_c):
            OUT.write(line)
################################################################################
def fasta_output_dic(fasta_dic, fasta_out,
                     split=False,
                     split_size=60):
    """
    Output FASTA sequences dictionary (sequence_id -> sequence) to fasta_out.

    split         Split FASTA sequence over several lines for output to file
    split_size    Split size (characters per output line)

    >>> fasta_dic = {'seq1': 'ACGTACGTACGTAC', 'seq2': 'ACGT'}
    >>> split_size = 4
    >>> fasta_exp = "test_data/test5.exp.fa"
    >>> fasta_out = "test_data/test5.tmp.fa"
    >>> fasta_output_dic(fasta_dic, fasta_out, split=True, split_size=split_size)
    >>> diff_two_files_identical(fasta_exp, fasta_out)
    True

    """
    # Check.
    assert fasta_dic, "given dictionary fasta_dic empty"
    # Write sequences to FASTA file (context manager guarantees close).
    with open(fasta_out, "w") as OUTFA:
        for seq_id in fasta_dic:
            seq = fasta_dic[seq_id]
            if split:
                OUTFA.write(">%s\n" %(seq_id))
                # Output sequence in chunks of split_size characters.
                for i in range(0, len(seq), split_size):
                    OUTFA.write("%s\n" %(seq[i:i+split_size]))
            else:
                OUTFA.write(">%s\n%s\n" %(seq_id, seq))
################################################################################
def dic_remove_entries(in_dic, filter_dic):
    """
    Remove entries from in_dic dictionary (in place), given key values
    from filter_dic. Returns the filtered in_dic.

    >>> in_dic = {'id1': 10, 'id2': 15, 'id3':20}
    >>> filter_dic = {'id2' : 1}
    >>> dic_remove_entries(in_dic, filter_dic)
    {'id1': 10, 'id3': 20}

    """
    # Checks.
    assert in_dic, "given dictionary in_dic empty"
    assert filter_dic, "given dictionary filter_dic empty"
    # Delete all keys present in both dictionaries.
    for shared_key in filter_dic.keys() & in_dic.keys():
        del in_dic[shared_key]
    return in_dic
################################################################################
def gtf_extract_unique_exon_bed(in_gtf, out_bed,
                                use_ei_labels=False):
    """
    Given a .gtf file with exon features, extract exon unique (!) regions
    and write them to out_bed BED file. Since the Ensembl exon_id regions
    are not unique regarding their genomic coordinates, create own IDs,
    each representing one unique genomic region (unique start+end+strand).

    Output .bed will look like this (column 4 ID == new exon ID):
    chr1 1000 2000 NEXT1 0 +
    chr1 3000 4000 NEXT2 0 +
    chr1 8000 9000 NEXT3 0 -
    chr1 6000 7000 NEXT4 0 -
    ...

    use_ei_labels:
    Instead of using exon ID, just print "E" in column 4.

    """
    # Unique genomic exon region strings ("chr,start,end,strand" -> 1).
    reg_str_dic = {}
    # Open GTF either as .gz or as text file
    # (raw regex string avoids invalid escape sequence warning).
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    try:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            chr_id = cols[0]
            feature = cols[2]
            feat_s = int(cols[3])
            feat_e = int(cols[4])
            feat_pol = cols[6]
            # Only process exon features.
            if feature != "exon":
                continue
            # Restrict to standard chromosomes (converts e.g. "1" -> "chr1").
            new_chr_id = check_convert_chr_id(chr_id)
            if not new_chr_id:
                continue
            chr_id = new_chr_id
            # Make start coordinate 0-based (BED standard).
            feat_s = feat_s - 1
            # Store exon region (duplicate regions collapse into one key).
            check_reg_str = "%s,%i,%i,%s" %(chr_id,feat_s,feat_e,feat_pol)
            reg_str_dic[check_reg_str] = 1
    finally:
        # Close input also if a malformed line raised above.
        f.close()
    assert reg_str_dic, "no exon regions read in"
    # Output genomic exon regions, assigning new NEXT<n> IDs.
    c_ex = 0
    with open(out_bed, "w") as OUTBED:
        for reg_str in reg_str_dic:
            cols = reg_str.split(",")
            c_ex += 1
            ex_id = "NEXT" + str(c_ex)
            if use_ei_labels:
                ex_id = "E"
            OUTBED.write("%s\t%s\t%s\t%s\t0\t%s\n" % (cols[0], cols[1], cols[2], ex_id, cols[3]))
################################################################################
def gtf_extract_exon_bed(in_gtf, out_bed,
                         out_intron_bed=False,
                         use_ei_labels=False,
                         tr_ids_dic=False):
    """
    Given a .gtf file with exon features, extract exon regions and store in
    .bed file. Optionally, a dictionary of transcript IDs can be provided,
    meaning that only exon regions from the given transcripts will be extracted.
    If out_intron_bed is set, an intronic regions .bed file will also be
    extracted, based on the exonic regions .bed information.

    Output .bed will look like this (note column 4 ID format with transcript
    ID followed by _e+exon_number):
    chr1 1000 2000 ENST001_e1 0 +
    chr1 3000 4000 ENST001_e2 0 +
    chr1 8000 9000 ENST002_e1 0 -
    chr1 6000 7000 ENST002_e2 0 -
    ...

    use_ei_labels:
    Instead of using transcript ID + eX column 4 BED ID, just use "E" for
    exon region and "I" for intron region (if out_intron_bed) set.

    NOTE that function has been tested with .gtf files from Ensembl. .gtf files
    from different sources sometimes have a slightly different format, which
    could lead to incompatibilities / errors. See test files for format that
    works.

    Some tested Ensembl GTF files:
    Homo_sapiens.GRCh38.97.gtf.gz
    Mus_musculus.GRCm38.81.gtf.gz
    Mus_musculus.GRCm38.79.gtf.gz

    >>> in_gtf = "test_data/map_test_in.gtf"
    >>> exp_out_bed = "test_data/gtf_exon_out_exp.bed"
    >>> exp_out_intron_bed = "test_data/gtf_intron_out_exp.bed"
    >>> out_bed = "test_data/gtf_exon_out.bed"
    >>> out_intron_bed = "test_data/gtf_intron_out.bed"
    >>> gtf_extract_exon_bed(in_gtf, out_bed, out_intron_bed=out_intron_bed)
    >>> diff_two_files_identical(out_bed, exp_out_bed)
    True
    >>> diff_two_files_identical(out_intron_bed, exp_out_intron_bed)
    True

    """
    # Output genomic exon regions.
    OUTBED = open(out_bed, "w")
    # Read in exon features from GTF file.
    c_gtf_ex_feat = 0
    # Start end coordinates of exons (exon ID -> genomic start / end).
    exon_e_dic = {}
    exon_s_dic = {}
    # Transcript stats.
    tr2pol_dic = {}
    tr2chr_dic = {}
    # dic for sanity checking exon number order.
    # After the loop this holds the highest (== total) exon number per transcript.
    tr2exon_nr_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # Parse GTF line by line (tab-separated, 9 columns expected).
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        feat_pol = cols[6]
        infos = cols[8]
        if not feature == "exon":
            continue
        # Restrict to standard chromosomes.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Make start coordinate 0-base (BED standard).
        feat_s = feat_s - 1
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        # Extract exon number.
        m = re.search('exon_number "(\d+?)"', infos)
        # Try Gencode encoding.
        if not m:
            m = re.search('exon_number (\d+?);', infos)
        assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        exon_nr = int(m.group(1))
        # Check if transcript ID is in transcript dic.
        if tr_ids_dic:
            if not transcript_id in tr_ids_dic:
                continue
        # Store transcript stats.
        tr2pol_dic[transcript_id] = feat_pol
        tr2chr_dic[transcript_id] = chr_id
        # Check whether exon numbers are incrementing for each transcript ID.
        if not transcript_id in tr2exon_nr_dic:
            tr2exon_nr_dic[transcript_id] = exon_nr
        else:
            assert tr2exon_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
            tr2exon_nr_dic[transcript_id] = exon_nr
        # Count exon entry.
        c_gtf_ex_feat += 1
        # Construct exon ID (transcript ID + _e + 1-based exon number).
        exon_id = transcript_id + "_e" + str(exon_nr)
        # Store infos.
        exon_s_dic[exon_id] = feat_s
        exon_e_dic[exon_id] = feat_e
        if use_ei_labels:
            exon_id = "E"
        # Output genomic exon region.
        OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,feat_s,feat_e,exon_id,feat_pol))
    OUTBED.close()
    f.close()
    # Check for read-in features.
    assert c_gtf_ex_feat, "no exon features read in from \"%s\"" %(in_gtf)
    # Output intron .bed.
    if out_intron_bed:
        tr2intron_nr_dic = {}
        OUTBED = open(out_intron_bed, "w")
        for tr_id in tr2pol_dic:
            tr_pol = tr2pol_dic[tr_id]
            chr_id = tr2chr_dic[tr_id]
            tr_c = tr2exon_nr_dic[tr_id]
            intron_c = 0
            tr2intron_nr_dic[tr_id] = 0
            # 1-exon transcripts, no introns.
            if tr_c == 1:
                continue
            ex_list = []
            for i in range(tr_c):
                ex_nr = i + 1
                ex_id = tr_id + "_e" + str(ex_nr)
                ex_list.append(ex_id)
            # Walk adjacent exon pairs; each pair encloses one intron.
            for i in range(len(ex_list)):
                ex1i = i
                ex2i = i + 1
                # At last exon, no more introns to add.
                if ex2i == len(ex_list):
                    break
                ex1id = ex_list[ex1i]
                ex2id = ex_list[ex2i]
                ex1s = exon_s_dic[ex1id]
                ex2s = exon_s_dic[ex2id]
                ex1e = exon_e_dic[ex1id]
                ex2e = exon_e_dic[ex2id]
                # Plus case.
                intron_s = ex1e
                intron_e = ex2s
                # Minus case: exons are stored in transcript (5'->3') order,
                # so genomic start/end come from the swapped exon bounds.
                if tr_pol == "-":
                    intron_s = ex2e
                    intron_e = ex1s
                intron_id = tr_id + "_i" + str(ex2i)
                intron_c += 1
                if use_ei_labels:
                    intron_id = "I"
                OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,intron_s,intron_e,intron_id,tr_pol))
            tr2intron_nr_dic[tr_id] = intron_c
        OUTBED.close()
        # Sanity check exon + intron numbers.
        for tr_id in tr2exon_nr_dic:
            exon_nr = tr2exon_nr_dic[tr_id]
            intron_nr = tr2intron_nr_dic[tr_id]
            assert (exon_nr-1) == intron_nr, "intron number != exon number - 1 for \"%s\" (%i != %i - 1)" %(tr_id, intron_nr, exon_nr)
################################################################################
def gtf_extract_gene_bed(in_gtf, out_bed,
                         gene_ids_dic=False):
    """
    Extract gene regions from in_gtf GTF file, and output to out_bed BED
    file.

    gene_ids_dic:
    Dictionary with gene IDs for filtering (keeping dic IDs).

    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> exp_out_bed = "test_data/gtf_gene_out.exp.bed"
    >>> tmp_out_bed = "test_data/gtf_gene_out.tmp.bed"
    >>> gtf_extract_gene_bed(in_gtf, tmp_out_bed)
    >>> diff_two_files_identical(tmp_out_bed, exp_out_bed)
    True

    """
    c_out = 0
    # Open GTF either as .gz or as text file
    # (raw regex string avoids invalid escape sequence warning).
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # with closes both files even if parsing a malformed line raises.
    with f, open(out_bed, "w") as OUTBED:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            chr_id = cols[0]
            feature = cols[2]
            feat_s = int(cols[3])
            feat_e = int(cols[4])
            feat_pol = cols[6]
            infos = cols[8]
            # Only process gene features.
            if feature != "gene":
                continue
            # Restrict to standard chromosomes (converts e.g. "1" -> "chr1").
            new_chr_id = check_convert_chr_id(chr_id)
            if not new_chr_id:
                continue
            chr_id = new_chr_id
            # Make start coordinate 0-based (BED standard).
            feat_s = feat_s - 1
            # Extract gene ID from infos.
            m = re.search('gene_id "(.+?)"', infos)
            assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_id = m.group(1)
            # Optional gene ID filter (keep only IDs present in dic).
            if gene_ids_dic and gene_id not in gene_ids_dic:
                continue
            # Output gene region.
            c_out += 1
            OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,feat_s,feat_e,gene_id,feat_pol))
    assert c_out, "no regions output to out_bed. Invalid in_gtf or too restrictive gene_ids_dic filtering?"
################################################################################
def gtf_extract_tsl_gene_bed(in_gtf, out_bed,
                             strict=False,
                             basic=True,
                             min_tsl=1,
                             gene_ids_dic=False):
    """
    Extract gene regions from in_gtf GTF file, and output to out_bed BED
    file. Only genes with at least one transcript passing the basic tag /
    TSL (transcript support level) filters are output.

    gene_ids_dic:
    Dictionary with gene IDs for filtering (keeping dic IDs).
    strict:
    If True only output genes with transcripts of TSL <= min_tsl
    (TSL == 1 if min_tsl is falsy).
    basic:
    If True require transcripts to carry the 'tag "basic"' attribute.
    min_tsl:
    Minimum TSL (1 = best support) a transcript needs in strict mode.

    >>> in_gtf = "test_data/test_tsl_genes.gtf"
    >>> exp_out_bed = "test_data/test_tsl_genes.exp.bed"
    >>> tmp_out_bed = "test_data/test_tsl_genes.tmp.bed"
    >>> gtf_extract_tsl_gene_bed(in_gtf, tmp_out_bed)
    >>> diff_two_files_identical(tmp_out_bed, exp_out_bed)
    True

    """
    # Gene ID -> "chr<TAB>start<TAB>end" string / polarity / keep flag.
    gene2chrse_dic = {}
    gene2pol_dic = {}
    gene2keep_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    with f:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            chr_id = cols[0]
            feature = cols[2]
            feat_s = int(cols[3])
            feat_e = int(cols[4])
            feat_pol = cols[6]
            infos = cols[8]
            # Only gene and transcript features are of interest.
            if feature != "gene" and feature != "transcript":
                continue
            # Make start coordinate 0-based (BED standard).
            feat_s = feat_s - 1
            # Restrict to standard chromosomes.
            new_chr_id = check_convert_chr_id(chr_id)
            if not new_chr_id:
                continue
            chr_id = new_chr_id
            # Extract gene ID from infos.
            m = re.search('gene_id "(.+?)"', infos)
            assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_id = m.group(1)
            # Check if gene ID is in genes to select dic.
            if gene_ids_dic and gene_id not in gene_ids_dic:
                continue
            # Gene rows: store region + polarity, then move on.
            if feature == "gene":
                gene2chrse_dic[gene_id] = "%s\t%i\t%i" %(chr_id, feat_s, feat_e)
                gene2pol_dic[gene_id] = feat_pol
                continue
            # Transcript row: the gene is kept only if some transcript
            # passes the basic tag + TSL filters below.
            m = re.search('transcript_id "(.+?)"', infos)
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            # Look for basic tag.
            if basic and not re.search('tag "basic"', infos):
                continue
            # Get transcript support level (TSL); no TSL info -> no support.
            m = re.search('transcript_support_level "(.+?)"', infos)
            if not m:
                continue
            tsl_id = m.group(1)
            # Strip "(assigned to previous version ..)" suffix if present.
            if re.search("assigned to previous", tsl_id):
                m = re.search(r"(.+?) \(", tsl_id)
                tsl_id = m.group(1)
            # Strict filter.
            if strict:
                if min_tsl:
                    # Non-numeric TSL values (e.g. "NA") never pass strict
                    # mode (previously int() raised ValueError on them).
                    try:
                        if int(tsl_id) > min_tsl:
                            continue
                    except ValueError:
                        continue
                else:
                    # BUGFIX: condition was inverted ("if not tsl_id != ..."),
                    # which skipped TSL-1 transcripts and kept all others.
                    # Strict mode without min_tsl keeps only TSL 1.
                    if tsl_id != "1":
                        continue
            # Store gene ID with TSL + basic support.
            gene2keep_dic[gene_id] = 1
    assert gene2keep_dic, "no remaining genes to output"
    # Output gene regions.
    with open(out_bed, "w") as OUTBED:
        for gene_id in gene2keep_dic:
            OUTBED.write("%s\t%s\t0\t%s\n" % (gene2chrse_dic[gene_id],gene_id,gene2pol_dic[gene_id]))
################################################################################
def gtf_extract_transcript_bed(in_gtf, out_bed,
                               tr_ids_dic=False):
    """
    Extract transcript regions from in_gtf GTF file, and output to out_bed
    BED file.

    tr_ids_dic:
    Dictionary with transcript IDs for filtering (keeping dic IDs).

    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> exp_out_bed = "test_data/gtf_transcript_out.exp.bed"
    >>> tmp_out_bed = "test_data/gtf_transcript_out.tmp.bed"
    >>> gtf_extract_transcript_bed(in_gtf, tmp_out_bed)
    >>> diff_two_files_identical(tmp_out_bed, exp_out_bed)
    True

    """
    c_out = 0
    # Open GTF either as .gz or as text file
    # (raw regex string avoids invalid escape sequence warning).
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # with closes both files even if parsing a malformed line raises.
    with f, open(out_bed, "w") as OUTBED:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            chr_id = cols[0]
            feature = cols[2]
            feat_s = int(cols[3])
            feat_e = int(cols[4])
            feat_pol = cols[6]
            infos = cols[8]
            # Only process transcript features.
            if feature != "transcript":
                continue
            # Restrict to standard chromosomes (converts e.g. "1" -> "chr1").
            new_chr_id = check_convert_chr_id(chr_id)
            if not new_chr_id:
                continue
            chr_id = new_chr_id
            # Make start coordinate 0-based (BED standard).
            feat_s = feat_s - 1
            # Extract transcript ID.
            m = re.search('transcript_id "(.+?)"', infos)
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            transcript_id = m.group(1)
            # Optional transcript ID filter (keep only IDs present in dic).
            if tr_ids_dic and transcript_id not in tr_ids_dic:
                continue
            # Output transcript region.
            c_out += 1
            OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,feat_s,feat_e,transcript_id,feat_pol))
    assert c_out, "no regions output to out_bed. Invalid in_gtf or too restrictive tr_ids_dic filtering?"
################################################################################
def get_transcript_sequences_from_gtf(in_gtf, in_2bit,
                                      lc_repeats=False,
                                      tr_ids_dic=False):
    """
    Get spliced transcript sequences based on in_gtf annotations. For
    transcripts with > 1 exon, concatenate the exon sequences to build
    the transcript sequence. If one exon is missing / not extracted or
    if extracted lengths don't fit, the transcript sequence will be
    skipped / not output.
    Return dictionary with transcript_id -> sequence mapping.

    tr_ids_dic:
    Defines transcript IDs for which sequence should be extracted.

    lc_repeats:
    Passed through to bed_extract_sequences_from_2bit(); presumably
    outputs repeat regions in lowercase — confirm in that function.

    """
    # Generate .tmp files (random names to avoid collisions).
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    random_id = uuid.uuid1()
    tmp_fa = str(random_id) + ".tmp.fa"
    # Transcript sequences dic.
    tr_seqs_dic = {}
    # Extract transcript exon regions from GTF and store as BED.
    gtf_extract_exon_bed(in_gtf, tmp_bed, tr_ids_dic=tr_ids_dic)
    # Extract exon region sequences from .2bit.
    bed_extract_sequences_from_2bit(tmp_bed, tmp_fa, in_2bit,
                                    lc_repeats=lc_repeats)
    # Get transcript lengths from tmp_bed for comparison.
    tr_len_dic = bed_get_transcript_lengths_from_exon_regions(tmp_bed)
    # Get exon numbers for each transcript.
    tr_exc_dic = bed_get_transcript_exon_numbers(tmp_bed)
    # Read in sequences.
    exon_seqs_dic = read_fasta_into_dic(tmp_fa)
    # Concatenate exon region sequences (exons are 1-based, in 5'->3' order).
    for tr_id in tr_exc_dic:
        ex_c = tr_exc_dic[tr_id]
        for i in range(ex_c):
            i += 1
            # Exon ID format produced by gtf_extract_exon_bed(): <tr_id>_e<nr>.
            ex_id = tr_id + "_e" + str(i)
            if ex_id in exon_seqs_dic:
                ex_seq = exon_seqs_dic[ex_id]
                if tr_id not in tr_seqs_dic:
                    tr_seqs_dic[tr_id] = ex_seq
                else:
                    tr_seqs_dic[tr_id] += ex_seq
            else:
                # A single missing exon invalidates the whole transcript.
                print("WARNING: no sequence extracted for exon ID \"%s\". Skipping \"%s\" .. " %(ex_id, tr_id))
                if tr_id in tr_seqs_dic:
                    del tr_seqs_dic[tr_id]
                break
    # Checks.
    assert tr_seqs_dic, "tr_seqs_dic empty (no FASTA sequences extracted?)"
    for tr_id in tr_seqs_dic:
        tr_len = len(tr_seqs_dic[tr_id])
        exp_len = tr_len_dic[tr_id]
        # Spliced sequence length must match summed exon lengths from BED.
        assert tr_len == exp_len, "BED transcript length != FASTA transcript length for \"%s\"" %(tr_id)
    # Delete tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    if os.path.exists(tmp_fa):
        os.remove(tmp_fa)
    # Return transcript sequences dic constructed from exon sequences.
    return tr_seqs_dic
################################################################################
def bed_get_transcript_lengths_from_exon_regions(in_bed):
    """
    Get spliced transcript lengths from in_bed BED file with transcript
    exon regions, with ID format:
    transcriptid_e1 (exon 1), transcriptid_e2 (exon 2)
    This is the output format from gtf_extract_exon_bed(), so both can
    be used in combination.
    Return dictionary mapping transcript ID -> summed exon lengths.

    >>> in_bed = "test_data/test6.bed"
    >>> bed_get_transcript_lengths_from_exon_regions(in_bed)
    {'ENST1': 4000, 'ENST2': 1500, 'ENST3': 2500}

    """
    tr_len_dic = {}
    # Exon ID pattern: transcript ID + _e + exon number (compiled once).
    exon_id_pat = re.compile(r"(.+)_e\d")
    # Open input .bed file (with statement handles closing).
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_len = site_e - site_s
            # Single regex search (was searched twice per line before).
            m = exon_id_pat.search(site_id)
            assert m, "site ID \"%s\" missing added _e exon number" %(site_id)
            tr_id = m.group(1)
            # Sum up exon lengths per transcript.
            tr_len_dic[tr_id] = tr_len_dic.get(tr_id, 0) + site_len
    assert tr_len_dic, "nothing was read in (\"%s\" empty or malformatted?)" %(in_bed)
    return tr_len_dic
################################################################################
def bed_get_transcript_exon_numbers(in_bed):
    """
    Get number of exons for each transcript from in_bed BED file with
    transcript exon regions, with ID format:
    transcriptid_e1 (exon 1), transcriptid_e2 (exon 2)
    This is the output format from gtf_extract_exon_bed(), so both can
    be used in combination.
    Return dictionary mapping transcript ID -> exon count.

    >>> in_bed = "test_data/test6.bed"
    >>> bed_get_transcript_exon_numbers(in_bed)
    {'ENST1': 2, 'ENST2': 2, 'ENST3': 1}

    """
    tr_exc_dic = {}
    # Exon ID pattern: transcript ID + _e + exon number (compiled once).
    exon_id_pat = re.compile(r"(.+)_e\d")
    # Open input .bed file (with statement handles closing).
    with open(in_bed) as f:
        for line in f:
            site_id = line.strip().split("\t")[3]
            # Single regex search (was searched twice per line before).
            m = exon_id_pat.search(site_id)
            assert m, "site ID \"%s\" missing added _e exon number" %(site_id)
            tr_id = m.group(1)
            # Count one exon region for the transcript.
            tr_exc_dic[tr_id] = tr_exc_dic.get(tr_id, 0) + 1
    assert tr_exc_dic, "nothing was read in (\"%s\" empty or malformatted?)" %(in_bed)
    return tr_exc_dic
################################################################################
def bed_convert_transcript_to_genomic_sites(in_bed, in_gtf, out_bed,
                                            site2hitc_dic=None,
                                            out_folder=False):
    """
    Dependencies:
    bedtools (tested with 2.29.0)
    gzip

    Convert in_bed .bed file with transcript sites into genomic coordinates
    sites file. in_bed column 1 transcript IDs have to be present in
    in_gtf GTF file, from which genomic exon coordinates of the transcript
    will get extracted.

    site2hitc_dic:
    A site2hitc_dic can be given, where site ID to hit count will be
    stored for usage outside the function.

    Output:
    By default output to out_bed file, using id_p1, id_p2 IDs.
    If out_folder=True, use out_bed name as folder name.
    In this case, output these files to folder:
    exon_regions_genome.bed
    exon_regions_transcript.bed
    unique_hits.bed
    split_hits.bed
    all_hits.bed

    >>> test_gtf = "test_data/test_tr2gen.gtf"
    >>> test_in_bed = "test_data/test_tr2gen.bed"
    >>> test_out_exp_bed = "test_data/test_tr2gen.exp.bed"
    >>> test_out_tmp_bed = "test_data/test_tr2gen.tmp.bed"
    >>> bed_convert_transcript_to_genomic_sites(test_in_bed, test_gtf, test_out_tmp_bed)
    >>> diff_two_files_identical(test_out_exp_bed, test_out_tmp_bed)
    True
    >>> test_out = "test_data/tr2gen_tmp_out"
    >>> test_out_tmp_bed = "test_data/tr2gen_tmp_out/all_hits.bed"
    >>> bed_convert_transcript_to_genomic_sites(test_in_bed, test_gtf, test_out, out_folder=True)
    >>> diff_two_files_identical(test_out_exp_bed, test_out_tmp_bed)
    True

    """
    # Generate .tmp files (random names to avoid collisions).
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    random_id = uuid.uuid1()
    tmp_out = str(random_id) + ".tmp.out"
    # Output files if output_folder=True.
    if out_folder:
        if not os.path.exists(out_bed):
            os.makedirs(out_bed)
        out_exon_regions_genome_bed = out_bed + "/" + "exon_regions_genome.bed"
        out_exon_regions_transcript_bed = out_bed + "/" + "exon_regions_transcript.bed"
        out_unique_hits_bed = out_bed + "/" + "unique_hits.bed"
        out_split_hits_bed = out_bed + "/" + "split_hits.bed"
        out_all_hits_bed = out_bed + "/" + "all_hits.bed"
    # Transcript IDs dic (in_bed column 1 holds transcript IDs here).
    tr_ids_dic = bed_get_chromosome_ids(in_bed)
    # Extract transcript exon regions from GTF and store as BED.
    gtf_extract_exon_bed(in_gtf, tmp_bed, tr_ids_dic=tr_ids_dic)
    if out_folder:
        make_file_copy(tmp_bed, out_exon_regions_transcript_bed)
    # Get exon region lengths.
    exid2len_dic = bed_get_region_lengths(tmp_bed)
    # Get exon numbers for each transcript.
    tr_exc_dic = bed_get_transcript_exon_numbers(tmp_bed)
    # Read in exon region stats (exon ID -> chromosome / start / end / strand).
    id2chr_dic = {}
    id2s_dic = {}
    id2e_dic = {}
    id2pol_dic = {}
    exid2trid_dic = {}
    with open(tmp_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            chr_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_pol = cols[5]
            id2chr_dic[site_id] = chr_id
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            id2pol_dic[site_id] = site_pol
            if re.search(".+_e\d", site_id):
                m = re.search("(.+)_e\d", site_id)
                tr_id = m.group(1)
                exid2trid_dic[site_id] = tr_id
            else:
                assert False, "site ID \"%s\" missing added _e exon number" %(site_id)
    f.close()
    # Output exon regions with transcript coordinates
    # (overwrites tmp_bed; column 1 is now the transcript ID).
    OUTBED = open(tmp_bed, "w")
    for tr_id in tr_exc_dic:
        ex_c = tr_exc_dic[tr_id]
        new_s = 0
        for i in range(ex_c):
            i += 1
            ex_id = tr_id + "_e" + str(i)
            gen_s = id2s_dic[ex_id]
            gen_e = id2e_dic[ex_id]
            ex_len = gen_e - gen_s
            # Consecutive exons tile the transcript starting at offset 0.
            tr_s = new_s
            tr_e = new_s + ex_len
            OUTBED.write("%s\t%i\t%i\t%s\t0\t+\n" % (tr_id,tr_s,tr_e,ex_id))
            new_s = tr_e
    OUTBED.close()
    if out_folder:
        make_file_copy(tmp_bed, out_exon_regions_genome_bed)
    # Overlap in_bed with tmp_bed (-wb appends the overlapped exon region
    # columns to each intersection row).
    params = "-wb"
    intersect_bed_files(in_bed, tmp_bed, params, tmp_out,
                        sorted_out=True)
    # Read in transcript site overlaps with transcript exon regions.
    site2c_dic = {}
    # Dictionaries for later outputting unique + split hits separately.
    siteid2pol_dic = {}
    siteid2sc_dic = {}
    partid2chrse_dic = {}
    with open(tmp_out) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            tr_id = cols[0]
            part_s = int(cols[1])
            part_e = int(cols[2])
            site_id = cols[3]
            site_sc = cols[4]
            # Columns 7-9: overlapped exon region (from -wb).
            ex_s = int(cols[7])
            ex_e = int(cols[8])
            ex_id = cols[9]
            ex_pol = id2pol_dic[ex_id]
            siteid2pol_dic[site_id] = ex_pol
            siteid2sc_dic[site_id] = site_sc
            if site_id in site2c_dic:
                site2c_dic[site_id] += 1
            else:
                site2c_dic[site_id] = 1
            # Hit part number (a site split over n exons yields n parts).
            hit_c = site2c_dic[site_id]
            # Calculate genomic hit coordinates
            # (map transcript offset inside the exon to genomic position).
            # Plus strand case.
            gen_s = id2s_dic[ex_id] + part_s - ex_s
            gen_e = id2s_dic[ex_id] + part_e - ex_s
            # Minus strand case.
            if ex_pol == "-":
                gen_s = id2e_dic[ex_id] - part_e + ex_s
                gen_e = id2e_dic[ex_id] - part_s + ex_s
            # part ID.
            part_id = site_id + "_p" + str(hit_c)
            # Store chrse for each part ID.
            chrse = "%s\t%i\t%i" %(id2chr_dic[ex_id],gen_s,gen_e)
            partid2chrse_dic[part_id] = "%s\t%i\t%i" %(id2chr_dic[ex_id],gen_s,gen_e)
    # Produce separate output files for unique + split hits.
    all_hits_bed = out_bed
    if out_folder:
        all_hits_bed = out_all_hits_bed
    ALLBED = open(all_hits_bed, "w")
    if out_folder:
        UNIBED = open(out_unique_hits_bed, "w")
        SPLBED = open(out_split_hits_bed, "w")
        for site_id in site2c_dic:
            hit_c = site2c_dic[site_id]
            if site2hitc_dic is not None:
                site2hitc_dic[site_id] = hit_c
            site_pol = siteid2pol_dic[site_id]
            site_sc = siteid2sc_dic[site_id]
            # For unique hit use site ID, for split hits use part IDs.
            if hit_c == 1:
                # Unique hits.
                part_id = site_id + "_p1"
                UNIBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],site_id,site_sc,site_pol))
            else:
                # Split hits.
                for i in range(hit_c):
                    i += 1
                    part_id = site_id + "_p" + str(i)
                    SPLBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],part_id,site_sc,site_pol))
    # Output all hits.
    for site_id in site2c_dic:
        hit_c = site2c_dic[site_id]
        if site2hitc_dic is not None:
            site2hitc_dic[site_id] = hit_c
        site_pol = siteid2pol_dic[site_id]
        site_sc = siteid2sc_dic[site_id]
        # For unique hit use site ID, for split hits use part IDs.
        if hit_c == 1:
            # Unique hits.
            part_id = site_id + "_p1"
            ALLBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],site_id,site_sc,site_pol))
        else:
            # Split hits.
            for i in range(hit_c):
                i += 1
                part_id = site_id + "_p" + str(i)
                ALLBED.write("%s\t%s\t%s\t%s\n" %(partid2chrse_dic[part_id],part_id,site_sc,site_pol))
    # Delete tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    if os.path.exists(tmp_out):
        os.remove(tmp_out)
################################################################################
def check_convert_chr_id(chr_id):
    """
    Check and convert chromosome IDs to format:
    chr1, chr2, chrX, ...
    If chromosome IDs like 1,2,X, .. given, convert to chr1, chr2, chrX ..
    Return False if given chr_id not standard and not convertable.

    Filter out scaffold IDs like:
    GL000009.2, KI270442.1, chr14_GL000009v2_random
    chrUn_KI270442v1 ...

    >>> chr_id = "chrX"
    >>> check_convert_chr_id(chr_id)
    'chrX'
    >>> chr_id = "4"
    >>> check_convert_chr_id(chr_id)
    'chr4'
    >>> chr_id = "MT"
    >>> check_convert_chr_id(chr_id)
    'chrM'
    >>> chr_id = "GL000009.2"
    >>> check_convert_chr_id(chr_id)
    False
    >>> chr_id = "chrUn_KI270442v1"
    >>> check_convert_chr_id(chr_id)
    False

    """
    assert chr_id, "given chr_id empty"
    if chr_id.startswith("chr"):
        # Already "chr"-prefixed: accept only chr + digits / M / X / Y.
        if re.search(r"^chr[\dMXY]+$", chr_id):
            return chr_id
        return False
    # Ensembl-style ID: map MT -> M, then prepend "chr" if standard.
    if chr_id == "MT":
        chr_id = "M"
    if re.search(r"^[\dMXY]+$", chr_id):
        return "chr" + chr_id
    return False
################################################################################
def check_dic1_keys_in_dic2(dic1, dic2):
    """
    Check if keys in dic1 are all present in dic2, return True if present,
    otherwise False.

    >>> d1 = {'hallo': 1, 'hello' : 1}
    >>> d2 = {'hallo': 1, 'hello' : 1, "bonjour" : 1}
    >>> check_dic1_keys_in_dic2(d1, d2)
    True
    >>> d1 = {'hallo': 1, 'ciao' : 1}
    >>> check_dic1_keys_in_dic2(d1, d2)
    False

    """
    assert dic1, "dic1 empty"
    assert dic2, "dic2 empty"
    # all() short-circuits on the first missing key.
    return all(key in dic2 for key in dic1)
################################################################################
def get_nr_dic1_keys_in_dic2(dic1, dic2):
    """
    Return number of dic1 keys found in dic2.

    >>> d1 = {'hallo': 1, 'hello' : 1}
    >>> d2 = {'hallo': 1, 'hello' : 1, "bonjour" : 1}
    >>> get_nr_dic1_keys_in_dic2(d1, d2)
    2
    >>> d1 = {'hollo': 1, 'ciao' : 1}
    >>> get_nr_dic1_keys_in_dic2(d1, d2)
    0

    """
    assert dic1, "dic1 empty"
    assert dic2, "dic2 empty"
    # Count shared keys via generator expression.
    return sum(1 for key in dic1 if key in dic2)
################################################################################
def bed_get_chromosome_ids(bed_file,
                           std_chr_filter=False,
                           ids_dic=False):
    """
    Read in .bed file, return chromosome IDs (column 1 IDs).
    Return dic with chromosome ID -> count mapping.

    ids_dic:
    A non-empty ids_dic can be supplied, resulting in chromosome IDs
    to be added to the existing ids_dic dictionary.
    std_chr_filter:
    Filter / convert chromosome IDs with function check_convert_chr_id(),
    removing non-standard chromosomes, and convert IDs like 1,2,X,MT ..
    to chr1, chr2, chrX, chrM.

    >>> test_file = "test_data/test6.bed"
    >>> bed_get_chromosome_ids(test_file)
    {'chr1': 2, 'chr2': 2, 'chr3': 1}

    """
    if not ids_dic:
        ids_dic = {}
    # with statement closes the file (original had a no-op "f.closed").
    with open(bed_file) as f:
        for line in f:
            chr_id = line.strip().split("\t")[0]
            # Optionally filter / convert to standard chromosome IDs.
            if std_chr_filter:
                new_chr_id = check_convert_chr_id(chr_id)
                # If not standard chromosome ID or conversion failed, skip entry.
                if not new_chr_id:
                    continue
                chr_id = new_chr_id
            # Count occurrences per chromosome ID.
            ids_dic[chr_id] = ids_dic.get(chr_id, 0) + 1
    assert ids_dic, "No chromosome IDs read into dictionary (input file \"%s\" empty or malformatted? Chromosome IDs filter activated?)" % (bed_file)
    return ids_dic
################################################################################
def bed_get_score_filtered_count(bed_file, sc_thr,
                                 rev_filter=False):
    """
    Read in BED file and count how many rows remain after filtering
    column 5 scores by sc_thr. By default, assume higher score == better
    score, i.e. count rows with score >= sc_thr. Set rev_filter=True to
    reverse this (count rows with score <= sc_thr).

    rev_filter:
    Set True to reverse filtering.

    >>> test_file = "test_data/test5.bed"
    >>> bed_get_score_filtered_count(test_file, 3)
    1
    >>> bed_get_score_filtered_count(test_file, 3, rev_filter=True)
    2
    >>> bed_get_score_filtered_count(test_file, 4)
    0
    """
    c_rem = 0
    # with statement closes the file (original had a no-op "f.closed").
    with open(bed_file) as f:
        for line in f:
            site_sc = float(line.strip().split("\t")[4])
            # Keep the row if the score passes the threshold
            # (comparison direction set by rev_filter).
            if rev_filter:
                if site_sc > sc_thr:
                    continue
            else:
                if site_sc < sc_thr:
                    continue
            c_rem += 1
    return c_rem
################################################################################
def gtf_extract_exon_numbers(in_gtf,
                             tr_ids_dic=False):
    """
    Given a .gtf file with exon features, return dictionary with transcript
    ID -> exon count mapping.

    tr_ids_dic:
    Give tr_ids_dic dictionary with transcript IDs to keep.

    >>> in_gtf = "test_data/test_border_annot.gtf"
    >>> tr_ids_dic = {'ENST1': 1, 'ENST2': 1, 'ENST3': 1}
    >>> gtf_extract_exon_numbers(in_gtf, tr_ids_dic=tr_ids_dic)
    {'ENST1': 1, 'ENST2': 2, 'ENST3': 2}

    """
    # Transcript ID to exon count dic.
    tr2exc_dic = {}
    # dic for sanity checking exon number order.
    tr2exon_nr_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # Parse GTF line by line (tab-separated, 9 columns expected).
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        infos = cols[8]
        if not feature == "exon":
            continue
        # Restrict to standard chromosomes.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        # Extract exon number.
        m = re.search('exon_number "(\d+?)"', infos)
        # Try Gencode encoding.
        if not m:
            m = re.search('exon_number (\d+?);', infos)
        assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        exon_nr = int(m.group(1))
        # Optional transcript ID filter (keep only IDs present in dic).
        if tr_ids_dic:
            if transcript_id not in tr_ids_dic:
                continue
        # Count exon numbers.
        if not transcript_id in tr2exc_dic:
            tr2exc_dic[transcript_id] = 1
        else:
            tr2exc_dic[transcript_id] += 1
        # Check whether exon numbers are incrementing for each transcript ID.
        if not transcript_id in tr2exon_nr_dic:
            tr2exon_nr_dic[transcript_id] = exon_nr
        else:
            assert tr2exon_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
            tr2exon_nr_dic[transcript_id] = exon_nr
    f.close()
    # Check for read-in content.
    assert tr2exc_dic, "no exon features read in from \"%s\"" %(in_gtf)
    # Return to the castle.
    return tr2exc_dic
################################################################################
def bed_overlap_with_genomic_features(in_bed, feat_bed,
                                      out_file=False,
                                      int_whole_nr=True,
                                      use_feat_sc=False):
    """
    Overlap genomic regions in_bed BED with feature regions feat_bed BED.
    Return a dictionary of lists, with key = in_bed region ID and value
    a list of positions with length = region length, indicating for each
    position overlap (value = 1 or feat_bed region score) or not (value = 0).
    This means, each genomic position inside in_bed that overlaps with a
    region inside feat_bed will get either a value of 1 in the list, or
    the score of the overlapping region inside feat_bed (if use_feat_sc=True).
    Each genomic position inside in_bed not overlapping with feat_bed
    regions will get a 0 assigned. Note that the order of the list is
    the order of the sequence nucleotides (not the genomic position, which
    can be reversed for minus strand features).

    in_bed:
        Input BED regions file to add positional feature annotations to.
    feat_bed:
        Feature BED regions file from where to get positional annotations from.
    out_file:
        Output in_bed annotations to file.
        Format is:
        >region_id
        0
        1
        ...
    int_whole_nr:
        If a feature score is a whole number (e.g. 5.0), store it as int (5).
    use_feat_sc:
        Use overlapping feature region scores instead of a value of 1
        for overlapping regions.

    >>> in_bed = "test_data/test7.in.bed"
    >>> feat_bed = "test_data/test7.feat.bed"
    >>> exp_out = "test_data/test7.exp.out"
    >>> tmp_out = "test_data/test7.tmp.out"
    >>> bed_overlap_with_genomic_features(in_bed, feat_bed, out_file=tmp_out, use_feat_sc=True)
    {'reg1': [0, 0, 5, 5, 5], 'reg2': [5, 0, 0, 0, 0]}
    >>> diff_two_files_identical(exp_out, tmp_out)
    True
    >>> bed_overlap_with_genomic_features(in_bed, feat_bed)
    {'reg1': [0, 0, 1, 1, 1], 'reg2': [1, 0, 0, 0, 0]}

    """
    # Check that bedtools binary and both input files are available.
    assert is_tool("bedtools"), "bedtools not in PATH"
    assert os.path.isfile(in_bed), "cannot open in_bed BED file \"%s\"" % (in_bed)
    assert os.path.isfile(feat_bed), "cannot open feat_bed BED file \"%s\"" % (feat_bed)
    # Generate .tmp files (unique name so parallel runs do not clash).
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    # Get polarity, start, end for each site ID.
    id2pol_dic = {}
    id2s_dic = {}
    id2e_dic = {}
    # Dictionary of lists, store position labels, init with 0.
    id2labels_dic = {}
    with open(in_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_pol = cols[5]
            id2pol_dic[site_id] = site_pol
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            # BED is half-open, so end - start equals region length.
            site_l = site_e - site_s
            assert site_l, "invalid site length for row \"%s\" in in_bed \"%s\"" %(row, in_bed)
            id2labels_dic[site_id] = [0]*site_l
    f.closed  # no-op attribute access; the with-statement already closed f.
    assert id2pol_dic, "no entries read into dictionary (input file \"%s\" empty or malformatted?)" % (in_bed)
    # Run overlap calculation to get overlapping feature regions.
    # -s = same strand only, -wb = also report the overlapping feat_bed entry.
    intersect_params = "-s -wb"
    intersect_bed_files(in_bed, feat_bed, intersect_params, tmp_bed)
    with open(tmp_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            s = int(cols[1]) + 1 # Make one-based.
            e = int(cols[2])
            site_id = cols[3]
            site_s = id2s_dic[site_id] + 1 # Make one-based.
            site_e = id2e_dic[site_id]
            site_pol = id2pol_dic[site_id]
            # Score of the overlapping feat_bed region; with -wb the feat_bed
            # columns follow the in_bed columns, so column 11 should be the
            # feat_bed score field (assumes both files are 6-column BED —
            # TODO confirm against intersect_bed_files).
            feat_sc = float(cols[10])
            # Check whether score is whole number.
            if int_whole_nr:
                if not feat_sc % 1:
                    feat_sc = int(feat_sc)
            # + case.
            if site_pol == "+":
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index.
                        li = i - site_s
                        if use_feat_sc:
                            id2labels_dic[site_id][li] = feat_sc
                        else:
                            id2labels_dic[site_id][li] = 1
            else:
                # - case: list order follows the transcript (reverse of genome).
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index.
                        li = site_e - i
                        if use_feat_sc:
                            id2labels_dic[site_id][li] = feat_sc
                        else:
                            id2labels_dic[site_id][li] = 1
    f.closed  # no-op; kept for byte-compatibility (file closed by with).
    # If output to file enabled.
    if out_file:
        # Write labels to file.
        OUTLAB = open(out_file,"w")
        for site_id in id2labels_dic:
            OUTLAB.write(">%s\n" %(site_id))
            for label in id2labels_dic[site_id]:
                OUTLAB.write("%s\n" %(str(label)))
        OUTLAB.close()
    # Remove tmp files.
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    # Return dictionary of lists.
    return id2labels_dic
################################################################################
def gtf_extract_most_prominent_transcripts(in_gtf, out_file,
                                           strict=False,
                                           min_len=False,
                                           report=False,
                                           return_ids_dic=None,
                                           set_ids_dic=False,
                                           add_infos=False):
    """
    Extract most prominent transcripts list from in_gtf.

    Per gene, one transcript is selected: the one with the best (lowest)
    transcript support level (TSL), ties broken by longer exonic length.
    Only transcripts with a 'tag "basic"' annotation are considered.

    in_gtf:
        Genomic annotations (hg38) GTF file (.gtf or .gtf.gz)
        NOTE: tested with Ensembl GTF files, expects transcript
        support level (TSL) information.
    out_file:
        File to output transcript IDs (optionally with add_infos)
    min_len:
        Accept only transcripts with length >= --min-len
    strict:
        Accept only transcripts with transcript support level (TSL) 1-5
    report:
        If True, print progress / statistics to stdout.
    return_ids_dic:
        If dictionary is given, return IDs in dictionary and do not output
        to file.
    set_ids_dic:
        Optionally provide transcript IDs which should be chosen as most
        prominent transcript for their respective gene.
        NOTE(review): a transcript preset via set_ids_dic gets TSL "1";
        it looks like a later transcript with TSL 1 and a longer length
        could still overwrite it in the comparison below — confirm intended.
    add_infos:
        Add additional information columns (gene ID, TSL, length) to out_list
        output file.

    >>> in_gtf = "test_data/test_most_prom_select.gtf"
    >>> out_file = "dummy"
    >>> ids_dic = {}
    >>> gtf_extract_most_prominent_transcripts(in_gtf, out_file,return_ids_dic=ids_dic)
    {'ENST02': 10000, 'ENST05': 8000}
    >>> ids_dic = {}
    >>> gtf_extract_most_prominent_transcripts(in_gtf, out_file,return_ids_dic=ids_dic,strict=True)
    {'ENST05': 8000}

    """
    # Comparison dictionary: TSL string -> rank (lower is better), "NA" worst.
    id2sc = {}
    for i in range(5):
        pos = i + 1
        pos_str = "%i" %(pos)
        id2sc[pos_str] = pos
    id2sc["NA"] = 6
    if report:
        if strict:
            print("Strict transcript selection enabled ... ")
        if add_infos:
            print("Additional transcript infos in output file enabled ... ")
    # Read in transcript length (exonic regions).
    if report:
        print("Read in transcript lengths (exonic lengths) from GTF ... ")
    tr2exc_dic = {}
    tr2len_dic = gtf_get_transcript_lengths(in_gtf, tr2exc_dic=tr2exc_dic)
    assert tr2len_dic, "no transcript lengths read in from --gtf (invalid file format?)"
    if report:
        print("# transcripts read in: %i" %(len(tr2len_dic)))
    # Store most prominent transcript (all keyed by gene ID).
    g2tr_id = {}
    g2tr_tsl = {}
    g2tr_len = {}
    g2tr_bt = {}
    g2gn = {}
    g2gbt = {}
    if report:
        print("Extract most prominent transcripts ... ")
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])   # (unused here)
        feat_e = int(cols[4])   # (unused here)
        feat_pol = cols[6]      # (unused here)
        infos = cols[8]
        if not feature == "transcript":
            continue
        # Restrict to standard chromosomes.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Extract gene ID.
        m = re.search('gene_id "(.+?)"', infos)
        assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_id = m.group(1)
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        tr_id = m.group(1)
        # Extract gene name.
        m = re.search('gene_name "(.+?)"', infos)
        assert m, "gene_name entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_name = m.group(1)
        # Extract gene biotype.
        m = re.search('gene_biotype "(.+?)"', infos)
        # Try Gencode encoding.
        if not m:
            m = re.search('gene_type "(.+?)"', infos)
        assert m, "gene_biotype or gene_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_biotype = m.group(1)
        # Extract transcript biotype.
        m = re.search('transcript_biotype "(.+?)"', infos)
        # Try Gencode encoding.
        if not m:
            m = re.search('transcript_type "(.+?)"', infos)
        assert m, "transcript_biotype or transcript_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        tr_biotype = m.group(1)
        # Transcript length.
        tr_len = tr2len_dic[tr_id]
        # Gene name.
        g2gn[gene_id] = gene_name
        # Gene biotype.
        g2gbt[gene_id] = gene_biotype
        # If dictionary with transcript IDs given that should be selected.
        if set_ids_dic:
            if tr_id in set_ids_dic:
                g2tr_id[gene_id] = tr_id
                g2tr_len[gene_id] = tr_len
                # Preset transcripts get best rank "1" so they tend to win.
                g2tr_tsl[gene_id] = "1"
                g2tr_bt[gene_id] = tr_biotype
                continue
        # Look for basic tag.
        m = re.search('tag "basic"', infos)
        if not m:
            continue
        # Get transcript support level (TSL).
        # TSL can look like: "1 (assigned to previous version 5)" -> take "1".
        m = re.search('transcript_support_level "(.+?)"', infos)
        tsl_id = "NA"
        if m:
            tsl_id = m.group(1)
            if re.search("assigned to previous", tsl_id):
                m = re.search("(.+?) \(", tsl_id)
                tsl_id = m.group(1)
        # More filtering.
        if strict:
            if tsl_id == "NA":
                continue
        if min_len:
            if tr_len < min_len:
                continue
        # Update most prominent transcript.
        if not gene_id in g2tr_id:
            g2tr_id[gene_id] = tr_id
            g2tr_len[gene_id] = tr_len
            g2tr_tsl[gene_id] = tsl_id
            g2tr_bt[gene_id] = tr_biotype
        else:
            # Better (lower) TSL rank wins; equal rank -> longer transcript wins.
            if id2sc[tsl_id] < id2sc[g2tr_tsl[gene_id]]:
                g2tr_id[gene_id] = tr_id
                g2tr_len[gene_id] = tr_len
                g2tr_tsl[gene_id] = tsl_id
                g2tr_bt[gene_id] = tr_biotype
            elif id2sc[tsl_id] == id2sc[g2tr_tsl[gene_id]]:
                if tr_len > g2tr_len[gene_id]:
                    g2tr_id[gene_id] = tr_id
                    g2tr_len[gene_id] = tr_len
                    g2tr_tsl[gene_id] = tsl_id
                    g2tr_bt[gene_id] = tr_biotype
    f.close()
    assert g2tr_id, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (in_gtf)
    c_prom_tr = len(g2tr_id)
    if report:
        print("Number of selected transcripts: %i" %(c_prom_tr))
    # If transcript IDs should be output to out_file.
    if return_ids_dic is None:
        # Output transcript IDs list.
        OUT = open(out_file, "w")
        if add_infos:
            OUT.write("gene_id\tgene_name\tgene_biotype\ttr_id\ttr_biotype\ttr_len\ttr_exc\ttsl\n")
        for gene_id in g2tr_id:
            tr_id = g2tr_id[gene_id]
            tr_len = g2tr_len[gene_id]
            tsl_id = g2tr_tsl[gene_id]
            tr_bt = g2tr_bt[gene_id]
            tr_exc = tr2exc_dic[tr_id]
            gene_name = g2gn[gene_id]
            gene_bt = g2gbt[gene_id]
            if add_infos:
                OUT.write("%s\t%s\t%s\t%s\t%s\t%i\t%i\t%s\n" % (gene_id,gene_name,gene_bt,tr_id,tr_bt,tr_len,tr_exc,tsl_id))
            else:
                OUT.write("%s\n" % (tr_id))
        OUT.close()
        if report:
            if add_infos:
                print("%i transcript IDs + additional infos written to:\n%s" %(c_prom_tr, out_file))
            else:
                print("%i transcript IDs written to:\n%s" %(c_prom_tr, out_file))
    else:
        # Fill the caller-supplied dictionary instead of writing a file.
        # Note: this branch returns the dict; the file-output branch returns None.
        for gene_id in g2tr_id:
            tr_id = g2tr_id[gene_id]
            tr_len = g2tr_len[gene_id]
            return_ids_dic[tr_id] = tr_len
        assert return_ids_dic, "no most prominent transcript IDs selected"
        return return_ids_dic
################################################################################
def gtf_get_transcript_lengths(in_gtf,
                               tr2exc_dic=None):
    """
    Get transcript lengths (= sum of exon lengths, i.e. spliced length,
    not the unspliced genomic span) from GTF file.

    tr2exc_dic:
        Optionally provide a transcript ID to exon count dictionary for
        counting transcript exons.

    >>> in_gtf = "test_data/map_test_in.gtf"
    >>> gtf_get_transcript_lengths(in_gtf)
    {'ENST001': 2000, 'ENST002': 2000}

    """
    # Transcript ID -> summed exonic length.
    tr2len_dic = {}
    # Pick the right opener depending on .gz suffix.
    opener = gzip.open if re.search(".+\.gz$", in_gtf) else open
    with opener(in_gtf, "rt") as gtf_in:
        for line in gtf_in:
            # Skip header lines.
            if line.startswith("#"):
                continue
            fields = line.strip().split("\t")
            exon_s = int(fields[3])
            exon_e = int(fields[4])
            if fields[2] != "exon":
                continue
            # Extract transcript ID from the attributes column.
            m = re.search('transcript_id "(.+?)"', fields[8])
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            tr_id = m.group(1)
            # GTF coordinates are one-based and inclusive.
            tr2len_dic[tr_id] = tr2len_dic.get(tr_id, 0) + (exon_e - exon_s + 1)
            # Optionally also count exons per transcript.
            if tr2exc_dic is not None:
                tr2exc_dic[tr_id] = tr2exc_dic.get(tr_id, 0) + 1
    assert tr2len_dic, "No IDs read into dictionary (--gtf file \"%s\" empty or malformatted?)" % (in_gtf)
    return tr2len_dic
################################################################################
def shuffle_sequences(seqs_dic,
                      new_ids=False,
                      id_prefix="CLIP",
                      di_shuffle=False):
    """
    Shuffle sequences given by seqs_dic (key = sequence ID, value = sequence).
    Return shuffled sequences in new dictionary, optionally with new IDs.

    new_ids:
        Assign new IDs to shuffled sequences.
    id_prefix:
        Use this ID prefix for the new sequence IDs (if new_ids=True).
    di_shuffle:
        Apply di-nucleotide shuffling to sequences, to preserve di-nucleotide
        frequencies.

    """
    new_seqs_dic = {}
    assert seqs_dic, "given seqs_dic dictionary empty?"
    c_ids = 0
    for seq_id in seqs_dic:
        seq = seqs_dic[seq_id]
        c_ids += 1
        new_id = seq_id
        if new_ids:
            # BUGFIX: str() conversion needed; "str + int" raised TypeError
            # whenever new_ids=True (cf. ushuffle_sequences which does this
            # correctly).
            new_id = id_prefix + "_" + str(c_ids)
        # Sequences shorter than 3 nt are kept as-is (nothing to shuffle).
        if len(seq) < 3:
            new_seqs_dic[new_id] = seq
        else:
            if di_shuffle:
                # Di-nucleotide shuffling (preserves di-nucleotide frequencies).
                new_seq = shuffle_difreq(seq)
                new_seqs_dic[new_id] = new_seq
            else:
                # Simple mono-nucleotide shuffle.
                seq_list = list(seq)
                random.shuffle(seq_list)
                new_seq = ''.join(seq_list)
                new_seqs_dic[new_id] = new_seq
    assert new_seqs_dic, "generated new_seqs_dic dictionary empty?"
    return new_seqs_dic
################################################################################
def shuffle_difreq(seq):
    """
    Does di-nucleotide shuffling of sequences, preserving the frequencies.
    Builds a Markov-style chain: each base is drawn weighted by how often
    it followed the previous base in the original sequence.
    Code found here:
    https://www.biostars.org/p/66004/
    """
    # Need more li(m)bs.
    from collections import Counter
    # Weighted random pick: expand [(value, weight), ...] into a list with
    # each value repeated weight times, then choose uniformly.
    weighted_choice = lambda s : random.choice(sum(([v]*wt for v,wt in s),[]))
    # Get di-nucleotide frequencies (base -> {next_base: count}).
    freqs = difreq(seq)
    # get the first base by the total frequency across the sequence
    # (retry until the chosen base has at least one known successor).
    shuff_seq = [None]
    while not shuff_seq[0] in freqs:
        shuff_seq = [weighted_choice(Counter(seq).items())]
    while len(shuff_seq) < len(seq):
        # each following base is based of the frequency of the previous base
        # and their co-occurrence in the original sequence.
        try:
            shuff_seq.append(weighted_choice(freqs[shuff_seq[-1]].items()))
        except KeyError:
            # Dead end (base with no recorded successor): backtrack one step.
            shuff_seq.pop()
    assert len(shuff_seq) == len(seq)
    return "".join(shuff_seq)
################################################################################
def difreq(seq):
    """
    Count di-nucleotide frequencies of seq.
    Return a nested dictionary: base -> {following_base: count}.
    Used by shuffle_difreq for frequency-preserving shuffling.
    Code found here:
    https://www.biostars.org/p/66004/
    """
    from collections import defaultdict
    pair_counts = defaultdict(lambda: defaultdict(int))
    # Walk over all adjacent character pairs.
    for idx in range(len(seq) - 1):
        pair_counts[seq[idx]][seq[idx + 1]] += 1
    # Convert nested defaultdicts into plain dicts.
    return {base: dict(nexts) for base, nexts in pair_counts.items()}
################################################################################
def output_chromosome_lengths_file(len_dic, out_file,
                                   ids2print_dic=None):
    """
    Output chromosome lengths file with format:
    sequence_ID<tab>sequence_length

    ids2print_dic:
        If given, restrict output to sequence IDs present in this dictionary.

    """
    written = 0
    lengths_out = open(out_file, "w")
    for seq_id, seq_len in len_dic.items():
        # Apply the optional ID filter.
        if ids2print_dic is not None and seq_id not in ids2print_dic:
            continue
        lengths_out.write("%s\t%i\n" %(seq_id, seq_len))
        written += 1
    lengths_out.close()
    assert written, "nothing was printed out"
################################################################################
def bed_sequence_lengths_to_bed(len_dic, out_file,
                                ids_dic=None):
    """
    Given a dictionary of sequence lengths (sequence_id -> sequence_length),
    output the sequence regions to BED, with sequence ID as column 1 + 4,
    region start = 0, and region end = sequence_length.

    ids_dic:
        Dictionary with IDs to output (instead of all IDs in len_dic).

    >>> len_dic = {"tr1": 100, "tr2": 50}
    >>> out_exp_bed = "test_data/test_lengths_to_bed.exp.bed"
    >>> out_tmp_bed = "test_data/test_lengths_to_bed.tmp.bed"
    >>> bed_sequence_lengths_to_bed(len_dic, out_tmp_bed)
    >>> diff_two_files_identical(out_exp_bed, out_tmp_bed)
    True

    """
    assert len_dic, "given dictionary len_dic is empty"
    written = 0
    bed_out = open(out_file, "w")
    for seq_id, seq_len in len_dic.items():
        # Apply the optional ID filter.
        if ids_dic is not None and seq_id not in ids_dic:
            continue
        # Full sequence as one plus-strand BED region (score column = 0).
        bed_out.write("%s\t0\t%i\t%s\t0\t+\n" %(seq_id, seq_len, seq_id))
        written += 1
    bed_out.close()
    assert written, "no sequence regions output to BED file"
################################################################################
def extract_transcript_sequences(bed_dic, seq_dic,
ext_lr=False,
revcom=False,
full_hits_only=False):
"""
Given a dictionary with bed regions (region ID -> BED row) and a
sequence dictionary (Sequence ID -> sequence), extract the BED region
sequences and return in new dictionary (region ID -> region sequence).
ext_lr:
Optionally, extend regions by ext_lr nt (up- and downstream).
In case full extension is not possible, use maximum extension possible.
revcom:
if revcom=True and strand of bed_dic region is "-", return the reverse
complement of the region sequence.
full_hits_only:
Set full_hits_only=True to only recover full hits.
>>> seq_dic = {"T1" : "AAAACCCCGGGGTTTT", "T2" : "ATATACACAGAGCGCGCTCTGTGT"}
>>> bed_dic = {"S1" : "T1\\t4\\t8\\tS1\\t0\\t+", "S2" : "T2\\t6\\t8\\tS2\\t0\\t+"}
>>> extract_transcript_sequences(bed_dic, seq_dic, ext_lr=2)
{'S1': 'AACCCCGG', 'S2': 'ACACAG'}
>>> extract_transcript_sequences(bed_dic, seq_dic, ext_lr=5, full_hits_only=True)
{'S2': 'TATACACAGAGC'}
"""
id2seq_dic = {}
# Process .bed regions.
for reg_id in bed_dic:
cols = bed_dic[reg_id].split("\t")
seq_id = cols[0]
reg_s = int(cols[1])
reg_e = int(cols[2])
reg_pol = cols[5]
assert seq_id in seq_dic, "sequence ID \"%s\" not found in given sequence dictionary" %(seq_id)
seq = seq_dic[seq_id]
# Update region borders.
new_s = reg_s
new_e = reg_e
exp_l = new_e - new_s
# Adjust if given start or end is out of bounds.
if new_s < 0:
new_s = 0
if new_e > len(seq):
new_e = len(seq)
# If region should be extended up- and downstream by ext_lr.
if ext_lr:
new_s = new_s - ext_lr
new_e = reg_e + ext_lr
exp_l = new_e - new_s
# If start or end is out of bounds after extension.
if new_s < 0:
new_s = 0
if new_e > len(seq):
new_e = len(seq)
reg_seq = seq[new_s:new_e]
reg_l = len(reg_seq)
if full_hits_only:
if not reg_l == exp_l:
continue
if revcom:
if reg_pol == "-":
id2seq_dic[reg_id] = revcom_seq(reg_seq)
else:
id2seq_dic[reg_id] = reg_seq
else:
id2seq_dic[reg_id] = reg_seq
assert id2seq_dic, "no sequences extracted"
return id2seq_dic
################################################################################
def revcom_seq(seq,
               upper=False,
               convert_to_rna=False):
    """
    Return reverse complement of seq. Input is normalized to DNA first
    (U/u characters become T/t), preserving letter case unless upper=True.

    upper:
        If True, uppercase the sequence before complementing.
    convert_to_rna:
        If True, return the reverse complement as RNA (T->U).

    >>> seq = "AAACAGatt"
    >>> revcom_seq(seq)
    'aatCTGTTT'
    >>> revcom_seq(seq, upper=True)
    'AATCTGTTT'
    >>> revcom_seq(seq, convert_to_rna=True)
    'aauCUGUUU'

    """
    assert seq, "given sequence empty"
    # Reverse and normalize to DNA.
    rev = seq[::-1]
    if upper:
        rev = rev.upper().replace("U", "T")
    else:
        rev = rev.replace("U", "T").replace("u", "t")
    # Pick the complement alphabet (RNA or DNA).
    if convert_to_rna:
        rev = rev.replace("T", "U").replace("t", "u")
        comp_table = str.maketrans("ACGUacgu", "UGCAugca")
    else:
        comp_table = str.maketrans("ACGTacgt", "TGCAtgca")
    return rev.translate(comp_table)
################################################################################
def random_order_dic_keys_into_list(in_dic):
    """
    Return the keys of in_dic as a list in random order.
    """
    import random
    key_list = list(in_dic)
    random.shuffle(key_list)
    return key_list
################################################################################
def ushuffle_sequences(seqs_dic,
                       new_ids=False,
                       id2vpse_dic=False,
                       id_prefix="CLIP",
                       ushuffle_k=1):
    """
    Shuffle sequences given by seqs_dic (key = sequence ID, value = sequence).
    Return shuffled sequences in new dictionary, optionally with new IDs.
    This function uses uShuffle, available in Python here:
    https://github.com/guma44/ushuffle
    uShuffle can be installed inside conda environment with:
    pip install ushuffle

    Example code for Python 3 (input and output are byte objects, not strings):
    from ushuffle import shuffle, Shuffler
    seq = b"acgtgattagctagct"
    shuffler = Shuffler(seq, 2)
    for i in range(5):
        seqres = shuffler.shuffle()
        print("results:", seqres)
    print(shuffle(seq, 2))
    Output:
    results: b'agctacgatgttagct'
    results: b'atagctacgagttgct'
    results: b'atgctagcgagtactt'
    results: b'agcgctgatacttagt'
    results: b'agtgattagctacgct'
    b'agcgagctgttactat'

    new_ids:
        Assign new IDs to shuffled sequences.
    id_prefix:
        Use this ID prefix for the new sequence IDs (if new_ids=True).
    ushuffle_k:
        Supply ushuffle_k for k-nucleotide shuffling.
    id2vpse_dic:
        Dictionary with sequence ID -> [viewpoint_start, viewpoint_end].
        Use it to restore lowercase uppercase regions for each sequence,
        after shuffling.
        NOTE: when new_ids=True this dictionary is also extended with the
        new IDs (mutated in place).

    """
    # Third-party dependency (not stdlib); import deferred to call time.
    from ushuffle import shuffle
    new_seqs_dic = {}
    assert seqs_dic, "given seqs_dic dictionary empty?"
    c_ids = 0
    for seq_id in seqs_dic:
        seq = seqs_dic[seq_id]
        # Shuffling operates on the uppercased sequence; case is restored
        # below via the viewpoint coordinates (if given).
        seq = seq.upper()
        if id2vpse_dic: # note: 1-based coords inside dic.
            assert seq_id in id2vpse_dic, "sequence ID %s not in id2vpse_dic" %(seq_id)
            vp_s = id2vpse_dic[seq_id][0]
            vp_e = id2vpse_dic[seq_id][1]
        c_ids += 1
        new_id = seq_id
        if new_ids:
            new_id = id_prefix + "_" + str(c_ids)
        # Keep viewpoint coords reachable under the (possibly new) ID.
        if id2vpse_dic:
            id2vpse_dic[new_id] = [vp_s, vp_e]
        # Sequences shorter than 3 nt are kept as-is (nothing to shuffle).
        if len(seq) < 3:
            new_seqs_dic[new_id] = seq
        else:
            # String to byte object (uShuffle works on bytes).
            seq_bo = seq.encode('ASCII')
            shuff_seq_bo = shuffle(seq_bo, ushuffle_k)
            shuff_seq = shuff_seq_bo.decode('ASCII')
            # Restore lowercase uppercase structure of original positive sequence.
            if id2vpse_dic:
                new_seqs_dic[new_id] = add_lowercase_context_to_sequences(shuff_seq, vp_s, vp_e)
            else:
                new_seqs_dic[new_id] = shuff_seq
    assert new_seqs_dic, "generated new_seqs_dic dictionary empty?"
    return new_seqs_dic
################################################################################
def add_lowercase_context_to_sequences(seq, uc_s, uc_e,
                                       convert_to_rna=False):
    """
    Given a sequence and uppercase middle region start (uc_s) and end (uc_e),
    make context region upstream + downstream lowercase.
    Both coordinates are one-based and inclusive.
    Return lowercase-uppercase-lowercase sequence.

    convert_to_rna:
        If True, convert new sequence to RNA.

    >>> seq = "AAAACCCCGGGGTTTT"
    >>> add_lowercase_context_to_sequences(seq, 5, 12, convert_to_rna=True)
    'aaaaCCCCGGGGuuuu'
    >>> add_lowercase_context_to_sequences(seq, 1, 8)
    'AAAACCCCggggtttt'
    >>> add_lowercase_context_to_sequences(seq, 15, 16)
    'aaaaccccggggttTT'

    """
    # Validate coordinates against the sequence.
    total_len = len(seq)
    assert uc_s < uc_e, "uc_s < uc_e not satisfied"
    assert uc_s > 0, "uc_s > 0 not satisfied"
    assert total_len >= uc_e, "uppercase region end > sequence length"
    # Assemble lowercase-UPPERCASE-lowercase (convert one-based to slices).
    new_seq = "".join([seq[:uc_s-1].lower(),
                       seq[uc_s-1:uc_e].upper(),
                       seq[uc_e:].lower()])
    if convert_to_rna:
        new_seq = new_seq.replace("T", "U").replace("t", "u")
    return new_seq
################################################################################
def gtf_get_gene_ids_from_transcript_ids(tr_ids_dic, in_gtf,
                                         gene_ids_dic=False):
    """
    Get gene IDs for a dictionary of transcript IDs,
    returning dictionary with transcript ID (key) mapped to its gene ID (value).

    gene_ids_dic:
        If set, return dictionary with mapped gene IDs as keys only.

    >>> tr_ids_dic = {'ENST01': 1, 'ENST02': 1}
    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_get_gene_ids_from_transcript_ids(tr_ids_dic, in_gtf)
    {'ENST01': 'ENSG01', 'ENST02': 'ENSG02'}
    >>> gtf_get_gene_ids_from_transcript_ids(tr_ids_dic, in_gtf, gene_ids_dic=True)
    {'ENSG01': 1, 'ENSG02': 1}

    """
    # Checks.
    assert tr_ids_dic, "given dictionary tr_ids_dic empty"
    # Output dic.
    out_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        feature = cols[2]
        infos = cols[8]
        # Only transcript features carry the transcript -> gene mapping.
        if not feature == "transcript":
            continue
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        if not transcript_id in tr_ids_dic:
            continue
        # Extract gene ID.
        m = re.search('gene_id "(.+?)"', infos)
        assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_id = m.group(1)
        if gene_ids_dic:
            out_dic[gene_id] = 1
        else:
            out_dic[transcript_id] = gene_id
    f.close()
    # Check and return to barracks.
    # BUGFIX: error message was missing "no", reading as its own negation.
    assert out_dic, "out_dic empty, no transcript features or gene IDs read in"
    return out_dic
################################################################################
def gtf_get_gene_biotypes_from_transcript_ids(tr_ids_dic, in_gtf,
                                              all_gbtc_dic=None):
    """
    Get gene IDs from dictionary of transcript IDs (tr_ids_dic), and based
    on these gene IDs create a dictionary of gene biotype counts.
    Return dictionary of gene biotype counts.

    all_gbtc_dic:
        If set, fill this dictionary with gene biotype counts for all genes.

    >>> tr_ids_dic = {'ENST01': 1, 'ENST02': 1}
    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_get_gene_biotypes_from_transcript_ids(tr_ids_dic, in_gtf)
    {'transcribed_unprocessed_pseudogene': 2}

    """
    assert tr_ids_dic, "given dictionary tr_ids_dic empty"
    # transcript ID -> gene ID (only for requested transcripts).
    t2g_dic = {}
    # gene ID -> gene biotype (for all genes in the file).
    g2bt_dic = {}
    # Pick the right opener depending on .gz suffix.
    opener = gzip.open if re.search(".+\.gz$", in_gtf) else open
    with opener(in_gtf, "rt") as gtf_in:
        for line in gtf_in:
            # Skip header lines.
            if line.startswith("#"):
                continue
            fields = line.strip().split("\t")
            feature = fields[2]
            attributes = fields[8]
            if feature == "gene":
                # Gene ID.
                m = re.search('gene_id "(.+?)"', attributes)
                assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
                gene_id = m.group(1)
                # Gene biotype (Ensembl), falling back to Gencode's gene_type.
                m = re.search('gene_biotype "(.+?)"', attributes)
                if not m:
                    m = re.search('gene_type "(.+?)"', attributes)
                assert m, "gene_biotype or gene_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
                gene_biotype = m.group(1)
                g2bt_dic[gene_id] = gene_biotype
                # Optionally count biotypes over ALL genes.
                if all_gbtc_dic is not None:
                    all_gbtc_dic[gene_biotype] = all_gbtc_dic.get(gene_biotype, 0) + 1
            elif feature == "transcript":
                # Gene ID.
                m = re.search('gene_id "(.+?)"', attributes)
                assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
                gene_id = m.group(1)
                # Transcript ID.
                m = re.search('transcript_id "(.+?)"', attributes)
                assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
                transcript_id = m.group(1)
                # Record the mapping only for requested transcripts.
                if transcript_id in tr_ids_dic:
                    t2g_dic[transcript_id] = gene_id
    # Create gene biotype counts dictionary for given transcript IDs.
    gbtc_dic = {}
    for tr_id in t2g_dic:
        gbt = g2bt_dic[t2g_dic[tr_id]]
        gbtc_dic[gbt] = gbtc_dic.get(gbt, 0) + 1
    assert gbtc_dic, "gene biotype counts dictionary for given transcript IDs empty"
    return gbtc_dic
################################################################################
def gtf_get_transcript_infos(tr_ids_dic, in_gtf):
    """
    Get transcript infos (transcript biotype, gene ID, gene name, gene biotype)
    from dictionary of transcript IDs (tr_ids_dic).

    Return dictionary with:
    transcript ID -> [transcript biotype, gene ID, gene name, gene biotype]

    >>> tr_ids_dic = {'ENST01': 1}
    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_get_transcript_infos(tr_ids_dic, in_gtf)
    {'ENST01': ['lncRNA', 'ENSG01', 'ABC1', 'transcribed_unprocessed_pseudogene']}

    """
    assert tr_ids_dic, "given dictionary tr_ids_dic empty"
    # Transcript ID -> info list.
    t2i_dic = {}
    # Pick the right opener depending on .gz suffix.
    opener = gzip.open if re.search(".+\.gz$", in_gtf) else open
    with opener(in_gtf, "rt") as gtf_in:
        for line in gtf_in:
            # Skip header lines.
            if line.startswith("#"):
                continue
            fields = line.strip().split("\t")
            if fields[2] != "transcript":
                continue
            attributes = fields[8]
            # Transcript ID (skip transcripts that were not asked for).
            m = re.search('transcript_id "(.+?)"', attributes)
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            transcript_id = m.group(1)
            if transcript_id not in tr_ids_dic:
                continue
            # Gene ID.
            m = re.search('gene_id "(.+?)"', attributes)
            assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_id = m.group(1)
            # Gene biotype (Ensembl), falling back to Gencode's gene_type.
            m = re.search('gene_biotype "(.+?)"', attributes)
            if not m:
                m = re.search('gene_type "(.+?)"', attributes)
            assert m, "gene_biotype or gene_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_biotype = m.group(1)
            # Transcript biotype (Ensembl), falling back to transcript_type.
            m = re.search('transcript_biotype "(.+?)"', attributes)
            if not m:
                m = re.search('transcript_type "(.+?)"', attributes)
            assert m, "transcript_biotype or transcript_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            tr_biotype = m.group(1)
            # Gene name.
            m = re.search('gene_name "(.+?)"', attributes)
            assert m, "gene_name entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_name = m.group(1)
            t2i_dic[transcript_id] = [tr_biotype, gene_id, gene_name, gene_biotype]
    assert t2i_dic, "transcript infos dictionary for given transcript IDs empty"
    return t2i_dic
################################################################################
def gtf_get_gene_infos(gene_ids_dic, in_gtf):
    """
    Get gene infos (gene name, gene biotype) for gene IDs from dictionary
    of gene IDs (gene_ids_dic).

    Return dictionary with:
    gene ID -> [gene name, gene biotype]

    >>> gene_ids_dic = {'ENSG01': 1}
    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_get_gene_infos(gene_ids_dic, in_gtf)
    {'ENSG01': ['ABC1', 'transcribed_unprocessed_pseudogene']}

    """
    assert gene_ids_dic, "given dictionary gene_ids_dic empty"
    # Gene ID -> info list.
    g2i_dic = {}
    # Pick the right opener depending on .gz suffix.
    opener = gzip.open if re.search(".+\.gz$", in_gtf) else open
    with opener(in_gtf, "rt") as gtf_in:
        for line in gtf_in:
            # Skip header lines.
            if line.startswith("#"):
                continue
            fields = line.strip().split("\t")
            if fields[2] != "gene":
                continue
            attributes = fields[8]
            # Gene ID (skip genes that were not asked for).
            m = re.search('gene_id "(.+?)"', attributes)
            assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_id = m.group(1)
            if gene_id not in gene_ids_dic:
                continue
            # Gene biotype (Ensembl), falling back to Gencode's gene_type.
            m = re.search('gene_biotype "(.+?)"', attributes)
            if not m:
                m = re.search('gene_type "(.+?)"', attributes)
            assert m, "gene_biotype or gene_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_biotype = m.group(1)
            # Gene name.
            m = re.search('gene_name "(.+?)"', attributes)
            assert m, "gene_name entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_name = m.group(1)
            g2i_dic[gene_id] = [gene_name, gene_biotype]
    assert g2i_dic, "gene infos dictionary for given gene IDs empty"
    return g2i_dic
################################################################################
def gtf_get_transcript_biotypes(tr_ids_dic, in_gtf):
    """
    Get transcript biotype labels + counts (return label -> count dic)
    for a set of transcript IDs (tr_ids_dic) and a given GTF file (in_gtf).

    >>> tr_ids_dic = {'ENST01': 1, 'ENST02': 1}
    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_get_transcript_biotypes(tr_ids_dic, in_gtf)
    {'lncRNA': 2}

    """
    assert tr_ids_dic, "given dictionary tr_ids_dic empty"
    # Biotype label -> count.
    tbt2c_dic = {}
    # Pick the right opener depending on .gz suffix.
    opener = gzip.open if re.search(".+\.gz$", in_gtf) else open
    with opener(in_gtf, "rt") as gtf_in:
        for line in gtf_in:
            # Skip header lines.
            if line.startswith("#"):
                continue
            fields = line.strip().split("\t")
            if fields[2] != "transcript":
                continue
            attributes = fields[8]
            # Transcript ID (skip transcripts that were not asked for).
            m = re.search('transcript_id "(.+?)"', attributes)
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            transcript_id = m.group(1)
            if transcript_id not in tr_ids_dic:
                continue
            # Transcript biotype (Ensembl), falling back to transcript_type.
            m = re.search('transcript_biotype "(.+?)"', attributes)
            if not m:
                m = re.search('transcript_type "(.+?)"', attributes)
            assert m, "transcript_biotype or transcript_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            tr_biotype = m.group(1)
            # Count the biotype.
            tbt2c_dic[tr_biotype] = tbt2c_dic.get(tr_biotype, 0) + 1
    assert tbt2c_dic, "no transcript biotype information read in"
    return tbt2c_dic
################################################################################
def gtf_get_transcript_ids(in_gtf):
    """
    Get transcript IDs from in_gtf GTF file.
    Returns dictionary mapping transcript ID -> 1 (set-like usage).

    in_gtf:
        GTF file (plain text, or gzipped with .gz extension).

    Raises AssertionError if a transcript feature lacks a transcript_id
    attribute or if no transcript IDs were read in.

    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_get_transcript_ids(in_gtf)
    {'ENST01': 1, 'ENST02': 1}

    """
    # Transcript IDs dictionary.
    tr_ids_dic = {}
    # Open GTF either as .gz or as text file.
    # Raw string avoids the invalid "\." escape warning of the original.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # try/finally ensures the handle is closed even if a row is malformed.
    try:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            feature = cols[2]
            infos = cols[8]
            # One entry per transcript feature.
            if feature != "transcript":
                continue
            # Extract and store transcript ID.
            m = re.search('transcript_id "(.+?)"', infos)
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            tr_ids_dic[m.group(1)] = 1
    finally:
        f.close()
    # Check and return.
    assert tr_ids_dic, "no transcript IDs read in"
    return tr_ids_dic
################################################################################
def gtf_get_gene_biotypes(gene_ids_dic, in_gtf,
                          all_gbtc_dic=None):
    """
    Get gene biotype labels + counts (return label -> count dic)
    for a set of gene IDs (gene_ids_dic) and a given GTF file (in_gtf).

    gene_ids_dic:
        Gene IDs (keys) to count biotypes for.
    in_gtf:
        GTF file (plain text, or gzipped with .gz extension) containing
        gene features with gene_biotype (Ensembl) or gene_type (Gencode)
        attributes.
    all_gbtc_dic:
        If all_gbtc_dic dictionary is given, fill up this dictionary with
        gene biotype labels and total counts for these in in_gtf GTF file
        (not just counts for selected genes).

    Raises AssertionError if gene_ids_dic is empty, if a gene feature lacks
    the required attributes, or if no biotype info was read in.

    >>> gene_ids_dic = {'ENSG01': 1, 'ENSG02': 1}
    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_get_gene_biotypes(gene_ids_dic, in_gtf)
    {'transcribed_unprocessed_pseudogene': 2}

    """
    # Checks.
    assert gene_ids_dic, "empty gene IDs dictionary given"
    # Biotype -> count dic (selected genes only).
    gbtc_dic = {}
    # Open GTF either as .gz or as text file.
    # Raw string avoids the invalid "\." escape warning of the original.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # try/finally ensures the handle is closed even if a row is malformed.
    try:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            feature = cols[2]
            infos = cols[8]
            # Only gene features carry gene biotypes (one per gene).
            if feature != "gene":
                continue
            # Extract gene ID.
            m = re.search('gene_id "(.+?)"', infos)
            assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_id = m.group(1)
            # Extract gene biotype (Ensembl), falling back to the
            # Gencode-style gene_type attribute.
            m = re.search('gene_biotype "(.+?)"', infos)
            if not m:
                m = re.search('gene_type "(.+?)"', infos)
            assert m, "gene_biotype or gene_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_biotype = m.group(1)
            # Total counts over ALL genes (before the gene_ids_dic filter).
            if all_gbtc_dic is not None:
                all_gbtc_dic[gene_biotype] = all_gbtc_dic.get(gene_biotype, 0) + 1
            # Only count selected genes.
            if gene_id not in gene_ids_dic:
                continue
            # Store biotype info.
            gbtc_dic[gene_biotype] = gbtc_dic.get(gene_biotype, 0) + 1
    finally:
        f.close()
    # Check and return.
    assert gbtc_dic, "no gene biotype information read in"
    return gbtc_dic
################################################################################
def gtf_count_isoforms_per_gene(in_gtf,
                                gene_ids_dic=False):
    """
    Count isoforms for each gene and return dictionary with:
    gene_id -> isoform_count.

    in_gtf:
        GTF file (plain text, or gzipped with .gz extension).
    gene_ids_dic:
        Gene IDs for which to return isoform counts. Per default, return
        counts for all gene IDs.

    Raises AssertionError if a transcript feature lacks gene_id /
    transcript_id attributes or if no counts were read in.

    >>> in_gtf = "test_data/gene_test_in.gtf"
    >>> gtf_count_isoforms_per_gene(in_gtf)
    {'ENSG01': 1, 'ENSG02': 1}

    """
    # Gene ID -> isoform count dic.
    id2c_dic = {}
    # Open GTF either as .gz or as text file.
    # Raw string avoids the invalid "\." escape warning of the original.
    if re.search(r".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # try/finally ensures the handle is closed even if a row is malformed.
    try:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            feature = cols[2]
            infos = cols[8]
            # Each transcript feature == one isoform.
            if feature != "transcript":
                continue
            # Extract gene ID.
            m = re.search('gene_id "(.+?)"', infos)
            assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            gene_id = m.group(1)
            # Sanity check: a transcript feature must carry a transcript ID.
            m = re.search('transcript_id "(.+?)"', infos)
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            # Optionally restrict to genes of interest.
            if gene_ids_dic:
                if gene_id not in gene_ids_dic:
                    continue
            # Count isoform.
            id2c_dic[gene_id] = id2c_dic.get(gene_id, 0) + 1
    finally:
        f.close()
    # Check and return.
    assert id2c_dic, "no gene ID -> isoform count information read in"
    return id2c_dic
################################################################################
def convert_genome_positions_to_transcriptome(in_bed, out_folder,
                                              in_gtf, tr_ids_dic,
                                              intersectBed_f=1,
                                              int_whole_nr=True,
                                              ignore_ids_dic=False):
    """
    Converts a BED file with genomic coordinates into a BED file with
    transcriptome coordinates. A GTF file with exon features needs to be
    supplied. A dictionary of transcript IDs defines to which transcripts
    the genomic regions will be mapped to. Note that input BED file column
    4 is used as region ID and should be unique.

    intersectBed_f:
        Minimum overlap fraction passed to intersectBed -f.
    int_whole_nr:
        If True, whole-number float scores are output as integers.
    ignore_ids_dic:
        Transcript IDs to remove from tr_ids_dic before mapping.

    Output files are:
    genomic_exon_coordinates.bed
        Genomic exon coordinates extracted from .gtf file
    transcript_exon_coordinates.bed
        Transcript exon coordinates calculated from genomic ones
    hit_exon_overlap.bed
        Overlap between genomic input and exon regions
    transcript_matches_complete.bed
        Input regions that fully (completely) map to transcript regions
    transcript_matches_incomplete.bed
        Input regions that partly (incompletely) map to transcript regions
    transcript_matches_complete_unique.bed
        Unique + complete (full-length) matches
    transcript_matches_all_unique.bed
        All unique (complete+incomplete) matches
    hit_transcript_exons.bed
        Genomic coordinates of exons of transcripts with mapped regions
    hit_transcript_stats.out
        Various statistics for transcripts with hits
        e.g. gene ID, gene name, gene biotype, # unique complete hits,
        # unique all hits, # complete hits, # all hits

    NOTE that function has been tested with .gtf files from Ensembl. .gtf files
    from different sources sometimes have a slightly different format, which
    could lead to incompatibilities / errors. See test files for format that
    works.
    Some tested Ensembl GTF files:
    Homo_sapiens.GRCh38.97.gtf.gz
    Mus_musculus.GRCm38.81.gtf.gz
    Mus_musculus.GRCm38.79.gtf.gz

    Requirements:
    bedTools (tested with version 2.29.0)
    GTF file needs to have exons sorted (minus + plus strand exons, see test.gtf
    below as an example). Sorting should be the default (at least for tested
    Ensembl GTF files).

    >>> tr_ids_dic = {"ENST001" : 1, "ENST002" : 1}
    >>> in_bed = "test_data/map_test_in.bed"
    >>> in_gtf = "test_data/map_test_in.gtf"
    >>> comp_uniq_exp = "test_data/map_test_out_all_unique.bed"
    >>> comp_uniq_out = "test_data/map_out/transcript_hits_all_unique.bed"
    >>> tr_stats_exp = "test_data/map_test_out_transcript_stats.out"
    >>> tr_stats_out = "test_data/map_out/hit_transcript_stats.out"
    >>> out_folder = "test_data/map_out"
    >>> convert_genome_positions_to_transcriptome(in_bed, out_folder, in_gtf, tr_ids_dic, intersectBed_f=0.5)
    >>> diff_two_files_identical(comp_uniq_exp, comp_uniq_out)
    True
    >>> diff_two_files_identical(tr_stats_exp, tr_stats_out)
    True

    """
    # Check for bedtools.
    assert is_tool("bedtools"), "bedtools not in PATH"
    # Results output folder.
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    # Output files.
    genome_exon_bed = out_folder + "/" + "genomic_exon_coordinates.bed"
    transcript_exon_bed = out_folder + "/" + "transcript_exon_coordinates.bed"
    overlap_out = out_folder + "/" + "hit_exon_overlap.bed"
    complete_transcript_hits_bed = out_folder + "/" + "transcript_hits_complete.bed"
    incomplete_transcript_hits_bed = out_folder + "/" + "transcript_hits_incomplete.bed"
    uniq_complete_out = out_folder + "/" + "transcript_hits_complete_unique.bed"
    uniq_all_out = out_folder + "/" + "transcript_hits_all_unique.bed"
    hit_tr_exons_bed = out_folder + "/" + "hit_transcript_exons.bed"
    hit_tr_stats_out = out_folder + "/" + "hit_transcript_stats.out"
    # Check for unique .bed IDs.
    assert bed_check_unique_ids(in_bed), "in_bed \"%s\" column 4 IDs not unique" % (in_bed)
    # Remove IDs to ignore from transcript IDs dictionary.
    # NOTE(review): mutates the caller's tr_ids_dic in place and raises
    # KeyError if an ignore ID is not present — confirm this is intended.
    if ignore_ids_dic:
        for seq_id in ignore_ids_dic:
            del tr_ids_dic[seq_id]
    # Output genomic exon regions.
    OUTBED = open(genome_exon_bed, "w")
    # Read in exon features from GTF file.
    tr2gene_id_dic = {}
    tr2gene_name_dic = {}
    tr2gene_biotype_dic = {}
    c_gtf_ex_feat = 0
    # dic for sanity checking exon number order.
    tr2exon_nr_dic = {}
    # dic of lists, storing exon lengths and IDs (in exon_number order).
    tr_exon_len_dic = {}
    tr_exon_id_dic = {}
    exon_id_tr_dic = {}
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        feat_pol = cols[6]
        infos = cols[8]
        # Only exon features are used for the mapping.
        if not feature == "exon":
            continue
        # Restrict to standard chromosomes.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Make start coordinate 0-base (BED standard).
        feat_s = feat_s - 1
        # Extract gene ID from infos.
        m = re.search('gene_id "(.+?)"', infos)
        assert m, "gene_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_id = m.group(1)
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        transcript_id = m.group(1)
        # Extract exon number.
        m = re.search('exon_number "(\d+?)"', infos)
        # Try Gencode encoding (exon_number without quotes).
        if not m:
            m = re.search('exon_number (\d+?);', infos)
        assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        exon_nr = int(m.group(1))
        # Extract gene name.
        m = re.search('gene_name "(.+?)"', infos)
        assert m, "gene_name entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_name = m.group(1)
        # Extract gene biotype.
        m = re.search('gene_biotype "(.+?)"', infos)
        # Try Gencode encoding (gene_type).
        if not m:
            m = re.search('gene_type "(.+?)"', infos)
        assert m, "gene_biotype or gene_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        gene_biotype = m.group(1)
        # Check if transcript ID is in transcript dic.
        if not transcript_id in tr_ids_dic:
            continue
        # Check whether exon numbers are incrementing for each transcript ID
        # (required so transcript coordinates accumulate in 5'->3' order).
        if not transcript_id in tr2exon_nr_dic:
            tr2exon_nr_dic[transcript_id] = exon_nr
        else:
            assert tr2exon_nr_dic[transcript_id] < exon_nr, "transcript ID \"%s\" without increasing exon number order in GTF file \"%s\"" %(transcript_id, in_gtf)
            tr2exon_nr_dic[transcript_id] = exon_nr
        # Make exon count 3-digit.
        #add = ""
        #if exon_nr < 10:
        #    add = "00"
        #if exon_nr >= 10 and exon_nr < 100:
        #    add = "0"
        # Count exon entry.
        c_gtf_ex_feat += 1
        # Construct exon ID (transcript ID + exon number).
        exon_id = transcript_id + "_e" + str(exon_nr)
        # Store more infos.
        tr2gene_name_dic[transcript_id] = gene_name
        tr2gene_biotype_dic[transcript_id] = gene_biotype
        tr2gene_id_dic[transcript_id] = gene_id
        exon_id_tr_dic[exon_id] = transcript_id
        # Store exon lengths in dictionary of lists.
        feat_l = feat_e - feat_s
        if not transcript_id in tr_exon_len_dic:
            tr_exon_len_dic[transcript_id] = [feat_l]
        else:
            tr_exon_len_dic[transcript_id].append(feat_l)
        # Store exon IDs in dictionary of lists.
        if not transcript_id in tr_exon_id_dic:
            tr_exon_id_dic[transcript_id] = [exon_id]
        else:
            tr_exon_id_dic[transcript_id].append(exon_id)
        # Output genomic exon region.
        OUTBED.write("%s\t%i\t%i\t%s\t0\t%s\n" % (chr_id,feat_s,feat_e,exon_id,feat_pol))
    OUTBED.close()
    f.close()
    # Check for read-in features.
    assert c_gtf_ex_feat, "no exon features read in from \"%s\"" %(in_gtf)
    # Output transcript exon regions.
    OUTBED = open(transcript_exon_bed, "w")
    tr_exon_starts_dic = {}
    # Calculate transcript exon coordinates from in-order exon lengths.
    for tr_id in tr_exon_len_dic:
        start = 0
        for exon_i, exon_l in enumerate(tr_exon_len_dic[tr_id]):
            exon_id = tr_exon_id_dic[tr_id][exon_i]
            new_end = start + exon_l
            # Store exon transcript start positions (0-based).
            tr_exon_starts_dic[exon_id] = start
            OUTBED.write("%s\t%i\t%i\t%s\t0\t+\n" % (tr_id,start,new_end,exon_id))
            # Set for next exon.
            start = new_end
    OUTBED.close()
    # Get input .bed region lengths and scores.
    id2site_sc_dic = {}
    id2site_len_dic = {}
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_sc = float(cols[4])
            site_pol = cols[5]
            assert site_pol == "+" or site_pol == "-", "invalid strand (in_bed: %s, site_pol: %s)" %(in_bed, site_pol)
            # Check whether score is whole number.
            if int_whole_nr:
                if not site_sc % 1:
                    site_sc = int(site_sc)
            # Store score and length of each genomic input site.
            id2site_sc_dic[site_id] = site_sc
            id2site_len_dic[site_id] = site_e - site_s
    # No-op: the with statement above already closed the file.
    f.close()
    # Number input sites.
    c_in_bed_sites = len(id2site_len_dic)
    # Calculate overlap between genome exon .bed and input .bed.
    intersect_params = "-s -wb -f %f" %(intersectBed_f)
    intersect_bed_files(in_bed, genome_exon_bed, intersect_params, overlap_out)
    # Calculate hit region transcript positions.
    # Store complete and incomplete hits in separate .bed files.
    OUTINC = open(incomplete_transcript_hits_bed, "w")
    OUTCOM = open(complete_transcript_hits_bed, "w")
    c_complete = 0
    c_incomplete = 0
    c_all = 0
    # Count site hits dic.
    c_site_hits_dic = {}
    # ID to site stats dic of lists.
    id2stats_dic = {}
    # ID to hit length dic.
    id2hit_len_dic = {}
    # Transcripts with hits dic.
    match_tr_dic = {}
    # Transcript ID to unique complete hits dic.
    tr2uniq_com_hits_dic = {}
    # Transcript ID to unique all hits dic.
    tr2uniq_all_hits_dic = {}
    # Transcript ID to complete hits dic.
    tr2com_hits_dic = {}
    # Transcript ID to all hits dic.
    tr2all_hits_dic = {}
    # Site ID to transcript ID dic.
    site2tr_id_dic = {}
    # intersectBed -wb output: columns 0-5 = input site, 6-11 = exon region.
    with open(overlap_out) as f:
        for line in f:
            cols = line.strip().split("\t")
            s_gen_hit = int(cols[1])
            e_gen_hit = int(cols[2])
            site_id = cols[3]
            s_gen_exon = int(cols[7])
            e_gen_exon = int(cols[8])
            exon_id = cols[9]
            exon_pol = cols[11]
            c_all += 1
            # Count how many transcriptome matches site has.
            if site_id in c_site_hits_dic:
                c_site_hits_dic[site_id] += 1
            else:
                c_site_hits_dic[site_id] = 1
            # Exon transcript start position (0-based).
            s_tr_exon = tr_exon_starts_dic[exon_id]
            # Hit length.
            l_gen_hit = e_gen_hit - s_gen_hit
            # Site length.
            l_site = id2site_len_dic[site_id]
            # Hit region transcript positions (plus strand).
            hit_tr_s_pos = s_gen_hit - s_gen_exon + s_tr_exon
            hit_tr_e_pos = hit_tr_s_pos + l_gen_hit
            # If exon on reverse (minus) strand, mirror the offset: distance
            # from genomic exon END becomes offset from transcript exon start.
            if exon_pol == "-":
                hit_tr_s_pos = e_gen_exon - e_gen_hit + s_tr_exon
                hit_tr_e_pos = hit_tr_s_pos + l_gen_hit
            # Site score.
            site_sc = id2site_sc_dic[site_id]
            # Transcript ID.
            tr_id = exon_id_tr_dic[exon_id]
            # Store transcript ID for each site ID.
            # In case site ID has several transcript hits, this will be overwritten,
            # but we just use this dic for unique hits so no problem.
            site2tr_id_dic[site_id] = tr_id
            # Store transcript ID has a match.
            match_tr_dic[tr_id] = 1
            # If site score round number, make integer.
            if not site_sc % 1:
                site_sc = int(site_sc)
            site_sc = str(site_sc)
            # Store hit stats list (bed row) for each site.
            bed_row = "%s\t%i\t%i\t%s\t%s\t+" %(tr_id, hit_tr_s_pos, hit_tr_e_pos, site_id, site_sc)
            if not site_id in id2stats_dic:
                id2stats_dic[site_id] = [bed_row]
            else:
                id2stats_dic[site_id].append(bed_row)
            id2hit_len_dic[site_id] = l_gen_hit
            if l_gen_hit == l_site:
                # Output complete hits.
                OUTCOM.write("%s\n" % (bed_row))
                # Count complete hits per transcript.
                if tr_id in tr2com_hits_dic:
                    tr2com_hits_dic[tr_id] += 1
                else:
                    tr2com_hits_dic[tr_id] = 1
            else:
                # Output incomplete hits.
                OUTINC.write("%s\n" % (bed_row))
            # Count all hits per transcript.
            if tr_id in tr2all_hits_dic:
                tr2all_hits_dic[tr_id] += 1
            else:
                tr2all_hits_dic[tr_id] = 1
    OUTCOM.close()
    OUTINC.close()
    # No-op: the with statement above already closed the file.
    f.close()
    # Output unique hits (two files, one for complete hits, other for all).
    OUTUNIALL = open(uniq_all_out, "w")
    OUTUNICOM = open(uniq_complete_out, "w")
    for site_id in c_site_hits_dic:
        c_hits = c_site_hits_dic[site_id]
        # Unique == exactly one transcriptome match.
        if c_hits != 1:
            continue
        l_hit = id2hit_len_dic[site_id]
        l_site = id2site_len_dic[site_id]
        tr_id = site2tr_id_dic[site_id]
        bed_row = id2stats_dic[site_id][0]
        if l_hit == l_site:
            # Store unique + complete hit.
            OUTUNICOM.write("%s\n" % (bed_row))
            if tr_id in tr2uniq_com_hits_dic:
                tr2uniq_com_hits_dic[tr_id] += 1
            else:
                tr2uniq_com_hits_dic[tr_id] = 1
        # Store unique hit (complete or incomplete).
        OUTUNIALL.write("%s\n" % (bed_row))
        if tr_id in tr2uniq_all_hits_dic:
            tr2uniq_all_hits_dic[tr_id] += 1
        else:
            tr2uniq_all_hits_dic[tr_id] = 1
    OUTUNICOM.close()
    OUTUNIALL.close()
    # For all transcripts with mapped regions, store exons.bed + stats.out.
    OUTEXBED = open(hit_tr_exons_bed, "w")
    OUTSTATS = open(hit_tr_stats_out, "w")
    # Statistics out file header.
    OUTSTATS.write("tr_id\tchr\tgen_s\tgen_e\tpol\tgene_id\tgene_name\tgene_biotype\ttr_len\tcomp_hits\tall_hits\tuniq_comp_hits\tuniq_all_hits\n")
    # transcript stats.
    tr2len_dic = {}
    tr2gen_s_dic = {}
    tr2gen_e_dic = {}
    tr2gen_chr_dic = {}
    tr2gen_pol_dic = {}
    with open(genome_exon_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            chr_id = cols[0]
            ex_s = int(cols[1])
            ex_e = int(cols[2])
            ex_id = cols[3]
            ex_pol = cols[5]
            ex_l = ex_e - ex_s
            # Print out exons of transcripts with hits.
            tr_id = exon_id_tr_dic[ex_id]
            # Store transcripts lengths (sum of exon lengths).
            if tr_id in tr2len_dic:
                tr2len_dic[tr_id] += ex_l
            else:
                tr2len_dic[tr_id] = ex_l
            # Store more transcript stats (genomic min start / max end).
            if tr_id in tr2gen_s_dic:
                if ex_s < tr2gen_s_dic[tr_id]:
                    tr2gen_s_dic[tr_id] = ex_s
            else:
                tr2gen_s_dic[tr_id] = ex_s
            if tr_id in tr2gen_e_dic:
                if ex_e > tr2gen_e_dic[tr_id]:
                    tr2gen_e_dic[tr_id] = ex_e
            else:
                tr2gen_e_dic[tr_id] = ex_e
            tr2gen_chr_dic[tr_id] = chr_id
            tr2gen_pol_dic[tr_id] = ex_pol
            if tr_id in match_tr_dic:
                bed_row = "%s\t%i\t%i\t%s\t0\t%s" %(chr_id, ex_s, ex_e, ex_id, ex_pol)
                OUTEXBED.write("%s\n" % (bed_row))
    OUTEXBED.close()
    # Transcript hit statistics.
    for tr_id in match_tr_dic:
        gene_id = tr2gene_id_dic[tr_id]
        gene_biotype = tr2gene_biotype_dic[tr_id]
        gene_name = tr2gene_name_dic[tr_id]
        tr_l = tr2len_dic[tr_id]
        tr_chr = tr2gen_chr_dic[tr_id]
        tr_pol = tr2gen_pol_dic[tr_id]
        tr_gen_s = tr2gen_s_dic[tr_id]
        tr_gen_e = tr2gen_e_dic[tr_id]
        c_com_hits = 0
        c_all_hits = 0
        c_uniq_com_hits = 0
        c_uniq_all_hits = 0
        if tr_id in tr2com_hits_dic:
            c_com_hits = tr2com_hits_dic[tr_id]
        if tr_id in tr2all_hits_dic:
            c_all_hits = tr2all_hits_dic[tr_id]
        if tr_id in tr2uniq_com_hits_dic:
            c_uniq_com_hits = tr2uniq_com_hits_dic[tr_id]
        if tr_id in tr2uniq_all_hits_dic:
            c_uniq_all_hits = tr2uniq_all_hits_dic[tr_id]
        stats_row = "%s\t%s\t%i\t%i\t%s\t%s\t%s\t%s\t%i\t%i\t%i\t%i\t%i" %(tr_id, tr_chr, tr_gen_s, tr_gen_e, tr_pol, gene_id, gene_name, gene_biotype, tr_l, c_com_hits, c_all_hits, c_uniq_com_hits, c_uniq_all_hits)
        OUTSTATS.write("%s\n" % (stats_row))
    OUTSTATS.close()
################################################################################
def bed_get_transcript_annotations_from_gtf(tr_ids_dic, in_bed, in_gtf, out_tra,
                                            stats_dic=None,
                                            codon_annot=False,
                                            border_annot=False,
                                            split_size=60,
                                            merge_split_regions=True):
    """
    Get transcript region annotations for genomic BED file in_bed, given
    a GTF file with following annotations:
    five_prime_utr
    CDS
    three_prime_utr
    start_codon (optionally)
    stop_codon (optionally)
    None of these (default if no overlap)

    tr_ids_dic:
        Transcript IDs dictionary, defines from which transcripts to use
        annotations. Be sure to not use overlapping transcripts, otherwise
        site annotations might be merged ones from different transcripts.
    stats_dic:
        If not None, extract statistics from transcript annotations and store
        in stats_dic.
    codon_annot:
        Add start + stop codon region labels to regions overlapping with
        annotated start or stop codons (from in_gtf). S: start, E: stop
    border_annot:
        Add Transcript and exon border labels (from in_gtf).
        A: transcript start nt, Z: transcript end nt, B: exon border nts
    split_size:
        Split size for outputting labels (FASTA style row width).
    merge_split_regions:
        If True, merge labels from IDs with format id1_p1, id1_p2 .. into one.
        Also works for _e1, _e2 .. labels.
        The transcript regions have to be mapped to genome first, where regions
        across exon borders can be split up, resulting in ids: id1_p1, id1_p2 ..
        Annotate these regions, then later merge _p1, _p2 before outputting to
        .tra file.
        in_bed can also be exon regions, with id1_e1, id1_e2 .. where IDs
        are transcript IDs.

    Use following labels:
    five_prime_utr -> F, CDS -> C, three_prime_utr -> T, none -> N
    start_codon -> S, stop_codon -> E

    Output .tra file with format:
    >transcript_region_id
    FFFFSSSCCCC
    CCCCCCCCCCC
    ...

    >>> tr_ids_dic = {"ENST1": 1, "ENST2": 1}
    >>> in_bed = "test_data/test_tr_annot.bed"
    >>> in_gtf = "test_data/test_tr_annot.gtf"
    >>> tmp_tra = "test_data/test_tr_annot.tmp.tra"
    >>> tmp_codon_tra = "test_data/test_tr_annot_codons.tmp.tra"
    >>> exp_tra = "test_data/test_tr_annot.exp.tra"
    >>> exp_codon_tra = "test_data/test_tr_annot.codons.exp.tra"
    >>> bed_get_transcript_annotations_from_gtf(tr_ids_dic, in_bed, in_gtf, tmp_tra)
    >>> diff_two_files_identical(tmp_tra, exp_tra)
    True
    >>> bed_get_transcript_annotations_from_gtf(tr_ids_dic, in_bed, in_gtf, tmp_codon_tra, codon_annot=True)
    >>> diff_two_files_identical(tmp_codon_tra, exp_codon_tra)
    True

    """
    # Temp .bed files (random names) for storing genomic transcript
    # annotations and intersectBed output.
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    random_id = uuid.uuid1()
    tmp_out = str(random_id) + ".tmp.out"
    # Initialize label statistics (per-position counts; S/E/A/Z/B are
    # counted per site, see stats section at the end).
    if stats_dic is not None:
        stats_dic["F"] = 0
        stats_dic["C"] = 0
        stats_dic["T"] = 0
        stats_dic["N"] = 0
        stats_dic["total_pos"] = 0
        # Count sites with these symbols.
        if codon_annot:
            stats_dic["S"] = 0
            stats_dic["E"] = 0
        if border_annot:
            stats_dic["A"] = 0
            stats_dic["Z"] = 0
            stats_dic["B"] = 0
    # Feature -> label dictionary.
    feat_dic = {}
    feat_dic["five_prime_utr"] = "F"
    feat_dic["CDS"] = "C"
    feat_dic["three_prime_utr"] = "T"
    # Since CDS feature is separate from codons, make them part of CDS too.
    feat_dic["start_codon"] = "C"
    feat_dic["stop_codon"] = "C"
    # If codon labels should be added.
    if codon_annot:
        feat_dic["start_codon"] = "S"
        feat_dic["stop_codon"] = "E"
    # If border (transcript + exon) labels should be added.
    if border_annot:
        feat_dic["transcript"] = 1
        feat_dic["exon"] = 1
    # Read in in_bed, store start + end coordinates.
    id2s_dic = {}
    id2e_dic = {}
    # Site (core) ID -> number of split parts.
    id2parts_dic = {}
    # Store position labels list for each site in dic (default "N").
    id2labels_dic = {}
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            # Check if site ID is split site ID with _e or _p suffix;
            # if so, count parts under the core ID.
            m = re.search(r'(.+)_[pe]\d+$', site_id)
            if m:
                core_id = m.group(1)
                if core_id in id2parts_dic:
                    id2parts_dic[core_id] += 1
                else:
                    id2parts_dic[core_id] = 1
            else:
                assert site_id not in id2parts_dic, "non-unique site ID \"%s\" in in_bed" %(site_id)
                id2parts_dic[site_id] = 1
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            site_l = site_e - site_s
            # Default label "N" for every position of the site.
            id2labels_dic[site_id] = ["N"]*site_l
    assert id2s_dic, "given in_bed \"%s\" empty?" %(in_bed)
    # Get transcript annotations from GTF and output them as BED regions.
    gtf_write_transcript_annotations_to_bed(tr_ids_dic, in_gtf, tmp_bed,
                                            set_feat_dic=feat_dic,
                                            border_annot=border_annot,
                                            codon_annot=codon_annot)
    # Preferred labels, i.e. do not overwrite these if present at position.
    pref_labels_dic = {}
    if codon_annot:
        pref_labels_dic["S"] = 1
        pref_labels_dic["E"] = 1
    if border_annot:
        pref_labels_dic["A"] = 1
        pref_labels_dic["Z"] = 1
        pref_labels_dic["B"] = 1
    # Run overlap calculation to get exon overlapping regions.
    intersect_params = "-s -wb"
    intersect_bed_files(in_bed, tmp_bed, intersect_params, tmp_out)
    """
    Example output:
    $ intersectBed -a sites.bed -b annot.bed -s -wb
    chr1	1000	1020	site1	0	+	chr1	980	1020	F	0	+
    chr1	1020	1023	site1	0	+	chr1	1020	1023	S	0	+
    chr1	1020	1050	site1	0	+	chr1	1020	1500	C	0	+
    """
    # Transfer overlapping annotation labels onto the per-position lists.
    with open(tmp_out) as f:
        for line in f:
            cols = line.strip().split("\t")
            s = int(cols[1]) + 1  # Make one-based.
            e = int(cols[2])
            site_id = cols[3]
            site_s = id2s_dic[site_id] + 1  # Make one-based.
            site_e = id2e_dic[site_id]
            site_pol = cols[5]
            label = cols[9]
            # + case.
            if site_pol == "+":
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index.
                        li = i - site_s
                        if id2labels_dic[site_id][li] not in pref_labels_dic:
                            id2labels_dic[site_id][li] = label
            else:
                # - case: index from the site end (labels stored 5'->3').
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index.
                        li = site_e - i
                        if id2labels_dic[site_id][li] not in pref_labels_dic:
                            id2labels_dic[site_id][li] = label
    # Output transcript region annotations to .tra file.
    OUTLAB = open(out_tra,"w")
    if merge_split_regions:
        # Merge split regions.
        for site_id in id2parts_dic:
            # Parts count.
            part_c = id2parts_dic[site_id]
            label_str = ""
            # For one-part regions, just output.
            if part_c == 1:
                # List to string.
                label_str = "".join(id2labels_dic[site_id])
            else:
                # For split regions, assemble parts (1-based part numbers)
                # and output.
                new_label_list = []
                for i in range(1, part_c + 1):
                    part_id = site_id + "_p%i" %(i)
                    if part_id not in id2labels_dic:
                        # Try exon ID.
                        part_id = site_id + "_e%i" %(i)
                        if part_id not in id2labels_dic:
                            # BUGFIX: site_id is a string, so use %s (was %i,
                            # which raised TypeError instead of this message).
                            assert False, "exon or part ID for site ID \"%s\" (part# %i) missing in id2labels_dic" %(site_id, i)
                    new_label_list += id2labels_dic[part_id]
                assert new_label_list, "merging split region label lists failed"
                # List to string.
                label_str = "".join(new_label_list)
            # New FASTA style output.
            OUTLAB.write(">%s\n" %(site_id))
            for i in range(0, len(label_str), split_size):
                OUTLAB.write("%s\n" %((label_str[i:i+split_size])))
    else:
        # Do not merge split regions, just output labels for each site.
        for site_id in id2labels_dic:
            # List to string.
            label_str = "".join(id2labels_dic[site_id])
            OUTLAB.write(">%s\n" %(site_id))
            for i in range(0, len(label_str), split_size):
                OUTLAB.write("%s\n" %((label_str[i:i+split_size])))
    OUTLAB.close()
    # Optionally collect label statistics from the written .tra file.
    if stats_dic:
        tra_dic = read_cat_feat_into_dic(out_tra)
        for seq_id in tra_dic:
            label_str = tra_dic[seq_id]
            stats_dic["total_pos"] += len(label_str)
            # Count occurences (+1 for each site with label) for these labels.
            occ_labels = ["S", "E", "A", "Z", "B"]
            for ocl in occ_labels:
                # Plain substring test (labels are single literal chars).
                if ocl in label_str:
                    stats_dic[ocl] += 1
            # Per-position counts for the remaining labels.
            for l in label_str:
                if l not in occ_labels:
                    stats_dic[l] += 1
    # Take out the trash.
    litter_street = True
    if litter_street:
        if os.path.exists(tmp_bed):
            os.remove(tmp_bed)
        if os.path.exists(tmp_out):
            os.remove(tmp_out)
################################################################################
def get_transcript_border_annotations(tr_ids_dic, in_gtf, out_bed,
                                      append=False):
    """
    Get transcript border annotations and write border positions to
    out_bed BED. Additional transcript annotations include:
    A : transcript start
    Z : transcript end
    B : Exon border position

    tr_ids_dic:
        Transcript IDs (keys) for which to output border positions.
    in_gtf:
        GTF file (plain text, or gzipped with .gz extension) with
        transcript + exon features.
    append:
        If True, append content to out_bed, instead of overwriting any
        existing out_bed.

    Each output row is a single-nucleotide BED region with the label
    (A/Z/B) in column 4.

    >>> tr_ids_dic = {'ENST1': 1, 'ENST2': 1, 'ENST3': 1}
    >>> in_gtf = "test_data/test_border_annot.gtf"
    >>> out_exp_bed = "test_data/test_border_annot.exp.bed"
    >>> out_tmp_bed = "test_data/test_border_annot.tmp.bed"
    >>> get_transcript_border_annotations(tr_ids_dic, in_gtf, out_tmp_bed)
    >>> diff_two_files_identical(out_tmp_bed, out_exp_bed)
    True

    """
    # Checker.
    assert tr_ids_dic, "given dictionary tr_ids_dic empty"
    # Get exon counts for transcripts from GTF.
    tr_exc_dic = gtf_extract_exon_numbers(in_gtf, tr_ids_dic=tr_ids_dic)
    # Features to look at.
    feat_dic = {'transcript': 1, 'exon': 1}
    # Extract transcript border annotations from in_gtf.
    if append:
        TBAOUT = open(out_bed, "a")
    else:
        TBAOUT = open(out_bed, "w")
    # Count processed features.
    c_out = 0
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        feat_pol = cols[6]
        infos = cols[8]
        # Feature check.
        if feature not in feat_dic:
            continue
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        tr_id = m.group(1)
        # Only features from selected transcripts.
        if not tr_id in tr_ids_dic:
            continue
        # Get exon count for transcript.
        assert tr_id in tr_exc_dic, "transcript ID \"%s\" not in tr_exc_dic" %(tr_id)
        exc = tr_exc_dic[tr_id]
        # Restrict to standard chromosomes.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Make start coordinate 0-base (BED standard).
        feat_s = feat_s - 1
        # Get transcript start (A) and end (Z) positions, as 1-nt regions.
        if feature == "transcript":
            # Start position.
            s_start = feat_s
            e_start = feat_s + 1
            # End position.
            s_end = feat_e - 1
            e_end = feat_e
            # On minus strand, transcript start is the genomic end nt.
            if feat_pol == "-":
                s_start = feat_e - 1
                e_start = feat_e
                s_end = feat_s
                e_end = feat_s + 1
            # Output positions to BED.
            TBAOUT.write("%s\t%i\t%i\tA\t0\t%s\n" %(chr_id, s_start, e_start, feat_pol))
            TBAOUT.write("%s\t%i\t%i\tZ\t0\t%s\n" %(chr_id, s_end, e_end, feat_pol))
        # Get exon border (B) positions.
        if feature == "exon":
            # For single exon transcripts, no exon borders between A + Z.
            if exc == 1:
                continue
            # Extract exon number.
            m = re.search('exon_number "(\d+?)"', infos)
            # Try Gencode encoding (exon_number without quotes).
            if not m:
                m = re.search('exon_number (\d+?);', infos)
            assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            exon_nr = int(m.group(1))
            # Get infos based on exon number. exon_number counts 5'->3', so
            # on the minus strand "first exon" lies at the genomic end.
            if exon_nr == 1:
                # First exon: only its 3'-side border is an exon border.
                s_end = feat_e - 1
                e_end = feat_e
                if feat_pol == "-":
                    s_end = feat_s
                    e_end = feat_s + 1
                TBAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_end, e_end, feat_pol))
            elif exc == exon_nr:
                # Last exon: only its 5'-side border is an exon border.
                s_start = feat_s
                e_start = feat_s + 1
                if feat_pol == "-":
                    s_start = feat_e - 1
                    e_start = feat_e
                TBAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_start, e_start, feat_pol))
            else:
                # In-between exon: both ends are exon borders.
                s_start = feat_s
                e_start = feat_s + 1
                s_end = feat_e - 1
                e_end = feat_e
                if feat_pol == "-":
                    s_start = feat_e - 1
                    e_start = feat_e
                    s_end = feat_s
                    e_end = feat_s + 1
                TBAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_start, e_start, feat_pol))
                TBAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_end, e_end, feat_pol))
        # Output labeled region.
        c_out += 1
    f.close()
    TBAOUT.close()
    assert c_out, "no transcript or exon features found or output"
################################################################################
def gtf_write_transcript_annotations_to_bed(tr_ids_dic, in_gtf, out_bed,
                                            set_feat_dic=False,
                                            border_annot=False,
                                            codon_annot=False):
    """
    Extract transcript region annotations from in_gtf GTF file and store
    annotations as BED regions in out_bed.
    Get transcript region annotations for genomic BED file in_bed, given
    a GTF file with following annotations:
    five_prime_utr
    CDS
    three_prime_utr
    start_codon (optionally)
    stop_codon (optionally)
    Use following labels:
    five_prime_utr -> F, CDS -> C, three_prime_utr -> T
    start_codon -> S, stop_codon -> E
    Transcript start -> A
    Transcript end -> Z
    Exon border -> B
    tr_ids_dic:
        Transcript IDs dictionary, defines from which transcripts to use
        annotations. Be sure to not use overlapping transcripts, otherwise
        site annotations might be merged ones from different transcripts.
    set_feat_dic:
        Overwrite features dictionary defined inside function, using the
        supplied one set_feat_dic.
    border_annot:
        Also output transcript + exon border positions to out_bed.
        Exon border labels (B) are added both to exon start + end, unless
        it is the first or last or the only exon (3 distinctions).
    codon_annot:
        Also output start_codon and stop_codon features.
    out_bed example output (notice labels as column 4 IDs):
    chr1	980	1020	F	0	+
    chr1	1020	1023	S	0	+
    chr1	1020	1500	C	0	+
    ...
    >>> in_gtf = "test_data/test_tr_annot.gtf"
    >>> tr_ids_dic = {"ENST1": 1, "ENST2": 1}
    >>> out_bed = "test_data/test_tr_annot_gtf.tmp.bed"
    >>> exp_bed = "test_data/test_tr_annot_gtf.exp.bed"
    >>> gtf_write_transcript_annotations_to_bed(tr_ids_dic, in_gtf, out_bed,codon_annot=True, border_annot=True)
    >>> diff_two_files_identical(out_bed, exp_bed)
    True

    """
    # Feature dictionary: maps GTF feature name -> one-letter BED label.
    feat_dic = {}
    feat_dic["five_prime_utr"] = "F"
    feat_dic["CDS"] = "C"
    feat_dic["three_prime_utr"] = "T"
    # Since CDS feature is separate from codons, make them part of CDS too.
    feat_dic["start_codon"] = "C"
    feat_dic["stop_codon"] = "C"
    # If start / stop codon annotations should be added too.
    if codon_annot:
        feat_dic["start_codon"] = "S"
        feat_dic["stop_codon"] = "E"
    if border_annot:
        # Placeholder values; transcript/exon rows get A/Z/B labels below.
        feat_dic["transcript"] = 1
        feat_dic["exon"] = 1
    # Overwrite feat_dic if given to function.
    if set_feat_dic:
        feat_dic = set_feat_dic
    # If border (exon + transcript) annotations should be added too.
    tr_exc_dic = {}
    if border_annot:
        # Get exon counts for transcripts from GTF (helper defined elsewhere
        # in this module).
        tr_exc_dic = gtf_extract_exon_numbers(in_gtf, tr_ids_dic=tr_ids_dic)
    # Extract transcript annotations from in_gtf.
    TRAOUT = open(out_bed, "w")
    # Number of BED rows written (asserted non-zero at the end).
    c_out = 0
    # Open GTF either as .gz or as text file.
    if re.search(".+\.gz$", in_gtf):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    for line in f:
        # Skip header.
        if re.search("^#", line):
            continue
        cols = line.strip().split("\t")
        chr_id = cols[0]
        feature = cols[2]
        feat_s = int(cols[3])
        feat_e = int(cols[4])
        feat_pol = cols[6]
        infos = cols[8]
        # Extract only features in feat_dic.
        if feature not in feat_dic:
            continue
        label = feat_dic[feature]
        # Extract transcript ID.
        m = re.search('transcript_id "(.+?)"', infos)
        assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
        tr_id = m.group(1)
        # Only features from selected transcripts.
        if tr_id not in tr_ids_dic:
            continue
        # Restrict to standard chromosomes.
        new_chr_id = check_convert_chr_id(chr_id)
        if not new_chr_id:
            continue
        else:
            chr_id = new_chr_id
        # Make start coordinate 0-base (BED standard).
        feat_s = feat_s - 1
        # transcript/exon rows are border annotations, handled separately.
        if feature == "transcript" or feature == "exon":
            if not border_annot:
                continue
            # Get exon count for transcript.
            assert tr_id in tr_exc_dic, "transcript ID \"%s\" not in tr_exc_dic" %(tr_id)
            exc = tr_exc_dic[tr_id]
            # Get transcript start and end positions.
            if feature == "transcript":
                # Start position.
                s_start = feat_s
                e_start = feat_s + 1
                # End position.
                s_end = feat_e - 1
                e_end = feat_e
                # On minus strand, biological start/end are swapped.
                if feat_pol == "-":
                    s_start = feat_e - 1
                    e_start = feat_e
                    s_end = feat_s
                    e_end = feat_s + 1
                # Output positions to BED (A = transcript start, Z = end).
                TRAOUT.write("%s\t%i\t%i\tA\t0\t%s\n" %(chr_id, s_start, e_start, feat_pol))
                TRAOUT.write("%s\t%i\t%i\tZ\t0\t%s\n" %(chr_id, s_end, e_end, feat_pol))
            # Get exon border positions.
            if feature == "exon":
                # For single exon transcripts, no exon borders between A + Z.
                if exc == 1:
                    continue
                # Extract exon number.
                m = re.search('exon_number "(\d+?)"', infos)
                # Try Gencode encoding.
                if not m:
                    m = re.search('exon_number (\d+?);', infos)
                assert m, "exon_number entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
                exon_nr = int(m.group(1))
                # Get infos based on exon number.
                if exon_nr == 1:
                    # First exon: only its downstream border gets a B label.
                    s_end = feat_e - 1
                    e_end = feat_e
                    if feat_pol == "-":
                        s_end = feat_s
                        e_end = feat_s + 1
                    TRAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_end, e_end, feat_pol))
                elif exc == exon_nr:
                    # Last exon: only its upstream border gets a B label.
                    s_start = feat_s
                    e_start = feat_s + 1
                    if feat_pol == "-":
                        s_start = feat_e - 1
                        e_start = feat_e
                    TRAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_start, e_start, feat_pol))
                else:
                    # In-between exon: B labels at both borders.
                    s_start = feat_s
                    e_start = feat_s + 1
                    s_end = feat_e - 1
                    e_end = feat_e
                    if feat_pol == "-":
                        s_start = feat_e - 1
                        e_start = feat_e
                        s_end = feat_s
                        e_end = feat_s + 1
                    TRAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_start, e_start, feat_pol))
                    TRAOUT.write("%s\t%i\t%i\tB\t0\t%s\n" %(chr_id, s_end, e_end, feat_pol))
            c_out += 1
            # Skip rest.
            continue
        c_out += 1
        # Output labeled region.
        TRAOUT.write("%s\t%i\t%i\t%s\t0\t%s\n" %(chr_id, feat_s, feat_e, label, feat_pol))
    f.close()
    TRAOUT.close()
    assert c_out, "no transcript annotation regions output"
################################################################################
def get_seq_len_list_from_dic(seqs_dic):
    """
    Return the list of sequence lengths for a dictionary of sequences
    (order follows dictionary iteration order).

    >>> seqs_dic = {'seq1':'ACGT', 'seq2': 'ACGTACGT'}
    >>> get_seq_len_list_from_dic(seqs_dic)
    [4, 8]

    """
    assert seqs_dic, "sequences dictionary seqs_dic empty"
    len_list = [len(seq) for seq in seqs_dic.values()]
    assert len_list, "sequence lengths list len_list empty"
    return len_list
################################################################################
def calc_seq_entropy(seq_l, ntc_dic):
    """
    Compute the Shannon entropy of a sequence, given its length seq_l and
    a dictionary ntc_dic holding the nucleotide counts of the sequence.
    Formula (see CE formula) taken from:
    https://www.ncbi.nlm.nih.gov/pubmed/15215465

    >>> seq_l = 8
    >>> ntc_dic = {'A': 8, 'C': 0, 'G': 0, 'U': 0}
    >>> calc_seq_entropy(seq_l, ntc_dic)
    0
    >>> ntc_dic = {'A': 4, 'C': 4, 'G': 0, 'U': 0}
    >>> calc_seq_entropy(seq_l, ntc_dic)
    0.5
    >>> ntc_dic = {'A': 2, 'C': 2, 'G': 2, 'U': 2}
    >>> calc_seq_entropy(seq_l, ntc_dic)
    1.0

    """
    # Alphabet size: 4 for DNA or RNA.
    k = 4
    # Accumulate sum of freq * log_k(freq) over non-zero counts.
    raw_sum = 0
    for count in ntc_dic.values():
        if count != 0:
            freq = count / seq_l
            raw_sum += freq * log(freq, k)
    # raw_sum is <= 0; flip sign (keep integer 0 for zero entropy).
    if raw_sum == 0:
        return 0
    return -1 * raw_sum
################################################################################
def dic_sum_up_lengths(in_dic):
    """
    Given a dictionary with strings or numbers as values, sum up the
    numbers / string lengths and return the total length.
    Currently works for integer numbers and strings.

    in_dic:
        Dictionary with string or integer values (can be mixed).

    Raises AssertionError if in_dic is empty or a value is neither a
    string nor an integer.

    >>> in_dic = {'e1': 5, 'e2': 10}
    >>> dic_sum_up_lengths(in_dic)
    15
    >>> in_dic = {'e1': 'ACGT', 'e2': 'ACGTACGT'}
    >>> dic_sum_up_lengths(in_dic)
    12

    """
    assert in_dic, "given dictionary in_dic empty"
    # BUGFIX: accumulator was named "sum", shadowing the builtin sum().
    total = 0
    for e in in_dic:
        v = in_dic[e]
        if isinstance(v, str):
            total += len(v)
        elif isinstance(v, int):
            total += v
        else:
            assert False, "non-string or non-integer dictionary value given"
    return total
################################################################################
def seqs_dic_count_nt_freqs(seqs_dic,
                            rna=False,
                            convert_to_uc=False,
                            count_dic=False):
    """
    Count how often each nucleotide appears over all sequences in
    seqs_dic and return the resulting counts dictionary.
    By default only the uppercase DNA characters A, C, G, T are counted.

    rna:
        Use RNA alphabet (A,C,G,U) instead of the DNA alphabet.
    convert_to_uc:
        Convert sequences to uppercase before counting.
    count_dic:
        Custom counting dictionary; only characters present as keys are
        counted, and counts are added on top of existing values.

    >>> seqs_dic = {'s1': 'AAAA', 's2': 'CCCGGT'}
    >>> seqs_dic_count_nt_freqs(seqs_dic)
    {'A': 4, 'C': 3, 'G': 2, 'T': 1}
    >>> seqs_dic_count_nt_freqs(seqs_dic, rna=True)
    {'A': 4, 'C': 3, 'G': 2, 'U': 0}

    """
    assert seqs_dic, "given dictionary seqs_dic empty"
    if not count_dic:
        alphabet = 'ACGU' if rna else 'ACGT'
        count_dic = {nt: 0 for nt in alphabet}
    for seq in seqs_dic.values():
        if convert_to_uc:
            seq = seq.upper()
        # Accumulate counts in place via the single-sequence counter.
        seq_count_nt_freqs(seq, rna=rna, count_dic=count_dic)
    return count_dic
################################################################################
def seqs_dic_count_chars(seqs_dic):
    """
    Count how many times each character appears over all sequences in
    the given dictionary and return the character -> count dictionary.

    >>> seqs_dic = {'s1': 'ABCC', 's2': 'ABCD'}
    >>> seqs_dic_count_chars(seqs_dic)
    {'A': 2, 'B': 2, 'C': 3, 'D': 1}

    """
    assert seqs_dic, "given seqs_dic empty"
    cc_dic = {}
    for seq in seqs_dic.values():
        for ch in seq:
            cc_dic[ch] = cc_dic.get(ch, 0) + 1
    assert cc_dic, "cc_dic empty"
    return cc_dic
################################################################################
def seq_count_nt_freqs(seq,
                       rna=False,
                       count_dic=False):
    """
    Count nucleotide (character) frequencies in the given sequence seq
    and return count_dic with the frequencies.
    If count_dic is given, counts are added on top of its values, and
    only its keys are counted (note that counting is case-sensitive).

    rna:
        Use RNA alphabet (A,C,G,U) instead of the DNA alphabet.
    count_dic:
        Custom dictionary for counting only characters in this
        dictionary + adding counts to this dictionary.

    >>> seq = 'AAAACCCGGT'
    >>> seq_count_nt_freqs(seq)
    {'A': 4, 'C': 3, 'G': 2, 'T': 1}
    >>> seq = 'acgtacgt'
    >>> seq_count_nt_freqs(seq)
    {'A': 0, 'C': 0, 'G': 0, 'T': 0}

    """
    assert seq, "given sequence string seq empty"
    if not count_dic:
        alphabet = 'ACGU' if rna else 'ACGT'
        count_dic = {nt: 0 for nt in alphabet}
    # Iterate characters directly; skip anything not in the dictionary.
    for nt in seq:
        if nt in count_dic:
            count_dic[nt] += 1
    return count_dic
################################################################################
def fasta_get_repeat_region_annotations(seqs_dic, out_rra,
                                        split_size=60,
                                        stats_dic=None):
    """
    Write repeat region annotations for the sequences in seqs_dic to
    out_rra in FASTA-like format.
    Lowercase letters mark repeat regions (e.g. soft-masked output of
    twoBitToFa without -noMask), uppercase letters non-repeat regions.
    Each sequence position is labeled R (repeat, lowercase) or
    N (no repeat, uppercase).

    split_size:
        Split size for outputting labels (FASTA style row width).
    stats_dic:
        If not None, count R / N / total positions into stats_dic.

    Output format example:
    >seq1
    NNRRRRNN
    >seq2
    NNNNNNN
    ...
    >>> seqs_dic = {'seq1': 'ACacgtAC', 'seq2': 'ACGUACG', 'seq3': 'acguacguu'}
    >>> out_exp_rra = "test_data/test8.exp.rra"
    >>> out_tmp_rra = "test_data/test8.tmp.rra"
    >>> fasta_get_repeat_region_annotations(seqs_dic, out_tmp_rra)
    >>> diff_two_files_identical(out_tmp_rra, out_exp_rra)
    True

    """
    assert seqs_dic, "given dictionary seqs_dic empty"
    if stats_dic is not None:
        stats_dic["R"] = 0
        stats_dic["N"] = 0
        stats_dic["total_pos"] = 0
    OUTRRA = open(out_rra, "w")
    for seq_id, seq in seqs_dic.items():
        # Position-wise labels: R for lowercase (repeat), N otherwise.
        labels = "".join("R" if ch.islower() else "N" for ch in seq)
        OUTRRA.write(">%s\n" %(seq_id))
        # Wrap label string into rows of split_size characters.
        for i in range(0, len(labels), split_size):
            OUTRRA.write("%s\n" %((labels[i:i+split_size])))
        if stats_dic:
            for lab in labels:
                stats_dic["total_pos"] += 1
                stats_dic[lab] += 1
    OUTRRA.close()
################################################################################
def seqs_dic_calc_entropies(seqs_dic,
                            rna=True,
                            return_dic=False,
                            uc_part_only=True):
    """
    Given a dictionary of sequences, calculate entropies for each sequence
    and return list of entropy values.

    seqs_dic:
        Dictionary with sequences.
    rna:
        Use RNA alphabet for counting (uppercase chars only)
    return_dic:
        If True, return sequence ID -> entropy dictionary instead of a
        list of entropy values.
    uc_part_only:
        Calculate entropy only for uppercase part of sequence

    >>> seqs_dic = {'seq1': 'AAAAAAAA', 'seq2': 'AAAACCCC', 'seq3': 'AACCGGUU'}
    >>> seqs_dic_calc_entropies(seqs_dic)
    [0, 0.5, 1.0]

    """
    assert seqs_dic, "given dictionary seqs_dic empty"
    entr_list = []
    if return_dic:
        entr_dic = {}
    for seq_id in seqs_dic:
        seq = seqs_dic[seq_id]
        seq_l = len(seq)
        new_seq = seq
        # If only uppercase part should be used.
        if uc_part_only:
            # Expects one contiguous uppercase stretch, optionally flanked
            # by lowercase context.
            m = re.search("[acgtu]*([ACGTU]+)[acgtu]*", seq)
            assert m, "uppercase sequence part extraction failed for sequence ID \"%s\" and sequence \"%s\"" %(seq_id, seq)
            new_seq = m.group(1)
            seq_l = len(new_seq)
        # Make uppercase (otherwise seq_l not correct).
        # NOTE(review): assumed to run for every sequence (not only inside
        # the uc_part_only branch) so counting below sees uppercase chars
        # — confirm against original indentation.
        new_seq = new_seq.upper()
        # Get nt count dic.
        count_dic = seq_count_nt_freqs(new_seq, rna=rna)
        # Calculate sequence entropy.
        seq_entr = calc_seq_entropy(seq_l, count_dic)
        if return_dic:
            entr_dic[seq_id] = seq_entr
        else:
            entr_list.append(seq_entr)
    if return_dic:
        return entr_dic
    else:
        return entr_list
################################################################################
def ntc_dic_to_ratio_dic(ntc_dic,
                         perc=False):
    """
    Convert a dictionary of nucleotide counts into a dictionary of
    nucleotide ratios (count / total nucleotide number).

    perc:
        If True, make percentages out of ratios (*100).

    >>> ntc_dic = {'A': 5, 'C': 2, 'G': 2, 'T': 1}
    >>> ntc_dic_to_ratio_dic(ntc_dic)
    {'A': 0.5, 'C': 0.2, 'G': 0.2, 'T': 0.1}

    """
    assert ntc_dic, "given dictionary ntc_dic empty"
    # Total nucleotide count.
    total_n = sum(ntc_dic.values())
    ntr_dic = {}
    for nt, cnt in ntc_dic.items():
        ratio = cnt / total_n
        ntr_dic[nt] = ratio * 100 if perc else ratio
    return ntr_dic
################################################################################
def create_set_lengths_box_plot(pos_len_list, neg_len_list, out_plot,
                                disable_title=False,
                                theme=2,
                                scale_zero_max=False):
    """
    Create a box plot, to compare sequence lengths found in positive
    and negative set.
    Given two lists of lengths (positives, negatives), create a dataframe
    using Pandas, and use seaborn for plotting.
    Store plot in out_plot.

    disable_title:
        If True, do not draw the plot title.
    theme:
        1: default seaborn style, 2: midnight blue (transparent background).
    scale_zero_max:
        If True, scale y-axis from 0 to the maximum length rounded up to
        the next multiple of 10.
    """
    # Checker.
    assert pos_len_list, "given list pos_len_list empty"
    assert neg_len_list, "given list neg_len_list empty"
    if scale_zero_max:
        # Get maximum length for scaling.
        pos_max = max(pos_len_list)
        neg_max = max(neg_len_list)
        max_l = pos_max
        if pos_max < neg_max:
            max_l = neg_max
        # Get next highest number % 10.
        max_y = max_l
        while max_y % 10:
            max_y += 1
    # Make pandas dataframe (long format: one row per length value).
    pos_label = "Positives"
    neg_label = "Negatives"
    data = {'set': [], 'length': []}
    pos_c = len(pos_len_list)
    neg_c = len(neg_len_list)
    data['set'] += pos_c*[pos_label] + neg_c*[neg_label]
    data['length'] += pos_len_list + neg_len_list
    df = pd.DataFrame (data, columns = ['set','length'])
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="length", data=df, palette=['cyan','cyan'],
                    width=0.7, linewidth = 1.5, boxprops=dict(alpha=.7))
        # Modify.
        ax.set_ylabel("Length (nt)",fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        if scale_zero_max:
            ax.set_ylim([0,max_y])
        if not disable_title:
            ax.axes.set_title("Site length distribution", fontsize=20)
        ax.set(xlabel=None)
        # Store plot.
        fig.savefig(out_plot, dpi=125, bbox_inches='tight')
    elif theme == 2:
        """
        Midnight Blue theme.
        ffffff : white
        190250 : midnight blue
        fcc826 : yellowish
        fd3b9d : pinkish
        2f19f3 : dash blue
        """
        # Theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Custom flier (outlier) edge and face colors.
        flierprops = dict(markersize=5, markerfacecolor=box_color, markeredgecolor=text_color)
        boxprops = dict(color=box_color, edgecolor=text_color)
        medianprops = dict(color=text_color)
        meanprops = dict(color=text_color)
        whiskerprops = dict(color=text_color)
        capprops = dict(color=text_color)
        # Make plot (sns.set mutates global rc state for following plots).
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="length", data=df,
                    flierprops=flierprops,
                    boxprops=boxprops,
                    meanprops=meanprops,
                    medianprops=medianprops,
                    whiskerprops=whiskerprops,
                    capprops=capprops,
                    width=0.7, linewidth = 1.5)
        # Modify.
        ax.set_ylabel("Length (nt)",fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        if scale_zero_max:
            ax.set_ylim([0,max_y])
        if not disable_title:
            ax.axes.set_title("Site length distribution", fontsize=20)
        ax.set(xlabel=None)
        # Store plot (transparent background for HTML report embedding).
        fig.savefig(out_plot, dpi=125, bbox_inches='tight', transparent=True)
################################################################################
def create_entropy_box_plot(pos_entr_list, neg_entr_list, out_plot,
                            theme=2,
                            disable_title=False):
    """
    Create a box plot, to compare sequence entropies found in positive
    and negative set.
    Given two lists of entropies (positives, negatives), create a dataframe
    using Pandas, and use seaborn for plotting.
    Store plot in out_plot.

    theme:
        Choose between two themes for plotting, 1: default, 2: midnight blue
        (transparent background, colored for the HTML report).
    disable_title:
        If True, do not draw the plot title.
    """
    # Checker.
    assert pos_entr_list, "given list pos_entr_list empty"
    assert neg_entr_list, "given list neg_entr_list empty"
    # Make pandas dataframe (long format: one row per entropy value).
    pos_label = "Positives"
    neg_label = "Negatives"
    data = {'set': [], 'entropy': []}
    pos_c = len(pos_entr_list)
    neg_c = len(neg_entr_list)
    data['set'] += pos_c*[pos_label] + neg_c*[neg_label]
    data['entropy'] += pos_entr_list + neg_entr_list
    df = pd.DataFrame (data, columns = ['set','entropy'])
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="entropy", data=df, palette=['cyan','cyan'],
                    width=0.7, linewidth = 1.5, boxprops=dict(alpha=.7))
        # Modify.
        ax.set_ylabel("Sequence complexity",fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        if not disable_title:
            ax.axes.set_title("Sequence complexity distribution", fontsize=20)
        ax.set(xlabel=None)
        # Store plot.
        fig.savefig(out_plot, dpi=125, bbox_inches='tight')
    elif theme == 2:
        """
        Midnight Blue theme:
        ====================
        HTML Hex colors:
        ffffff : white
        190250 : midnight blue
        fcc826 : yellowish
        fd3b9d : pinkish
        2f19f3 : dash blue
        bgcolor="#190250"
        text="#ffffff"
        link="#fd3b9d"
        vlink="#fd3b9d"
        alink="#fd3b9d"
        Editing matplotlib boxplot element props:
        (from matplotlib.axes.Axes.boxplot)
        boxprops
        whiskerprops
        flierprops
        medianprops
        meanprops
        """
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Custom flier (outlier) edge and face colors.
        flierprops = dict(markersize=5, markerfacecolor=box_color, markeredgecolor=text_color)
        boxprops = dict(color=box_color, edgecolor=text_color)
        medianprops = dict(color=text_color)
        meanprops = dict(color=text_color)
        whiskerprops = dict(color=text_color)
        capprops = dict(color=text_color)
        # Make plot (sns.set mutates global rc state for following plots).
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="entropy", data=df,
                    flierprops=flierprops,
                    boxprops=boxprops,
                    meanprops=meanprops,
                    medianprops=medianprops,
                    whiskerprops=whiskerprops,
                    capprops=capprops,
                    width=0.7, linewidth = 1.5)
        # Modify.
        ax.set_ylabel("Sequence complexity",fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        # Boxplot whisker colors.
        #plt.setp(ax.lines, color=text_color)
        # Boxplot box edge + face colors.
        #plt.setp(ax.artists, edgecolor=text_color, facecolor=box_color)
        if not disable_title:
            ax.axes.set_title("Sequence complexity distribution", fontsize=20)
        ax.set(xlabel=None)
        # Store plot (transparent background for HTML report embedding).
        fig.savefig(out_plot, dpi=125, bbox_inches='tight', transparent=True)
################################################################################
def create_dint_ratios_grouped_bar_plot(pos_dintr_dic, neg_dintr_dic, out_plot,
                                        disable_title=False,
                                        theme=1):
    """
    Create a grouped bar plot, showing the di-nucleotide ratios (16 classes)
    in the positive and negative set.
    Input ratio dictionaries for positives and negatives, with key being
    di-nucleotide and value the ratio.
    Create a dataframe using Pandas, and use seaborn for plotting.
    Store plot in out_plot.

    disable_title:
        If True, do not draw the plot title.
    theme:
        1: default, 2: midnight blue (transparent background).

    MV colors:
    #69e9f6, #f154b2
    """
    # Checker.
    assert pos_dintr_dic, "given dictionary pos_dintr_dic empty"
    assert neg_dintr_dic, "given dictionary neg_dintr_dic empty"
    # Make pandas dataframe (long format: one row per set + di-nucleotide).
    pos_label = "Positives"
    neg_label = "Negatives"
    data = {'set': [], 'dint': [], 'perc': []}
    for dint in pos_dintr_dic:
        data['set'].append(pos_label)
        data['dint'].append(dint)
        data['perc'].append(pos_dintr_dic[dint])
    for dint in neg_dintr_dic:
        data['set'].append(neg_label)
        data['dint'].append(dint)
        data['perc'].append(neg_dintr_dic[dint])
    df = pd.DataFrame (data, columns = ['set','dint', 'perc'])
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        g = sns.catplot(x="dint", y="perc", hue="set", data=df, height=6,
                        kind="bar", palette=["#69e9f6", "#f154b2"],
                        edgecolor="lightgrey",
                        legend=False)
        g.fig.set_figwidth(16)
        g.fig.set_figheight(4)
        # Modify axes.
        ax = g.axes
        ax[0,0].set_ylabel("Percentage (%)",fontsize=20)
        ax[0,0].set(xlabel=None)
        ax[0,0].tick_params(axis='x', labelsize=20)
        ax[0,0].tick_params(axis='y', labelsize=16)
        if not disable_title:
            ax[0,0].axes.set_title("Di-nucleotide distribution", fontsize=22)
        # Add legend at specific position.
        plt.legend(loc=(1.01, 0.4), fontsize=16)
        g.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        # Midnight blue theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Make plot (sns.set mutates global rc state for following plots).
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        g = sns.catplot(x="dint", y="perc", hue="set", data=df, height=6,
                        kind="bar", palette=["blue", "darkblue"],
                        edgecolor="#fcc826",
                        legend=False)
        g.fig.set_figwidth(16)
        g.fig.set_figheight(4)
        # Modify axes.
        ax = g.axes
        ax[0,0].set_ylabel("Percentage (%)",fontsize=20)
        ax[0,0].set(xlabel=None)
        ax[0,0].tick_params(axis='x', labelsize=20)
        ax[0,0].tick_params(axis='y', labelsize=16)
        if not disable_title:
            ax[0,0].axes.set_title("Di-nucleotide distribution", fontsize=22)
        # Add legend at specific position (framealpha=0: transparent frame).
        plt.legend(loc=(1.01, 0.4), fontsize=16, framealpha=0)
        g.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def create_str_elem_grouped_bar_plot(pos_str_stats_dic, neg_str_stats_dic, out_plot,
                                     disable_title=False,
                                     theme=1):
    """
    Create a grouped bar plot, showing average probabilities of secondary
    structure elements (U, E, H, I, M, S) in the positive and negative set.
    pos_str_stats_dic and neg_str_stats_dic contain the statistics for
    the the positive and negative set (mean + stdev values).
    Create a dataframe using Pandas, and use seaborn for plotting.
    Store plot in out_plot.

    disable_title:
        If True, do not draw the plot title.
    theme:
        1: default, 2: midnight blue (transparent background).

    Stats dictionary content.
    stats_dic["U"] = [pu_mean, pu_stdev]
    stats_dic["S"] = [ps_mean, ps_stdev]
    stats_dic["E"] = [pe_mean, pe_stdev]
    stats_dic["H"] = [ph_mean, ph_stdev]
    stats_dic["I"] = [pi_mean, pi_stdev]
    stats_dic["M"] = [pm_mean, pm_stdev]
    """
    # Checker.
    assert pos_str_stats_dic, "given dictionary pos_str_stats_dic empty"
    assert neg_str_stats_dic, "given dictionary neg_str_stats_dic empty"
    # Make pandas dataframe (long format: one row per set + element).
    pos_label = "Positives"
    neg_label = "Negatives"
    data = {'set': [], 'elem': [], 'mean_p': [], 'stdev_p': []}
    for el in pos_str_stats_dic:
        # Keep only single-letter element keys.
        # NOTE(review): the character class [U|S|E|H|I|M] also matches a
        # literal "|"; harmless for the expected one-letter element keys.
        if not re.search("^[U|S|E|H|I|M]$", el):
            continue
        data['set'].append(pos_label)
        data['elem'].append(el)
        data['mean_p'].append(pos_str_stats_dic[el][0])
        data['stdev_p'].append(pos_str_stats_dic[el][1])
    for el in neg_str_stats_dic:
        if not re.search("^[U|S|E|H|I|M]$", el):
            continue
        data['set'].append(neg_label)
        data['elem'].append(el)
        data['mean_p'].append(neg_str_stats_dic[el][0])
        data['stdev_p'].append(neg_str_stats_dic[el][1])
    df = pd.DataFrame (data, columns = ['set','elem', 'mean_p', 'stdev_p'])
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        g = sns.catplot(x="elem", y="mean_p", hue="set", data=df, height=6,
                        kind="bar", palette=["#69e9f6", "#f154b2"],
                        edgecolor="lightgrey",
                        legend=False)
        g.fig.set_figwidth(10)
        g.fig.set_figheight(4)
        # Modify axes.
        ax = g.axes
        ax[0,0].set_ylabel("Mean probability",fontsize=22)
        ax[0,0].set(xlabel=None)
        ax[0,0].tick_params(axis='x', labelsize=22)
        ax[0,0].tick_params(axis='y', labelsize=17)
        if not disable_title:
            ax[0,0].axes.set_title("Structural elements distribution", fontsize=24)
        # Add legend at specific position.
        plt.legend(loc=(1.01, 0.4), fontsize=17)
        g.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        # Midnight blue theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Make plot (sns.set mutates global rc state for following plots).
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        g = sns.catplot(x="elem", y="mean_p", hue="set", data=df, height=6,
                        kind="bar", palette=["blue", "darkblue"],
                        edgecolor="#fcc826",
                        legend=False)
        g.fig.set_figwidth(10)
        g.fig.set_figheight(4)
        # Modify axes.
        ax = g.axes
        ax[0,0].set_ylabel("Mean probability",fontsize=22)
        ax[0,0].set(xlabel=None)
        ax[0,0].tick_params(axis='x', labelsize=22)
        ax[0,0].tick_params(axis='y', labelsize=17)
        if not disable_title:
            ax[0,0].axes.set_title("Structural elements distribution", fontsize=24)
        # Add legend at specific position (framealpha=0: transparent frame).
        plt.legend(loc=(1.01, 0.4), fontsize=17, framealpha=0)
        g.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def create_eval_model_comp_scatter_plot(model1_scores, model2_scores, out_plot,
                                        x_label="Score model 1",
                                        y_label="Score model 2",
                                        theme=1):
    """
    Create rnaprot eval scatter plot, to compare scores produced by
    two models on same dataset. Also calculates and plots R2 (coefficient
    of determination) value for two datasets.

    model1_scores / model2_scores:
        Equal-length score lists (one score pair per site).
    theme:
        1: default, 2: midnight blue (transparent background).
    """
    assert model1_scores, "model1_scores empty"
    assert model2_scores, "model2_scores empty"
    set1_c = len(model1_scores)
    set2_c = len(model2_scores)
    assert set1_c == set2_c, "differing set sizes for set1_c and set2_c (%i != %i)" %(set1_c, set2_c)
    data = {'m1_score': [], 'm2_score': []}
    for i,sc in enumerate(model1_scores):
        data['m1_score'].append(sc)
        data['m2_score'].append(model2_scores[i])
    df = pd.DataFrame (data, columns = ['m1_score','m2_score'])
    # Calculate R2 (helper defined elsewhere in this module).
    r_squared = calc_r2_corr_measure(model1_scores, model2_scores)
    r2str = "R2 = %.6f" %(r_squared)
    # R2 text coordinates (bottom right corner of the point cloud).
    max_x = max(model1_scores)
    min_y = min(model2_scores)
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.scatterplot(x="m1_score", y="m2_score", data=df, color='#69e9f6', s=3)
        plt.text(max_x , min_y, r2str, color='black', horizontalalignment='right', size=10)
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        #ax.tick_params(axis='x', labelsize=18)
        #ax.tick_params(axis='y', labelsize=14)
        fig.savefig(out_plot, dpi=150, bbox_inches='tight')
    elif theme == 2:
        # Midnight blue theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Make plot (sns.set mutates global rc state for following plots).
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.scatterplot(x="m1_score", y="m2_score", data=df, color='blue', s=3)
        plt.text(max_x , min_y, r2str, color='blue', horizontalalignment='right', size=10)
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        #ax.tick_params(axis='x', labelsize=18)
        #ax.tick_params(axis='y', labelsize=14)
        fig.savefig(out_plot, dpi=150, bbox_inches='tight', transparent=True)
################################################################################
def create_eval_rank_vs_sc_plot(ws_scores, out_plot,
                                x_label="site rank",
                                y_label="whole-site score",
                                theme=1):
    """
    Plot whole-site scores (y-axis) against their score rank (x-axis),
    with rank 1 being the highest score. Store plot in out_plot.

    theme:
        1: default, 2: midnight blue (transparent background).
    """
    assert ws_scores, "ws_scores empty"
    # Rank scores from highest (rank 1) to lowest.
    ranked_scores = sorted(ws_scores, reverse=True)
    data = {'score': list(ranked_scores),
            'rank': list(range(1, len(ranked_scores) + 1))}
    df = pd.DataFrame (data, columns = ['score', 'rank'])
    if theme == 1:
        # Default theme plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.lineplot(data=df, x='rank', y='score', color='#69e9f6')
        fig.set_figwidth(6)
        fig.set_figheight(4)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        fig.savefig(out_plot, dpi=150, bbox_inches='tight')
    elif theme == 2:
        # Midnight blue theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Apply theme colors to global rc state, then plot.
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.lineplot(data=df, x='rank', y='score', color='blue')
        fig.set_figwidth(6)
        fig.set_figheight(4)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        fig.savefig(out_plot, dpi=150, bbox_inches='tight', transparent=True)
################################################################################
def create_eval_kmer_score_kde_plot(set_scores, out_plot,
                                    set_label="Positives",
                                    x_label="k-mer score",
                                    y_label="Density",
                                    fig_width=5,
                                    fig_height=4,
                                    kde_bw_adjust=1,
                                    x_0_to_100=False,
                                    kde_clip=False,
                                    theme=1):
    """
    Create rnaprot eval kdeplot, plotting density for set of k-mer scores.
    Store plot in out_plot.

    kde_bw_adjust:
        Bandwidth adjustment factor for the KDE.
    x_0_to_100:
        If True, fix the x-axis range to [0, 100].
    kde_clip:
        [min, max] clip range for the KDE; defaults to the observed
        score range.
    theme:
        1: default, 2: midnight blue (transparent background).
    """
    assert set_scores, "set_scores empty"
    # Default clip range: observed score range.
    if not kde_clip:
        max_sc = max(set_scores)
        min_sc = min(set_scores)
        kde_clip = [min_sc, max_sc]
    data = {'score': []}
    data['score'] += set_scores
    df = pd.DataFrame (data, columns = ['score'])
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.kdeplot(x="score", data=df, color='#69e9f6',
                    clip=kde_clip, bw_adjust=kde_bw_adjust)
        fig.set_figwidth(fig_width)
        fig.set_figheight(fig_height)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        # BUGFIX: x_0_to_100 was only honored in the theme 2 branch;
        # apply the axis limit here as well for consistency.
        if x_0_to_100:
            ax.set_xlim([0,100])
        fig.savefig(out_plot, dpi=150, bbox_inches='tight')
    elif theme == 2:
        # Midnight blue theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Make plot (sns.set mutates global rc state for following plots).
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.kdeplot(x="score", data=df, color='blue',
                    clip=kde_clip, bw_adjust=kde_bw_adjust)
        fig.set_figwidth(fig_width)
        fig.set_figheight(fig_height)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        if x_0_to_100:
            ax.set_xlim([0,100])
        fig.savefig(out_plot, dpi=150, bbox_inches='tight', transparent=True)
################################################################################
def create_eval_kde_plot(set1_scores, set2_scores, out_plot,
                         set1_label="Positives",
                         set2_label="Negatives",
                         x_label="Whole-site score",
                         y_label="Density",
                         theme=1):
    """
    Create rnaprot eval kdeplot, plotting densities for two sets of
    scores.

    set1_scores / set2_scores:
        Score lists for the two sets (e.g. positives and negatives).
    out_plot:
        Output plot file path (.png).
    set1_label / set2_label:
        Legend labels for the two sets.
    theme:
        HTML report color theme (1 or 2). Theme 2 uses dark colors and
        saves the figure with a transparent background.
    """
    assert set1_scores, "set1_scores empty"
    assert set2_scores, "set2_scores empty"
    set1_c = len(set1_scores)
    set2_c = len(set2_scores)
    # Clip the KDE to the combined observed score range.
    max_sc = max(max(set1_scores), max(set2_scores))
    min_sc = min(min(set1_scores), min(set2_scores))
    kde_clip = [min_sc, max_sc]
    data = {'set': set1_c*[set1_label] + set2_c*[set2_label],
            'score': list(set1_scores) + list(set2_scores)}
    df = pd.DataFrame(data, columns=['set', 'score'])
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.kdeplot(x="score", data=df, hue="set", clip=kde_clip, legend=False,
                    palette=["#69e9f6", "#f154b2"])
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        # BUGFIX: legend previously hardcoded 'Negatives'/'Positives',
        # ignoring the set1_label/set2_label parameters. Label order is
        # reversed on purpose (matches seaborn hue plotting order).
        ax.legend(loc='upper right', labels=[set2_label, set1_label], framealpha=0.4)
        fig.savefig(out_plot, dpi=150, bbox_inches='tight')
    elif theme == 2:
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        # Make plot.
        sns.set(style="darkgrid", rc={"axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.kdeplot(x="score", data=df, hue="set", clip=kde_clip, legend=False,
                    palette=["blue", "deepskyblue"])
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=x_label)
        ax.set_ylabel(y_label)
        # BUGFIX: use the label parameters instead of hardcoded strings.
        ax.legend(loc='upper right', labels=[set2_label, set1_label], framealpha=0.2)
        fig.savefig(out_plot, dpi=150, bbox_inches='tight', transparent=True)
################################################################################
def rp_eval_generate_html_report(ws_scores, neg_ws_scores,
                                 out_folder, rplib_path,
                                 html_report_out="report.rnaprot_eval.html",
                                 onlyseq=True,
                                 add_ws_scores=False,
                                 id2wssc_dic=False,
                                 idx2id_dic=False,
                                 seqs_dic=False,
                                 sal_peak_pos_dic=False,
                                 sal_peak_sc_dic=False,
                                 sal_peak_win_dic=False,
                                 theme=1,
                                 plots_subfolder="html_plots"):
    """
    Generate HTML report for rnaprot eval, showing stats and plots regarding
    whole site scores.

    ws_scores:
        Positive set whole-site scores.
    neg_ws_scores:
        Negative set whole-site scores.
    out_folder:
        Output folder to store the report (and plots subfolder) in.
    rplib_path:
        Path to rnaprot library folder (logo images, sorttable.js,
        plotly-latest.min.js expected in its content/ subfolder).
    html_report_out:
        HTML report output file (overrides default report path if set).
    onlyseq:
        If True, add saliency-based sections (requires id2wssc_dic,
        seqs_dic and the three sal_peak_* dictionaries).
    add_ws_scores:
        Positive set whole-site scores of a second model (--add-train-in);
        if set, a model comparison section is added.
    id2wssc_dic:
        Sequence ID -> whole-site score mapping.
    idx2id_dic:
        List index -> sequence ID mapping.
    seqs_dic:
        Sequence ID -> sequence mapping.
    sal_peak_pos_dic:
        Sequence ID -> saliency peak position (0-based; +1 is applied
        below before reporting).
    sal_peak_sc_dic:
        Sequence ID -> saliency peak score.
    sal_peak_win_dic:
        Sequence ID -> saliency peak window (start, end) — presumably
        0-based half-open coordinates; confirm with caller.
    theme:
        HTML report color theme (1 or 2).
    plots_subfolder:
        Name of the plots subfolder created inside out_folder.
    """
    # Checks.
    assert ws_scores, "ws_scores empty"
    assert neg_ws_scores, "neg_ws_scores empty"
    assert os.path.exists(out_folder), "out_folder does not exist"
    assert os.path.exists(rplib_path), "rplib_path does not exist"
    assert idx2id_dic, "idx2id_dic needed"
    if onlyseq:
        # Sequence-only models get the saliency / rank-vs-score plotly sections.
        assert id2wssc_dic, "id2wssc_dic needed"
        assert sal_peak_pos_dic, "sal_peak_pos_dic needed"
        assert sal_peak_sc_dic, "sal_peak_sc_dic needed"
        assert sal_peak_win_dic, "sal_peak_win_dic needed"
    # Import markdown to generate report.
    from markdown import markdown
    # Output subfolder for plots.
    plots_folder = plots_subfolder
    plots_out_folder = out_folder + "/" + plots_folder
    if not os.path.exists(plots_out_folder):
        os.makedirs(plots_out_folder)
    # Output files.
    html_out = out_folder + "/" + "report.rnaprot_eval.html"
    if html_report_out:
        html_out = html_report_out
    # Plot files (names used inside the report; *_out are full paths).
    ws_sc_plot = "whole_site_scores_kde_plot.png"
    rank_vs_sc_plot = "rank_vs_ws_score_plot.png"
    model_comp_plot = "model_comparison_plot.html"
    site_reg_imp_plot = "site_reg_imp_plot.png"
    rank_vs_sc_plotly_plot = "rank_vs_sc_plotly.html"
    ws_sc_plot_out = plots_out_folder + "/" + ws_sc_plot
    rank_vs_sc_plot_out = plots_out_folder + "/" + rank_vs_sc_plot
    model_comp_plot_out = plots_out_folder + "/" + model_comp_plot
    site_reg_imp_plot_out = plots_out_folder + "/" + site_reg_imp_plot
    rank_vs_sc_plotly_plot_out = plots_out_folder + "/" + rank_vs_sc_plotly_plot
    # Logo paths.
    logo1_path = rplib_path + "/content/logo1.png"
    logo2_path = rplib_path + "/content/logo2.png"
    sorttable_js_path = rplib_path + "/content/sorttable.js"
    # plotly js path.
    plotly_js_path = rplib_path + "/content/plotly-latest.min.js"
    assert os.path.exists(plotly_js_path), "plotly js %s not found" %(plotly_js_path)
    # Create theme-specific HTML header.
    if theme == 1:
        mdtext = """
<head>
<title>RNAProt - Model Evaluation Report</title>
<script src="%s" type="text/javascript"></script>
</head>
<img src="%s" alt="rp_logo"
title="rp_logo" width="600" />
""" %(sorttable_js_path, logo1_path)
    elif theme == 2:
        mdtext = """
<head>
<title>RNAProt - Model Evaluation Report</title>
<script src="%s" type="text/javascript"></script>
<style>
h1 {color:#fd3b9d;}
h2 {color:#fd3b9d;}
h3 {color:#fd3b9d;}
</style>
</head>
<img src="%s" alt="rp_logo"
title="rp_logo" width="500" />
<body style="font-family:sans-serif" bgcolor="#190250" text="#fcc826" link="#fd3b9d" vlink="#fd3b9d" alink="#fd3b9d">
""" %(sorttable_js_path, logo2_path)
    else:
        assert False, "invalid theme ID given"
    # Add first section markdown (table of contents; anchors use the
    # markdown attr_list extension syntax {#id}).
    mdtext += """
# Model Evaluation Report
List of available model evaluation statistics generated
by RNAProt (rnaprot eval):
- [Whole-site score distributions](#ws-scores-plot)
- [Saliency peak distribution](#site-reg-imp-plot)"""
    if add_ws_scores:
        mdtext += "\n"
        mdtext += "- [Model comparison](#model-comp-plot)"
    mdtext += "\n \n"
    # NOTE: bare string below is a no-op section marker (not a docstring).
    """
    Whole-site score distributions for positives and negatives.
    """
    c_pos_sites = len(ws_scores)
    c_neg_sites = len(neg_ws_scores)
    print("Generate whole-site scores plot .. ")
    # Plot whole-site score distributions for positives and negatives.
    create_eval_kde_plot(ws_scores, neg_ws_scores, ws_sc_plot_out,
                         set1_label="Positives",
                         set2_label="Negatives",
                         x_label="Whole-site score",
                         y_label="Density",
                         theme=theme)
    plot_path = plots_folder + "/" + ws_sc_plot
    mdtext += """
## Whole-site score distributions ### {#ws-scores-plot}
Whole-site score distributions for the positive and negative training
set, scored by the trained model. Since the model was trained on these
two sequence sets, we expect on average higher scores for the positive
sequences (given a sufficient model performance).
"""
    mdtext += '<img src="' + plot_path + '" alt="Whole-site score distributions"' + "\n"
    mdtext += 'title="Whole-site score distributions" width="500" />' + "\n"
    mdtext += """
**Figure:** Whole-site score distributions for the positive (Positives, %i sites)
and negative (Negatives, %i sites) training set, scored by the trained model.
""" %(c_pos_sites, c_neg_sites)
    # Static rank (by score) vs whole-site score plot for the positive set.
    create_eval_rank_vs_sc_plot(ws_scores, rank_vs_sc_plot_out,
                                x_label="site rank",
                                y_label="whole-site score",
                                theme=theme)
    plot_path = plots_folder + "/" + rank_vs_sc_plot
    mdtext += '<img src="' + plot_path + '" alt="rank vs ws score distribution"' + "\n"
    mdtext += 'title="rank vs ws score distribution" width="550" />' + "\n"
    mdtext += """
**Figure:** Whole-site score distribution of the positive set, with site ranks
on x-axis and whole-site scores on y-axis.
"""
    if onlyseq:
        # Interactive rank-vs-score plot with per-site hover infos
        # (saliency peak position/score/region/sequence).
        create_rank_vs_sc_plotly(seqs_dic, id2wssc_dic,
                                 sal_peak_pos_dic, sal_peak_sc_dic,
                                 sal_peak_win_dic,
                                 rank_vs_sc_plotly_plot_out, plotly_js_path,
                                 x_label="site rank",
                                 y_label="whole-site score",
                                 theme=theme)
        plot_path = plots_folder + "/" + rank_vs_sc_plotly_plot
        # NOTE(review): 'class=class=' below looks like a typo in the div
        # attribute — confirm before changing (runtime string, kept as is).
        mdtext += '<div class=class="container-fluid" style="margin-top:40px">' + "\n"
        mdtext += '<iframe src="' + plot_path + '" width="1200" height="1200"></iframe>' + "\n"
        mdtext += '</div>'
        mdtext += """
**Figure:** Whole-site score distribution of the positive set (scores > 0),
with site ranks by score on x-axis, and whole-site scores on y-axis.
Hover boxes contain the following additional infos for each sequence:
Saliency peak position and score, saliency peak region and peak sequence.
"""
    print("Generate site region importance plot .. ")
    # Convert saliency peak positions to relative site positions (%).
    peak_pos_perc_list = []
    for seq_id in sal_peak_pos_dic:
        l_seq = len(seqs_dic[seq_id])
        sal_peak_pos = sal_peak_pos_dic[seq_id] + 1  # make 1-based.
        peak_pos_perc = (sal_peak_pos / l_seq ) * 100
        peak_pos_perc_list.append(peak_pos_perc)
    assert peak_pos_perc_list, "peak_pos_perc_list empty"
    # KDE clipped to the 0..100 % range, with a narrower bandwidth.
    kde_clip = [0, 100]
    kde_bw_adjust = 0.4
    create_eval_kmer_score_kde_plot(peak_pos_perc_list, site_reg_imp_plot_out,
                                    x_label="Relative site position (%)",
                                    y_label="Density",
                                    fig_width=7,
                                    fig_height=3,
                                    kde_bw_adjust=kde_bw_adjust,
                                    kde_clip=kde_clip,
                                    x_0_to_100=True,
                                    theme=theme)
    plot_path = plots_folder + "/" + site_reg_imp_plot
    mdtext += """
## Saliency peak distribution ### {#site-reg-imp-plot}
This plot shows, averaged over all positive sites with model score > 0,
the distribution of saliency peak positions over the whole site length.
"""
    mdtext += '<img src="' + plot_path + '" alt="site_reg_imp_distr"' + "\n"
    mdtext += 'title="Saliency peak distribution" width="650" />' + "\n"
    mdtext += """
**Figure:** Distribution of saliency peaks over the whole site length,
averaged over all positive sites with model score > 0.
"""
    # NOTE: bare string below is a no-op section marker (not a docstring).
    """
    Model comparison plot.
    """
    if add_ws_scores:
        # R2 between model 1 and model 2 scores on the positive set.
        r_squared = calc_r2_corr_measure(ws_scores, add_ws_scores)
        print("r_squared:", r_squared)
        print("Generate --train-in vs --add-train-in model comparison plot ... ")
        create_m1m2sc_plotly_scatter_plot(ws_scores, add_ws_scores, idx2id_dic,
                                          model_comp_plot_out, plotly_js_path,
                                          seqs_dic=seqs_dic,
                                          theme=theme)
        plot_path = plots_folder + "/" + model_comp_plot
        # NOTE(review): "postive" in the section text below is a typo in a
        # runtime string — left unchanged here.
        mdtext += """
## Model comparison ### {#model-comp-plot}
To compare two models, the postive training set is scored with two models
and the two model scores are displayed as a scatter plot. More similar
models should show higher correlation, resulting in a higher
[R2 score](https://en.wikipedia.org/wiki/Coefficient_of_determination).
"""
        mdtext += '<div class=class="container-fluid" style="margin-top:40px">' + "\n"
        mdtext += '<iframe src="' + plot_path + '" width="1200" height="1200"></iframe>' + "\n"
        mdtext += '</div>'
        mdtext += """
**Figure:** Model comparison scatter plot, comparing whole-site model scores
on the positive training set for the two input models. Model 1: model from
--train-in folder. Model 2: model from --add-train-in. R2 = %.6f.
""" %(r_squared)
    print("Generate HTML report ... ")
    # Convert mdtext to html.
    md2html = markdown(mdtext, extensions=['attr_list', 'tables'])
    #OUTMD = open(md_out,"w")
    #OUTMD.write("%s\n" %(mdtext))
    #OUTMD.close()
    OUTHTML = open(html_out,"w")
    OUTHTML.write("%s\n" %(md2html))
    OUTHTML.close()
    # change <table> to sortable.
    check_cmd = "sed -i 's/<table>/<table class=" + '"sortable"' + ">/g' " + html_out
    output = subprocess.getoutput(check_cmd)
    # sed is silent on success, so any output is treated as an error here.
    error = False
    if output:
        error = True
    assert error == False, "sed command returned error:\n%s" %(output)
################################################################################
def calc_r2_corr_measure(scores1, scores2,
                         is_dic=False):
    """
    Compute the R2 measure, i.e. the squared Pearson correlation
    coefficient between the two score collections.

    is_dic:
        Set True if scores1 + scores2 are dictionaries (values get
        paired over the keys of scores1).
    """
    assert len(scores1) == len(scores2), "len(scores1) != len(scores2)"
    if is_dic:
        # Pair the dictionary values over the keys of scores1.
        pairs = [(scores1[key], scores2[key]) for key in scores1]
        vals1 = [p[0] for p in pairs]
        vals2 = [p[1] for p in pairs]
    else:
        vals1, vals2 = scores1, scores2
    # Off-diagonal entry of the 2x2 correlation matrix is Pearson's r.
    pearson_r = np.corrcoef(vals1, vals2)[0, 1]
    return pearson_r ** 2
################################################################################
def create_rank_vs_sc_plotly(seqs_dic, id2wssc_dic,
                             sal_peak_pos_dic, sal_peak_sc_dic,
                             sal_peak_win_dic,
                             out_html, plotly_js,
                             x_label="site rank",
                             y_label="whole-site score",
                             theme=1):
    """
    Plot rank of whole-site score (x-axis) vs score (y-axis) as an
    interactive plotly scatter plot, stored in out_html.
    Only sites with score >= 0 and a recorded saliency peak make it
    into the plot; hover boxes show saliency peak infos + sequence.
    """
    # DataFrame column names (also appear in the hover boxes).
    col_score = "whole_site_score"
    col_peak_pos = "saliency_peak_position"
    col_peak_sc = "saliency_peak_score"
    col_peak_reg = "saliency_peak_region"
    col_peak_seq = "saliency_peak_win_seq"
    col_seq = "seq"
    col_seq_id = "seq_id"
    col_rank = "rank"
    col_order = [col_score, col_peak_pos, col_peak_sc, col_peak_reg,
                 col_peak_seq, col_seq, col_seq_id, col_rank]
    rows = {c: [] for c in col_order}
    rank = 0
    # Walk sites from highest to lowest whole-site score.
    for seq_id, ws_sc in sorted(id2wssc_dic.items(), key=lambda item: item[1], reverse=True):
        rank += 1
        # Stop at the first negative score or missing saliency peak.
        if ws_sc < 0 or seq_id not in sal_peak_pos_dic:
            break
        seq = seqs_dic[seq_id]
        peak_win = sal_peak_win_dic[seq_id]
        win_s = peak_win[0]
        win_e = peak_win[1]
        rows[col_score].append(ws_sc)
        rows[col_peak_pos].append(sal_peak_pos_dic[seq_id] + 1)  # make 1-based.
        rows[col_peak_sc].append(sal_peak_sc_dic[seq_id])
        rows[col_peak_reg].append("%i-%i" % (win_s + 1, win_e))
        rows[col_peak_seq].append(seq[win_s:win_e])
        rows[col_seq].append(seq)
        rows[col_seq_id].append(seq_id)
        rows[col_rank].append(rank)
    df = pd.DataFrame(rows, columns=col_order)
    # Dot color depends on theme.
    dot_col = "blue" if theme == 2 else "#69e9f6"
    plot = px.scatter(data_frame=df, x=col_rank, y=col_score, hover_name=col_seq_id,
                      labels={col_rank: x_label, col_score: y_label},
                      hover_data=[col_peak_pos, col_peak_sc, col_peak_reg, col_peak_seq, col_seq],
                      color_discrete_sequence=[dot_col])
    plot.layout.template = 'seaborn'
    plot.update_layout(hoverlabel=dict(font_size=11))
    plot.update_traces(marker=dict(size=3))
    plot.write_html(out_html,
                    full_html=False,
                    include_plotlyjs=plotly_js)
################################################################################
def create_m1m2sc_plotly_scatter_plot(ws_scores, add_ws_scores, idx2id_dic,
                                      out_html, plotly_js,
                                      seqs_dic=False,
                                      theme=1):
    """
    Create plotly graph plot, plotting model1 scores (ws_scores) against
    model2 scores (add_ws_scores) for the positive set.

    ws_scores:
        List with positive set whole-site scores for model1.
    add_ws_scores:
        List with positive set whole-site scores for model2.
    idx2id_dic:
        List index to sequence ID mapping dic.
    out_html:
        Output .html path to store interactive plotly graph.
    plotly_js:
        Path to plotly js plotly-latest.min.js.
    seqs_dic:
        Sequence ID to sequence mapping dic (adds sequences to hover boxes).
    theme:
        HTML report color theme (1 or 2).
    """
    assert ws_scores, "given ws_scores empty"
    assert add_ws_scores, "given add_ws_scores empty"
    assert idx2id_dic, "given idx2id_dic empty"
    assert len(ws_scores) == len(add_ws_scores), "len(ws_scores) != len(add_ws_scores)"
    # DataFrame column / hover labels.
    m1_label = "Model 1 score"
    m2_label = "Model 2 score"
    id_label = "Sequence ID"
    seq_label = "Sequence"
    data = {m1_label: [], m2_label: [], id_label: []}
    if seqs_dic:
        data[seq_label] = []
    for idx, m1sc in enumerate(ws_scores):
        m2sc = add_ws_scores[idx]
        seq_id = idx2id_dic[idx]
        data[m1_label].append(m1sc)
        data[m2_label].append(m2sc)
        data[id_label].append(seq_id)
        if seqs_dic:
            data[seq_label].append(seqs_dic[seq_id])
    # Get min and max axis values for scaling (same range on both axes).
    # BUGFIX: min/max were previously tracked with +-1000 sentinel values,
    # which yielded wrong axis ranges for scores outside [-1000, 1000].
    min_sc = min(min(ws_scores), min(add_ws_scores))
    max_sc = max(max(ws_scores), max(add_ws_scores))
    # Color of dots.
    dot_col = "#69e9f6"
    if theme == 2:
        dot_col = "blue"
    if seqs_dic:
        df = pd.DataFrame(data, columns = [m1_label, m2_label, id_label, seq_label])
        plot = px.scatter(data_frame=df, x=m1_label, y=m2_label, hover_name=id_label,
                          hover_data=[seq_label],
                          color_discrete_sequence=[dot_col])
    else:
        df = pd.DataFrame(data, columns = [m1_label, m2_label, id_label])
        plot = px.scatter(data_frame=df, x=m1_label, y=m2_label, hover_name=id_label,
                          color_discrete_sequence=[dot_col])
    plot.layout.template = 'seaborn'
    plot.update_layout(yaxis_range=[min_sc, max_sc])
    plot.update_layout(xaxis_range=[min_sc, max_sc])
    plot.update_layout(hoverlabel=dict(font_size=12))
    plot.write_html(out_html,
                    full_html=False,
                    include_plotlyjs=plotly_js)
################################################################################
def create_kmer_sc_plotly_scatter_plot(pos_mer_dic, neg_mer_dic, k,
                                       out_html, plotly_js,
                                       theme=1):
    """
    Create plotly graph plot, containing k-mer scores of positive
    and negative set, and store in .html file.

    pos_mer_dic:
        dic with k-mer percentages of positive set.
    neg_mer_dic:
        dic with k-mer percentages of negative set.
    k:
        k in k-mer. Also used as decimal precision for rounding the
        percentages.
    out_html:
        Output .html path to store interactive (!) plotly graph.
    plotly_js:
        Path to plotly js plotly-latest.min.js.
    theme:
        HTML report color theme (1 or 2).
    """
    assert pos_mer_dic, "given pos_mer_dic empty"
    assert neg_mer_dic, "given neg_mer_dic empty"
    assert len(pos_mer_dic) == len(neg_mer_dic), "len(pos_mer_dic) != len(neg_mer_dic)"
    pos_label = "k-mer % positives"
    neg_label = "k-mer % negatives"
    kmer_label = "k-mer"
    data = {pos_label: [], neg_label: [], kmer_label: []}
    max_pos_perc = 0
    max_neg_perc = 0
    for kmer in pos_mer_dic:
        pos_perc = pos_mer_dic[kmer]
        neg_perc = neg_mer_dic[kmer]
        # Round non-zero percentages to k decimal places.
        if pos_perc != 0:
            pos_perc = round(pos_perc, k)
        if neg_perc != 0:
            neg_perc = round(neg_perc, k)
        if pos_perc > max_pos_perc:
            max_pos_perc = pos_perc
        if neg_perc > max_neg_perc:
            max_neg_perc = neg_perc
        data[pos_label].append(pos_perc)
        data[neg_label].append(neg_perc)
        data[kmer_label].append(kmer)
    # Get min and max axis values for scaling.
    min_perc = 0
    max_perc = max(max_pos_perc, max_neg_perc)
    # Round max_perc up to a clean axis limit.
    # BUGFIX: use a raw regex string (the old "\d+\.\d+" emits a
    # DeprecationWarning) and do not crash (assert False) when max_perc
    # has no fractional representation (e.g. all percentages are 0).
    m = re.search(r"(\d+)\.(\d+)", str(max_perc))
    if m and m.group(1) == "0":
        # max_perc < 1: round up at the first non-zero decimal position.
        for i, c in enumerate(m.group(2)):
            if c != "0":
                # Custom decimal round up.
                max_perc = decimal_ceil(max_perc, i + 1)
                break
    else:
        # Round up to whole number with math.ceil.
        max_perc = ceil(max_perc)
    df = pd.DataFrame(data, columns = [pos_label, neg_label, kmer_label])
    # Color of dots.
    dot_col = "#69e9f6"
    if theme == 2:
        dot_col = "blue"
    plot = px.scatter(data_frame=df, x=pos_label, y=neg_label, hover_name=kmer_label,
                      color_discrete_sequence=[dot_col])
    plot.layout.template = 'seaborn'
    plot.update_layout(yaxis_range=[min_perc, max_perc])
    plot.update_layout(xaxis_range=[min_perc, max_perc])
    plot.write_html(out_html,
                    full_html=False,
                    include_plotlyjs=plotly_js)
################################################################################
def rp_gt_generate_html_report(pos_seqs_dic, neg_seqs_dic, out_folder,
dataset_type, rplib_path,
html_report_out=False,
plots_subfolder=False,
pos_str_stats_dic=False,
neg_str_stats_dic=False,
pos_phastcons_stats_dic=False,
neg_phastcons_stats_dic=False,
pos_phylop_stats_dic=False,
neg_phylop_stats_dic=False,
pos_eia_stats_dic=False,
neg_eia_stats_dic=False,
pos_tra_stats_dic=False,
neg_tra_stats_dic=False,
pos_rra_stats_dic=False,
neg_rra_stats_dic=False,
add_feat_dic_list=False,
target_gbtc_dic=False,
all_gbtc_dic=False,
t2hc_dic=False,
t2i_dic=False,
theme=1,
kmer_top=10,
target_top=10,
rna=True
):
"""
Generate HTML report for rnaprot gt, comparing extracted positive
with negative set.
pos_seqs_dic:
Positive set sequences dictionary.
neg_seqs_dic:
Negative set sequences dictionary.
pos_str_stats_dic:
Positive set structure statistics dictionary
neg_str_stats_dic:
Negative set structure statistics dictionary
pos_phastcons_stats_dic:
Positive set phastcons scores statistics dictionary
neg_phastcons_stats_dic:
Negative set phastcons scores statistics dictionary
add_feat_dic_list:
List of dictionaries with additional BED feature statistics,
where positive and corresponding negative set are stored together,
so indices 1,2 3,4 5,6 ... belong together (positive stats dic first).
out_folder:
rnaprot gt results output folder, to store report in.
rna:
Set True if input sequences are RNA.
html_report_out:
HTML report output file.
target_gbtc_dic:
Gene biotype counts for target set dictionary.
all_gbtc_dic:
Gene biotype counts for all genes dictionary (gene biotype -> count)
t2hc_dic:
Transcript ID to hit count (# sites on transcript) dictionary.
t2i_dic:
Transcript ID to info list dictionary.
More to add:
References
RNAProt version
Command line call
"""
# Checks.
ds_types = {'s':1, 't':1, 'g':1}
assert dataset_type in ds_types, "invalid dataset type given (expected g, s, or t)"
# Import markdown to generate report.
from markdown import markdown
# Checks.
if add_feat_dic_list:
if len(add_feat_dic_list) % 2:
assert False, "even number of dictionaries expected for given add_feat_dic_list"
# Output subfolder for plots.
plots_folder = plots_subfolder
plots_out_folder = out_folder + "/" + plots_folder
if not os.path.exists(plots_out_folder):
os.makedirs(plots_out_folder)
# Output files.
html_out = out_folder + "/" + "report.rnaprot_gt.html"
if html_report_out:
html_out = html_report_out
#md_out = out_folder + "/" + "report.rnaprot_gt.md"
# Plot files.
lengths_plot = "set_lengths_plot.png"
entropy_plot = "sequence_complexity_plot.png"
dint_plot = "dint_percentages_plot.png"
str_elem_plot = "str_elem_plot.png"
phastcons_plot = "phastcons_plot.png"
phylop_plot = "phylop_plot.png"
eia_plot = "exon_intron_region_plot.png"
tra_plot = "transcript_region_plot.png"
rra_plot = "repeat_region_plot.png"
bed_cov_plot = "bed_feat_coverage_plot.png"
plotly_3mer_plot = "plotly_scatter_3mer.html"
plotly_4mer_plot = "plotly_scatter_4mer.html"
plotly_5mer_plot = "plotly_scatter_5mer.html"
lengths_plot_out = plots_out_folder + "/" + lengths_plot
entropy_plot_out = plots_out_folder + "/" + entropy_plot
dint_plot_out = plots_out_folder + "/" + dint_plot
str_elem_plot_out = plots_out_folder + "/" + str_elem_plot
phastcons_plot_out = plots_out_folder + "/" + phastcons_plot
phylop_plot_out = plots_out_folder + "/" + phylop_plot
eia_plot_out = plots_out_folder + "/" + eia_plot
tra_plot_out = plots_out_folder + "/" + tra_plot
rra_plot_out = plots_out_folder + "/" + rra_plot
bed_cov_plot_out = plots_out_folder + "/" + bed_cov_plot
plotly_3mer_plot_out = plots_out_folder + "/" + plotly_3mer_plot
plotly_4mer_plot_out = plots_out_folder + "/" + plotly_4mer_plot
plotly_5mer_plot_out = plots_out_folder + "/" + plotly_5mer_plot
print("Generate statistics for HTML report ... ")
# Site numbers.
c_pos_out = len(pos_seqs_dic)
c_neg_out = len(neg_seqs_dic)
# Site lengths.
pos_len_list = get_seq_len_list_from_dic(pos_seqs_dic)
neg_len_list = get_seq_len_list_from_dic(neg_seqs_dic)
# Get entropy scores for sequences.
pos_entr_list = seqs_dic_calc_entropies(pos_seqs_dic, rna=rna,
uc_part_only=False)
neg_entr_list = seqs_dic_calc_entropies(neg_seqs_dic, rna=rna,
uc_part_only=False)
# Get set nucleotide frequencies.
pos_ntc_dic = seqs_dic_count_nt_freqs(pos_seqs_dic, rna=rna,
convert_to_uc=True)
neg_ntc_dic = seqs_dic_count_nt_freqs(neg_seqs_dic, rna=rna,
convert_to_uc=True)
# Get nucleotide ratios.
pos_ntr_dic = ntc_dic_to_ratio_dic(pos_ntc_dic, perc=True)
neg_ntr_dic = ntc_dic_to_ratio_dic(neg_ntc_dic, perc=True)
# Get dinucleotide percentages.
pos_dintr_dic = seqs_dic_count_kmer_freqs(pos_seqs_dic, 2, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
neg_dintr_dic = seqs_dic_count_kmer_freqs(neg_seqs_dic, 2, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
# Get 3-mer percentages.
pos_3mer_dic = seqs_dic_count_kmer_freqs(pos_seqs_dic, 3, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
neg_3mer_dic = seqs_dic_count_kmer_freqs(neg_seqs_dic, 3, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
# Get 4-mer percentages.
pos_4mer_dic = seqs_dic_count_kmer_freqs(pos_seqs_dic, 4, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
neg_4mer_dic = seqs_dic_count_kmer_freqs(neg_seqs_dic, 4, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
# Get 5-mer percentages.
pos_5mer_dic = seqs_dic_count_kmer_freqs(pos_seqs_dic, 5, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
neg_5mer_dic = seqs_dic_count_kmer_freqs(neg_seqs_dic, 5, rna=rna,
return_ratios=True,
perc=True,
report_key_error=True,
convert_to_uc=True)
# Logo paths.
logo1_path = rplib_path + "/content/logo1.png"
logo2_path = rplib_path + "/content/logo2.png"
# Create theme-specific HTML header.
if theme == 1:
mdtext = """
<head>
<title>RNAProt - Training Set Generation Report</title>
</head>
<img src="%s" alt="rp_logo"
title="rnaprot_logo" width="600" />
""" %(logo1_path)
elif theme == 2:
mdtext = """
<head>
<title>RNAProt - Training Set Generation Report</title>
<style>
h1 {color:#fd3b9d;}
h2 {color:#fd3b9d;}
h3 {color:#fd3b9d;}
</style>
</head>
<img src="%s" alt="rp_logo"
title="rnaprot_logo" width="500" />
<body style="font-family:sans-serif" bgcolor="#190250" text="#fcc826" link="#fd3b9d" vlink="#fd3b9d" alink="#fd3b9d">
""" %(logo2_path)
else:
assert False, "invalid theme ID given"
# Add first section markdown.
mdtext += """
# Training set generation report
List of available statistics for the training dataset generated
by RNAProt (rnaprot gt):
- [Training dataset statistics](#set-stats)
- [Site length distribution](#len-plot)
- [Sequence complexity distribution](#ent-plot)
- [Di-nucleotide distribution](#dint-plot)
- [k-mer distributions](#kmer-plotly)
- [Top k-mer statistics](#kmer-stats)"""
if pos_str_stats_dic and neg_str_stats_dic:
mdtext += "\n"
mdtext += "- [Structural elements distribution](#str-elem-plot)\n"
mdtext += "- [Secondary structure statistics](#bp-stats)"
if pos_phastcons_stats_dic or pos_phylop_stats_dic:
mdtext += "\n"
mdtext += "- [Conservation scores distribution](#con-plot)\n"
mdtext += "- [Conservation scores statistics](#con-stats)"
if pos_eia_stats_dic and neg_eia_stats_dic:
mdtext += "\n"
mdtext += "- [Exon-intron region distribution](#eia-plot)\n"
mdtext += "- [Exon-intron region statistics](#eia-stats)"
if pos_tra_stats_dic and neg_tra_stats_dic:
mdtext += "\n"
mdtext += "- [Transcript region distribution](#tra-plot)\n"
mdtext += "- [Transcript region statistics](#tra-stats)"
if pos_rra_stats_dic and neg_rra_stats_dic:
mdtext += "\n"
mdtext += "- [Repeat region distribution](#rra-plot)\n"
mdtext += "- [Repeat region statistics](#rra-stats)"
if target_gbtc_dic and all_gbtc_dic:
mdtext += "\n"
mdtext += "- [Target gene biotype statistics](#gbt-stats)"
if t2hc_dic and t2i_dic:
mdtext += "\n"
mdtext += "- [Target region overlap statistics](#tro-stats)"
if add_feat_dic_list:
mdtext += "\n"
mdtext += "- [BED feature statistics](#bed-stats)\n"
mdtext += "- [BED feature coverage distribution](#bed-plot)"
mdtext += "\n \n"
# Make general stats table.
mdtext += """
## Training dataset statistics ### {#set-stats}
**Table:** Training dataset statistics regarding sequence lengths
(min, max, mean, and median length) in nucleotides (nt),
sequence complexity (mean Shannon entropy over all sequences in the set)
and nucleotide contents (A, C, G, U).
"""
mdtext += "| Attribute | Positives | Negatives | \n"
mdtext += "| :-: | :-: | :-: |\n"
mdtext += "| # sites | %i | %i |\n" %(c_pos_out, c_neg_out)
mdtext += "| min site length | %i | %i |\n" %(min(pos_len_list), min(neg_len_list))
mdtext += "| max site length | %i | %i |\n" %(max(pos_len_list), max(neg_len_list))
mdtext += "| mean site length | %.1f | %.1f |\n" %(statistics.mean(pos_len_list), statistics.mean(neg_len_list))
mdtext += "| median site length | %i | %i |\n" %(statistics.median(pos_len_list), statistics.median(neg_len_list))
mdtext += "| mean complexity | %.3f | %.3f |\n" %(statistics.mean(pos_entr_list), statistics.mean(neg_entr_list))
mdtext += '| %A |' + " %.2f | %.2f |\n" %(pos_ntr_dic["A"], neg_ntr_dic["A"])
mdtext += '| %C |' + " %.2f | %.2f |\n" %(pos_ntr_dic["C"], neg_ntr_dic["C"])
mdtext += '| %G |' + " %.2f | %.2f |\n" %(pos_ntr_dic["G"], neg_ntr_dic["G"])
mdtext += '| %U |' + " %.2f | %.2f |\n" %(pos_ntr_dic["U"], neg_ntr_dic["U"])
mdtext += "\n \n \n"
# Make site length distribution box plot.
create_set_lengths_box_plot(pos_len_list, neg_len_list, lengths_plot_out,
theme=theme,
disable_title=True)
lengths_plot_path = plots_folder + "/" + lengths_plot
mdtext += """
## Site length distribution ### {#len-plot}
Lengths differences in the training dataset can arise in two cases:
- FASTA sequences of various lengths are given as input (--in)
- BED sites of various lengths are given as input (--in) and --mode 2 is set
Otherwise, all sequences (positives and negatives) are expected to have
more or less the same length. This is because --mode 1 or --mode 3 both
reduce the sites to a length of 1 before uniform extension is applied
(controlled by --seq-ext).
Some length differences can still occur though, e.g. if transcript
sequences are extracted close to transcript ends, or as negatives
are sampled randomly from a larger pool of initial negatives.
"""
mdtext += '<img src="' + lengths_plot_path + '" alt="Site length distribution"' + "\n"
mdtext += 'title="Site length distribution" width="500" />' + "\n"
mdtext += """
**Figure:** Site length distributions for the positive and negative dataset.
"""
# Make sequence complexity box plot.
create_entropy_box_plot(pos_entr_list, neg_entr_list, entropy_plot_out,
theme=theme,
disable_title=True)
entropy_plot_path = plots_folder + "/" + entropy_plot
mdtext += """
## Sequence complexity distribution ### {#ent-plot}
The Shannon entropy is calculated for each sequence to measure
its information content (i.e., its complexity). A sequence with
equal amounts of all four nucleotides has an entropy value of 1.0
(highest possible). A sequence with equal amounts of two nucleotides
has an entropy value of 0.5. Finally, the lowest possible entropy is
achieved by a sequence which contains only one type of nucleotide.
Find the formula used to compute Shannon's entropy
[here](https://www.ncbi.nlm.nih.gov/pubmed/15215465) (see CE formula).
"""
mdtext += '<img src="' + entropy_plot_path + '" alt="Sequence complexity distribution"' + "\n"
mdtext += 'title="Sequence complexity distribution" width="500" />' + "\n"
mdtext += """
**Figure:** Sequence complexity (Shannon entropy
computed for each sequence) distributions for the positive and
negative dataset.
"""
# Make di-nucleotide grouped bar plot.
create_dint_ratios_grouped_bar_plot(pos_dintr_dic, neg_dintr_dic, dint_plot_out,
theme=theme,
disable_title=True)
dint_plot_path = plots_folder + "/" + dint_plot
mdtext += """
## Di-nucleotide distribution ### {#dint-plot}
Di-nucleotide percentages are shown for both the positive and negative dataset.
"""
mdtext += '<img src="' + dint_plot_path + '" alt="Di-nucleotide distribution"' + "\n"
mdtext += 'title="Di-nucleotide distribution" width="1000" />' + "\n"
mdtext += """
**Figure:** Di-nucleotide percentages for the positive and negative dataset.
"""
mdtext += """
## k-mer distributions ### {#kmer-plotly}
Frequency distributions of k-mers (in percent) for the positive and negative set.
"""
# plotly js path.
plotly_js_path = rplib_path + "/content/plotly-latest.min.js"
assert os.path.exists(plotly_js_path), "plotly js %s not found" %(plotly_js_path)
# Create 3-mer plotly scatter plot.
create_kmer_sc_plotly_scatter_plot(pos_3mer_dic, neg_3mer_dic, 3,
plotly_3mer_plot_out,
plotly_js_path,
theme=theme)
# Create 4-mer plotly scatter plot.
create_kmer_sc_plotly_scatter_plot(pos_4mer_dic, neg_4mer_dic, 4,
plotly_4mer_plot_out,
plotly_js_path,
theme=theme)
# Create 5-mer plotly scatter plot.
create_kmer_sc_plotly_scatter_plot(pos_5mer_dic, neg_5mer_dic, 5,
plotly_5mer_plot_out,
plotly_js_path,
theme=theme)
# Plot paths inside html report.
plotly_3mer_plot_path = plots_folder + "/" + plotly_3mer_plot
plotly_4mer_plot_path = plots_folder + "/" + plotly_4mer_plot
plotly_5mer_plot_path = plots_folder + "/" + plotly_5mer_plot
# R2 scores.
r2_3mer = calc_r2_corr_measure(pos_3mer_dic, neg_3mer_dic,
is_dic=True)
r2_4mer = calc_r2_corr_measure(pos_4mer_dic, neg_4mer_dic,
is_dic=True)
r2_5mer = calc_r2_corr_measure(pos_5mer_dic, neg_5mer_dic,
is_dic=True)
# Include 3-mer code.
mdtext += '<div class=class="container-fluid" style="margin-top:40px">' + "\n"
mdtext += '<iframe src="' + plotly_3mer_plot_path + '" width="500" height="500"></iframe>' + "\n"
mdtext += '</div>'
mdtext += """
**Figure:** 3-mer percentages in the positive and negative dataset. In case of
a uniform distribution with all 3-mers present, each 3-mer would have a
percentage = 1.5625. R2 = %.6f.
""" %(r2_3mer)
# Include 4-mer code.
mdtext += '<div class="container-fluid" style="margin-top:40px">' + "\n"
mdtext += '<iframe src="' + plotly_4mer_plot_path + '" width="600" height="600"></iframe>' + "\n"
mdtext += '</div>'
mdtext += """
**Figure:** 4-mer percentages in the positive and negative dataset. In case of
a uniform distribution with all 4-mers present, each 4-mer would have a
percentage = 0.390625. R2 = %.6f.
""" %(r2_4mer)
# Include 5-mer code.
mdtext += '<div class="container-fluid" style="margin-top:40px">' + "\n"
mdtext += '<iframe src="' + plotly_5mer_plot_path + '" width="700" height="700"></iframe>' + "\n"
mdtext += '</div>'
mdtext += """
**Figure:** 5-mer percentages in the positive and negative dataset. In case of
a uniform distribution with all 5-mers present, each 5-mer would have a
percentage = 0.09765625. R2 = %.6f.
""" %(r2_5mer)
# Make the k-mer tables.
top3mertab = generate_top_kmer_md_table(pos_3mer_dic, neg_3mer_dic,
top=kmer_top,
val_type="p")
top4mertab = generate_top_kmer_md_table(pos_4mer_dic, neg_4mer_dic,
top=kmer_top,
val_type="p")
top5mertab = generate_top_kmer_md_table(pos_5mer_dic, neg_5mer_dic,
top=kmer_top,
val_type="p")
mdtext += """
## Top k-mer statistics ### {#kmer-stats}
**Table:** Top %i 3-mers for the positive and negative set and their percentages in the respective sequence set. In case of uniform distribution with all 3-mers present, each 3-mer would have a percentage = 1.5625.
""" %(kmer_top)
mdtext += top3mertab
mdtext += "\n \n"
mdtext += """
**Table:** Top %i 4-mers for the positive and negative set and their percentages in the respective sequence set. In case of uniform distribution with all 4-mers present, each 4-mer would have a percentage = 0.390625.
""" %(kmer_top)
mdtext += top4mertab
mdtext += "\n \n"
mdtext += """
**Table:** Top %i 5-mers for the positive and negative set and their percentages in the respective sequence set. In case of uniform distribution with all 5-mers present, each 5-mer would have a percentage = 0.09765625.
""" %(kmer_top)
mdtext += top5mertab
mdtext += "\n \n \n"
if pos_str_stats_dic and neg_str_stats_dic:
# Checks.
assert pos_str_stats_dic['seqlen_sum'], "unexpected total sequence length of 0 encountered"
assert neg_str_stats_dic['seqlen_sum'], "unexpected total sequence length of 0 encountered"
# Make structural elements bar plot.
create_str_elem_grouped_bar_plot(pos_str_stats_dic, neg_str_stats_dic,
str_elem_plot_out,
theme=theme,
disable_title=True)
str_elem_plot_path = plots_folder + "/" + str_elem_plot
mdtext += """
## Structural elements distribution ### {#str-elem-plot}
Mean position-wise probabilities of the different loop context structural elements are shown
for both the positive and negative dataset. U: unpaired, E: external loop, H: hairpin loop,
I: internal loop, M: multi-loop, S: paired.
"""
mdtext += '<img src="' + str_elem_plot_path + '" alt="Structural elements distribution"' + "\n"
mdtext += 'title="Structural elements distribution" width="650" />' + "\n"
mdtext += """
**Figure:** Mean position-wise probabilities of different loop context structural elements for
the positive and negative dataset. U: unpaired, E: external loop, H: hairpin loop,
I: internal loop, M: multi-loop, S: paired.
"""
mdtext += """
## Secondary structure statistics ### {#bp-stats}
**Table:** Secondary structure statistics of
the generated training set. Mean probabilities p(..) of structural elements
are given together with standard deviations (+- ..).
"""
mdtext += "| Attribute | Positives | Negatives | \n"
mdtext += "| :-: | :-: | :-: |\n"
mdtext += "| total sequence length | %i | %i |\n" %(pos_str_stats_dic['seqlen_sum'], neg_str_stats_dic['seqlen_sum'])
mdtext += "| mean p(paired) | %.4f (+-%.4f) | %.4f (+-%.4f) |\n" %(pos_str_stats_dic['S'][0], pos_str_stats_dic['S'][1], neg_str_stats_dic['S'][0], neg_str_stats_dic['S'][1])
mdtext += "| mean p(unpaired) | %.4f (+-%.4f) | %.4f (+-%.4f) |\n" %(pos_str_stats_dic['U'][0], pos_str_stats_dic['U'][1], neg_str_stats_dic['U'][0], neg_str_stats_dic['U'][1])
mdtext += "| mean p(external loop) | %.4f (+-%.4f) | %.4f (+-%.4f) |\n" %(pos_str_stats_dic['E'][0], pos_str_stats_dic['E'][1], neg_str_stats_dic['E'][0], neg_str_stats_dic['E'][1])
mdtext += "| mean p(hairpin loop) | %.4f (+-%.4f) | %.4f (+-%.4f) |\n" %(pos_str_stats_dic['H'][0], pos_str_stats_dic['H'][1], neg_str_stats_dic['H'][0], neg_str_stats_dic['H'][1])
mdtext += "| mean p(internal loop) | %.4f (+-%.4f) | %.4f (+-%.4f) |\n" %(pos_str_stats_dic['I'][0], pos_str_stats_dic['I'][1], neg_str_stats_dic['I'][0], neg_str_stats_dic['I'][1])
mdtext += "| mean p(multi loop) | %.4f (+-%.4f) | %.4f (+-%.4f) |\n" %(pos_str_stats_dic['M'][0], pos_str_stats_dic['M'][1], neg_str_stats_dic['M'][0], neg_str_stats_dic['M'][1])
mdtext += "\n \n \n"
# Conservation scores plots and stats.
if pos_phastcons_stats_dic or pos_phylop_stats_dic:
mdtext += """
## Conservation scores distribution ### {#con-plot}
Mean conservation scores with standard deviations are shown for the positive
and negative set.
"""
# phastCons plot.
if pos_phastcons_stats_dic and neg_phastcons_stats_dic:
create_conservation_scores_bar_plot(pos_phastcons_stats_dic, neg_phastcons_stats_dic,
phastcons_plot_out, "phastCons",
disable_title=True,
theme=theme)
phastcons_plot_path = plots_folder + "/" + phastcons_plot
mdtext += '<img src="' + phastcons_plot_path + '" alt="phastCons scores distribution"' + "\n"
mdtext += 'title="phastCons scores distribution" width="400" />' + "\n"
mdtext += """
**Figure:** Mean phastCons conservation score and standard deviation for the positive and negative dataset.
"""
# phyloP plot.
if pos_phylop_stats_dic and neg_phylop_stats_dic:
create_conservation_scores_bar_plot(pos_phylop_stats_dic, neg_phylop_stats_dic,
phylop_plot_out, "phyloP",
disable_title=True,
theme=theme)
phylop_plot_path = plots_folder + "/" + phylop_plot
mdtext += '<img src="' + phylop_plot_path + '" alt="phyloP scores distribution"' + "\n"
mdtext += 'title="phyloP scores distribution" width="400" />' + "\n"
mdtext += """
**Figure:** Mean phyloP conservation score and standard deviation (before -1 .. 1 normalization) for the positive and negative dataset.
"""
mdtext += """
## Conservation scores statistics ### {#con-stats}
**Table:** Conservation scores statistics. Note that phyloP statistics are
calculated before normalization (normalizing values to -1 .. 1).
"""
mdtext += "| Attribute | Positives | Negatives | \n"
mdtext += "| :-: | :-: | :-: |\n"
if pos_phastcons_stats_dic and neg_phastcons_stats_dic:
pos_pc_zero_perc = "%.2f" % ((pos_phastcons_stats_dic["zero_pos"] / pos_phastcons_stats_dic["total_pos"]) * 100)
neg_pc_zero_perc = "%.2f" % ((neg_phastcons_stats_dic["zero_pos"] / neg_phastcons_stats_dic["total_pos"]) * 100)
mdtext += "| # phastCons scores | %i | %i |\n" %(pos_phastcons_stats_dic['total_pos'], neg_phastcons_stats_dic['total_pos'])
mdtext += "| # zero scores | %i | %i |\n" %(pos_phastcons_stats_dic['zero_pos'], neg_phastcons_stats_dic['zero_pos'])
mdtext += '| % zero scores |' + " %s | %s |\n" %(pos_pc_zero_perc, neg_pc_zero_perc)
mdtext += "| min score | %s | %s |\n" %(str(pos_phastcons_stats_dic['min']), str(neg_phastcons_stats_dic['min']))
mdtext += "| max score | %s | %s |\n" %(str(pos_phastcons_stats_dic['max']), str(neg_phastcons_stats_dic['max']))
mdtext += "| mean score | %.3f (+-%.3f) | %.3f (+-%.3f) |\n" %(pos_phastcons_stats_dic['mean'], pos_phastcons_stats_dic['stdev'], neg_phastcons_stats_dic['mean'], neg_phastcons_stats_dic['stdev'])
if pos_phylop_stats_dic and neg_phylop_stats_dic:
pos_pp_zero_perc = "%.2f" % ((pos_phylop_stats_dic["zero_pos"] / pos_phylop_stats_dic["total_pos"]) * 100)
neg_pp_zero_perc = "%.2f" % ((neg_phylop_stats_dic["zero_pos"] / neg_phylop_stats_dic["total_pos"]) * 100)
mdtext += "| # phyloP scores | %i | %i |\n" %(pos_phylop_stats_dic['total_pos'], neg_phylop_stats_dic['total_pos'])
mdtext += "| # zero scores | %i | %i |\n" %(pos_phylop_stats_dic['zero_pos'], neg_phylop_stats_dic['zero_pos'])
mdtext += '| % zero scores |' + " %s | %s |\n" %(pos_pp_zero_perc, neg_pp_zero_perc)
mdtext += "| min score | %s | %s |\n" %(str(pos_phylop_stats_dic['min']), str(neg_phylop_stats_dic['min']))
mdtext += "| max score | %s | %s |\n" %(str(pos_phylop_stats_dic['max']), str(neg_phylop_stats_dic['max']))
mdtext += "| mean score | %.3f (+-%.3f) | %.3f (+-%.3f) |\n" %(pos_phylop_stats_dic['mean'], pos_phylop_stats_dic['stdev'], neg_phylop_stats_dic['mean'], neg_phylop_stats_dic['stdev'])
mdtext += "\n \n \n"
# Exon-intron region plots and stats.
if pos_eia_stats_dic and neg_eia_stats_dic:
mdtext += """
## Exon-intron region distribution ### {#eia-plot}
Distribution of exon and intron regions for the positive and negative set.
"""
# EIA plot.
create_reg_annot_grouped_bar_plot(pos_eia_stats_dic, neg_eia_stats_dic, eia_plot_out,
["E", "I", "N"],
perc=True, theme=theme)
eia_plot_path = plots_folder + "/" + eia_plot
mdtext += '<img src="' + eia_plot_path + '" alt="Exon-intron region distribution"' + "\n"
mdtext += 'title="Exon-intron region distribution" width="550" />' + "\n"
mdtext += """
**Figure:** Percentages of exon (E) and intron (I) regions for the positive and negative set.
If --eia-n is set, also include regions not covered by introns or exons (N).
## Exon-intron region statistics ### {#eia-stats}
**Table:** Exon-intron region statistics for the positive and negative set.
If --eia-ib is set, also include statistics for sites containing intron
5' (F) and intron 3' (T) ends.
"""
# EIA stats.
if "F" in pos_eia_stats_dic:
pos_perc_f_sites = "%.2f" % ((pos_eia_stats_dic['F'] / c_pos_out)*100) + " %"
pos_perc_t_sites = "%.2f" % ((pos_eia_stats_dic['T'] / c_pos_out)*100) + " %"
neg_perc_f_sites = "%.2f" % ((neg_eia_stats_dic['F'] / c_neg_out)*100) + " %"
neg_perc_t_sites = "%.2f" % ((neg_eia_stats_dic['T'] / c_neg_out)*100) + " %"
pos_perc_e = "%.2f" % ((pos_eia_stats_dic['E'] / pos_eia_stats_dic['total_pos'])*100)
pos_perc_i = "%.2f" % ((pos_eia_stats_dic['I'] / pos_eia_stats_dic['total_pos'])*100)
neg_perc_e = "%.2f" % ((neg_eia_stats_dic['E'] / neg_eia_stats_dic['total_pos'])*100)
neg_perc_i = "%.2f" % ((neg_eia_stats_dic['I'] / neg_eia_stats_dic['total_pos'])*100)
if "N" in pos_eia_stats_dic:
pos_perc_n = "%.2f" % ((pos_eia_stats_dic['N'] / pos_eia_stats_dic['total_pos'])*100)
neg_perc_n = "%.2f" % ((neg_eia_stats_dic['N'] / neg_eia_stats_dic['total_pos'])*100)
mdtext += "| Attribute | Positives | Negatives | \n"
mdtext += "| :-: | :-: | :-: |\n"
mdtext += '| % E |' + " %s | %s |\n" %(pos_perc_e, neg_perc_e)
mdtext += '| % I |' + " %s | %s |\n" %(pos_perc_i, neg_perc_i)
if "N" in pos_eia_stats_dic:
mdtext += '| % N |' + " %s | %s |\n" %(pos_perc_n, neg_perc_n)
if "F" in pos_eia_stats_dic:
mdtext += "| F sites | %i (%s) | %i (%s) |\n" %(pos_eia_stats_dic['F'], pos_perc_f_sites, neg_eia_stats_dic['F'], neg_perc_f_sites)
mdtext += "| T sites | %i (%s) | %i (%s) |\n" %(pos_eia_stats_dic['T'], pos_perc_t_sites, neg_eia_stats_dic['T'], neg_perc_t_sites)
mdtext += "\n \n \n"
# Transcript region plots and stats.
if pos_tra_stats_dic and neg_tra_stats_dic:
mdtext += """
## Transcript region distribution ### {#tra-plot}
Distribution of transcript regions for the positive and negative set.
"""
# TRA plot.
create_reg_annot_grouped_bar_plot(pos_tra_stats_dic, neg_tra_stats_dic, tra_plot_out,
["F", "C", "T", "N"],
perc=True, theme=theme)
tra_plot_path = plots_folder + "/" + tra_plot
mdtext += '<img src="' + tra_plot_path + '" alt="Transcript region distribution"' + "\n"
mdtext += 'title="Transcript region distribution" width="550" />' + "\n"
mdtext += """
**Figure:** Percentages of 5'UTR (F), CDS (C), and 3'UTR (T) positions as well as
positions not covered by these transcript regions (N) for the positive and negative set.
## Transcript region statistics ### {#tra-stats}
**Table:** Transcript region statistics for the positive and negative set.
Percentages of positions covered by 5'UTR (F), CDS (C), 3'UTR (T), or non
of these regions (N) are given.
If --tra-codons is set, also include statistics for start codons (S) and
stop codons (E) (sites which contain these).
If --tra-borders is set, also include statistics for transcript starts (A),
transcript ends (Z), exon borders (B) (sites which contain these).
"""
# TRA stats.
pos_perc_f = "%.2f" % ((pos_tra_stats_dic['F'] / pos_tra_stats_dic['total_pos'])*100)
pos_perc_c = "%.2f" % ((pos_tra_stats_dic['C'] / pos_tra_stats_dic['total_pos'])*100)
pos_perc_t = "%.2f" % ((pos_tra_stats_dic['T'] / pos_tra_stats_dic['total_pos'])*100)
pos_perc_n = "%.2f" % ((pos_tra_stats_dic['N'] / pos_tra_stats_dic['total_pos'])*100)
neg_perc_f = "%.2f" % ((neg_tra_stats_dic['F'] / neg_tra_stats_dic['total_pos'])*100)
neg_perc_c = "%.2f" % ((neg_tra_stats_dic['C'] / neg_tra_stats_dic['total_pos'])*100)
neg_perc_t = "%.2f" % ((neg_tra_stats_dic['T'] / neg_tra_stats_dic['total_pos'])*100)
neg_perc_n = "%.2f" % ((neg_tra_stats_dic['N'] / neg_tra_stats_dic['total_pos'])*100)
mdtext += "| Attribute | Positives | Negatives | \n"
mdtext += "| :-: | :-: | :-: |\n"
mdtext += '| % F |' + " %s | %s |\n" %(pos_perc_f, neg_perc_f)
mdtext += '| % C |' + " %s | %s |\n" %(pos_perc_c, neg_perc_c)
mdtext += '| % T |' + " %s | %s |\n" %(pos_perc_t, neg_perc_t)
mdtext += '| % N |' + " %s | %s |\n" %(pos_perc_n, neg_perc_n)
# Start stop codon annotations.
if "S" in pos_tra_stats_dic:
pos_perc_s_sites = "%.2f" % ((pos_tra_stats_dic['S'] / c_pos_out)*100) + " %"
pos_perc_e_sites = "%.2f" % ((pos_tra_stats_dic['E'] / c_pos_out)*100) + " %"
neg_perc_s_sites = "%.2f" % ((neg_tra_stats_dic['S'] / c_neg_out)*100) + " %"
neg_perc_e_sites = "%.2f" % ((neg_tra_stats_dic['E'] / c_neg_out)*100) + " %"
mdtext += "| S sites | %i (%s) | %i (%s) |\n" %(pos_tra_stats_dic['S'], pos_perc_s_sites, neg_tra_stats_dic['S'], neg_perc_s_sites)
mdtext += "| E sites | %i (%s) | %i (%s) |\n" %(pos_tra_stats_dic['E'], pos_perc_e_sites, neg_tra_stats_dic['E'], neg_perc_e_sites)
# Border annotations.
if "A" in pos_tra_stats_dic:
pos_perc_a_sites = "%.2f" % ((pos_tra_stats_dic['A'] / c_pos_out)*100) + " %"
pos_perc_b_sites = "%.2f" % ((pos_tra_stats_dic['B'] / c_pos_out)*100) + " %"
pos_perc_z_sites = "%.2f" % ((pos_tra_stats_dic['Z'] / c_pos_out)*100) + " %"
neg_perc_a_sites = "%.2f" % ((neg_tra_stats_dic['A'] / c_neg_out)*100) + " %"
neg_perc_b_sites = "%.2f" % ((neg_tra_stats_dic['B'] / c_neg_out)*100) + " %"
neg_perc_z_sites = "%.2f" % ((neg_tra_stats_dic['Z'] / c_neg_out)*100) + " %"
mdtext += "| A sites | %i (%s) | %i (%s) |\n" %(pos_tra_stats_dic['A'], pos_perc_a_sites, neg_tra_stats_dic['A'], neg_perc_a_sites)
mdtext += "| B sites | %i (%s) | %i (%s) |\n" %(pos_tra_stats_dic['B'], pos_perc_b_sites, neg_tra_stats_dic['B'], neg_perc_b_sites)
mdtext += "| Z sites | %i (%s) | %i (%s) |\n" %(pos_tra_stats_dic['Z'], pos_perc_z_sites, neg_tra_stats_dic['Z'], neg_perc_z_sites)
mdtext += "\n \n \n"
# Repeat region plots and stats.
if pos_rra_stats_dic and neg_rra_stats_dic:
mdtext += """
## Repeat region distribution ### {#rra-plot}
Distribution of repeat regions for the positive and negative set. Repeat
regions are annotated in the .2bit genomic sequences file as lowercase
sequences. These regions were identified by RepeatMasker and Tandem Repeats
Finder (with period of 12 or less).
"""
# RRA plot.
create_reg_annot_grouped_bar_plot(pos_rra_stats_dic, neg_rra_stats_dic, rra_plot_out,
["R", "N"],
perc=True, theme=theme)
rra_plot_path = plots_folder + "/" + rra_plot
mdtext += '<img src="' + rra_plot_path + '" alt="Repeat region distribution"' + "\n"
mdtext += 'title="Repeat region distribution" width="550" />' + "\n"
mdtext += """
**Figure:** Percentages of repeat (R) and no-repeat (N) regions for the
positive and negative set.
## Repeat region statistics ### {#rra-stats}
**Table:** Repeat region statistics for the positive and negative set.
Percentages of positive and negative regions covered by repeat (R)
and non-repeat (N) regions are given.
"""
# RRA stats.
pos_perc_r = "%.2f" % ((pos_rra_stats_dic['R'] / pos_rra_stats_dic['total_pos'])*100)
pos_perc_n = "%.2f" % ((pos_rra_stats_dic['N'] / pos_rra_stats_dic['total_pos'])*100)
neg_perc_r = "%.2f" % ((neg_rra_stats_dic['R'] / neg_rra_stats_dic['total_pos'])*100)
neg_perc_n = "%.2f" % ((neg_rra_stats_dic['N'] / neg_rra_stats_dic['total_pos'])*100)
mdtext += "| Attribute | Positives | Negatives |\n"
mdtext += "| :-: | :-: | :-: |\n"
mdtext += '| % R |' + " %s | %s |\n" %(pos_perc_r, neg_perc_r)
mdtext += '| % N |' + " %s | %s |\n" %(pos_perc_n, neg_perc_n)
mdtext += "\n \n \n"
# Target gene biotype count stats.
if target_gbtc_dic and all_gbtc_dic:
mdtext += """
## Target gene biotype statistics ### {#gbt-stats}
**Table:** Target gene biotype counts for the positive set and their percentages
(count normalized by total count for the respective gene biotype).
"""
mdtext += "| Gene biotype | Target count | Total count | Percentage | \n"
mdtext += "| :-: | :-: | :-: | :-: |\n"
unit = " %"
for bt, target_c in sorted(target_gbtc_dic.items(), key=lambda item: item[1], reverse=True):
all_c = all_gbtc_dic[bt]
perc_c = "%.2f" % ((target_c / all_c) * 100)
mdtext += "| %s | %i | %i | %s%s |\n" %(bt, target_c, all_c, perc_c, unit)
mdtext += "\n \n \n"
if t2hc_dic and t2i_dic:
mdtext += """
## Target region overlap statistics ### {#tro-stats}
**Table:** Target region overlap statistics, showing the top %i targeted
regions (transcript or genes), with the # overlaps == # of positive sites
overlapping with the region.
""" %(target_top)
if dataset_type == "t":
mdtext += "| # overlaps | Transcript ID | Transcript biotype | Gene ID | Gene name | Gene biotype | \n"
mdtext += "| :-: | :-: | :-: | :-: | :-: | :-: |\n"
i = 0
for tr_id, ol_c in sorted(t2hc_dic.items(), key=lambda item: item[1], reverse=True):
i += 1
if i > target_top:
break
tr_bt = t2i_dic[tr_id][0]
gene_id = t2i_dic[tr_id][1]
gene_name = t2i_dic[tr_id][2]
gene_bt = t2i_dic[tr_id][3]
mdtext += "| %i | %s | %s | %s | %s | %s |\n" %(ol_c, tr_id, tr_bt, gene_id, gene_name, gene_bt)
mdtext += "| ... | | | | | |\n"
mdtext += "\n \n \n"
elif dataset_type == "g":
mdtext += "| # overlaps | Gene ID | Gene name | Gene biotype | \n"
mdtext += "| :-: | :-: | :-: | :-: |\n"
i = 0
for gene_id, ol_c in sorted(t2hc_dic.items(), key=lambda item: item[1], reverse=True):
i += 1
if i > target_top:
break
gene_name = t2i_dic[gene_id][0]
gene_bt = t2i_dic[gene_id][1]
mdtext += "| %i | %s | %s | %s |\n" %(ol_c, gene_id, gene_name, gene_bt)
mdtext += "| ... | | | |\n"
mdtext += "\n \n \n"
# Additional BED annotations.
if add_feat_dic_list:
mdtext += """
## BED feature statistics ### {#bed-stats}
Additional BED annotation feature statistics (from --feat-in table) for the
positive and negative dataset.
"""
pos_cov_dic = {}
neg_cov_dic = {}
for i in range(0, len(add_feat_dic_list) - 1, 2):
pos_stats_dic = add_feat_dic_list[i]
neg_stats_dic = add_feat_dic_list[i+1]
feat_id = pos_stats_dic["feat_id"]
feat_type = pos_stats_dic["feat_type"]
pos_total_pos = pos_stats_dic["total_pos"]
neg_total_pos = neg_stats_dic["total_pos"]
pos_perc_zero_sites = "%.2f" % ((pos_stats_dic['zero_sites'] / pos_stats_dic['total_sites'])*100) + " %"
neg_perc_zero_sites = "%.2f" % ((neg_stats_dic['zero_sites'] / neg_stats_dic['total_sites'])*100) + " %"
if feat_type == "C":
pos_c_0 = pos_stats_dic["0"]
pos_c_1 = pos_stats_dic["1"]
neg_c_0 = neg_stats_dic["0"]
neg_c_1 = neg_stats_dic["1"]
pos_perc_0 = "%.2f" % ((pos_c_0 / pos_total_pos)*100) + " %"
pos_perc_1 = "%.2f" % ((pos_c_1 / pos_total_pos)*100) + " %"
neg_perc_0 = "%.2f" % ((neg_c_0 / neg_total_pos)*100) + " %"
neg_perc_1 = "%.2f" % ((neg_c_1 / neg_total_pos)*100) + " %"
else:
pos_mean = pos_stats_dic["mean"]
pos_stdev = pos_stats_dic["stdev"]
neg_mean = neg_stats_dic["mean"]
neg_stdev = neg_stats_dic["stdev"]
pos_c_0 = pos_stats_dic["zero_pos"]
neg_c_0 = neg_stats_dic["zero_pos"]
pos_c_1 = pos_total_pos - pos_c_0
neg_c_1 = neg_total_pos - neg_c_0
pos_perc_0 = "%.2f" % ((pos_c_0 / pos_total_pos)*100) + " %"
pos_perc_1 = "%.2f" % ((pos_c_1 / pos_total_pos)*100) + " %"
neg_perc_0 = "%.2f" % ((neg_c_0 / neg_total_pos)*100) + " %"
neg_perc_1 = "%.2f" % ((neg_c_1 / neg_total_pos)*100) + " %"
# Store feature coverage (percentage of positions overlapping).
pos_feat_cov = (pos_c_1 / pos_total_pos) * 100
neg_feat_cov = (neg_c_1 / neg_total_pos) * 100
pos_cov_dic[feat_id] = pos_feat_cov
neg_cov_dic[feat_id] = neg_feat_cov
mdtext += """
### BED annotation file feature \"%s\" statistics
""" %(feat_id)
if feat_type == "C":
mdtext += """
**Table:** BED feature region length + score statistics for the
positive and negative set.
Feature type is one-hot encoding, i.e., every overlapping position
gets a 1 assigned, every not overlapping position a 0.
"""
else:
mdtext += """
**Table:** BED feature region length + score statistics for the
positive and negative set.
Feature type is numerical, i.e., every position gets the score of the
overlapping feature region assigned. In case of no feature region overlap,
the position gets a score of 0.
"""
mdtext += "| Attribute | Positives | Negatives |\n"
mdtext += "| :-: | :-: | :-: |\n"
mdtext += "| mean length | %.2f (+-%.2f) | %.2f (+-%.2f) |\n" %(pos_stats_dic["mean_l"], pos_stats_dic["stdev_l"], neg_stats_dic["mean_l"], neg_stats_dic["stdev_l"])
mdtext += "| median length | %i | %i |\n" %(pos_stats_dic["median_l"], neg_stats_dic["median_l"])
mdtext += "| min length | %i | %i |\n" %(pos_stats_dic["min_l"], neg_stats_dic["min_l"])
mdtext += "| max length | %i | %i |\n" %(pos_stats_dic["max_l"], neg_stats_dic["max_l"])
if feat_type == "C":
mdtext += "| # total positions | %i | %i |\n" %(pos_total_pos, neg_total_pos)
mdtext += "| # 0 positions | %i (%s) | %i (%s) |\n" %(pos_c_0, pos_perc_0, neg_c_0, neg_perc_0)
mdtext += "| # 1 positions | %i (%s) | %i (%s) |\n" %(pos_c_1, pos_perc_1, neg_c_1, neg_perc_1)
mdtext += '| % all-zero sites |' + " %s | %s |\n" %(pos_perc_zero_sites, neg_perc_zero_sites)
else:
mdtext += "| # total positions | %i | %i |\n" %(pos_total_pos, neg_total_pos)
mdtext += "| # 0 positions | %i (%s) | %i (%s) |\n" %(pos_c_0, pos_perc_0, neg_c_0, neg_perc_0)
mdtext += "| # non-0 positions | %i (%s) | %i (%s) |\n" %(pos_c_1, pos_perc_1, neg_c_1, neg_perc_1)
mdtext += '| % all-zero sites |' + " %s | %s |\n" %(pos_perc_zero_sites, neg_perc_zero_sites)
mdtext += "| mean score | %.3f (+-%.3f) | %.3f (+-%.3f) |\n" %(pos_mean, pos_stdev, neg_mean, neg_stdev)
mdtext += "\n \n \n"
# Create additional BED features coverage plot.
mdtext += """
## BED feature coverage distribution ### {#bed-plot}
Additional BED feature coverage distributions for the
positive and negative dataset.
"""
create_train_set_bed_feat_cov_plot(pos_cov_dic, neg_cov_dic,
bed_cov_plot_out,
theme=theme)
bed_cov_plot_path = plots_folder + "/" + bed_cov_plot
mdtext += '<img src="' + bed_cov_plot_path + '" alt="BED feature coverage distribution"' + "\n"
mdtext += 'title="BED feature coverage distribution" width="800" />' + "\n"
mdtext += """
**Figure:** Additional BED feature coverage distributions for the
positive and negative dataset. Feature coverage means how much
percent of the positive or negative regions are covered by the
respective BED feature (i.e., overlap with it). The BED feature
IDs from --feat-in are given on the y-axis, their coverage on the
x-axis.
"""
print("Generate HTML report ... ")
# Convert mdtext to html.
md2html = markdown(mdtext, extensions=['attr_list', 'tables'])
#OUTMD = open(md_out,"w")
#OUTMD.write("%s\n" %(mdtext))
#OUTMD.close()
OUTHTML = open(html_out,"w")
OUTHTML.write("%s\n" %(md2html))
OUTHTML.close()
################################################################################
def create_train_set_bed_feat_cov_plot(pos_cov_dic, neg_cov_dic, out_plot,
                                       theme=1):
    """
    Create a grouped bar plot, showing the coverage for each BED feature
    from --feat-in over the positive and negative set. Coverage means
    how much percentage of the positive or negative regions are covered
    by the BED feature (== overlap with it).

    pos_cov_dic, neg_cov_dic:
        Feature ID -> feature coverage in percent, for the positive and
        negative set respectively.
    out_plot:
        Output plot file to store the figure in.
    theme:
        Plot theme (1: default light theme, 2: dark theme with
        transparent background).

    Create a dataframe using Pandas, and use seaborn for plotting.
    Store plot in out_plot.
    MV colors:
    #69e9f6, #f154b2
    """
    # Checker.
    assert pos_cov_dic, "given dictionary pos_cov_dic empty"
    assert neg_cov_dic, "given dictionary neg_cov_dic empty"
    # Make pandas dataframe (long format: one row per set + feature ID).
    pos_label = "Positives"
    neg_label = "Negatives"
    data = {'set': [], 'feat_id': [], 'perc': []}
    for set_label, cov_dic in ((pos_label, pos_cov_dic), (neg_label, neg_cov_dic)):
        for feat_id, feat_cov in cov_dic.items():
            data['set'].append(set_label)
            data['feat_id'].append(feat_id)
            data['perc'].append(feat_cov)
    df = pd.DataFrame(data, columns=['set', 'feat_id', 'perc'])
    # Scale plot height with # of features so y-axis labels stay readable.
    c_ids = len(pos_cov_dic)
    fheight = 1.5 * c_ids
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        g = sns.catplot(x="perc", y="feat_id", hue="set", data=df,
                        kind="bar", palette=["#69e9f6", "#f154b2"],
                        edgecolor="lightgrey",
                        legend=False)
        g.fig.set_figwidth(15)
        g.fig.set_figheight(fheight)
        # Modify axes.
        ax = g.axes
        ax[0,0].set_xlabel("Feature coverage (%)",fontsize=20)
        ax[0,0].set(ylabel=None)
        ax[0,0].tick_params(axis='x', labelsize=16)
        ax[0,0].tick_params(axis='y', labelsize=20)
        # Add legend at specific position.
        plt.legend(loc=(1.01, 0.4), fontsize=16)
        g.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        # Dark theme colors (an unused box_color constant was removed).
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        # Make plot.
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        g = sns.catplot(x="perc", y="feat_id", hue="set", data=df,
                        kind="bar", palette=["blue", "darkblue"],
                        edgecolor="#fcc826",
                        legend=False)
        g.fig.set_figwidth(15)
        g.fig.set_figheight(fheight)
        # Modify axes. Use the same descriptive x-label as theme 1
        # (was "Percentage (%)", a copy-paste leftover from another plot).
        ax = g.axes
        ax[0,0].set_xlabel("Feature coverage (%)",fontsize=20)
        ax[0,0].set(ylabel=None)
        ax[0,0].tick_params(axis='x', labelsize=16)
        ax[0,0].tick_params(axis='y', labelsize=20)
        # Add legend at specific position, transparent for dark background.
        plt.legend(loc=(1.01, 0.4), fontsize=16, framealpha=0)
        g.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def create_reg_annot_grouped_bar_plot(pos_ra_dic, neg_ra_dic, out_plot,
                                      plot_labels,
                                      perc=False,
                                      theme=1):
    """
    Create a bar plot for region labels, given plot_labels to define what
    counts to plot. If perc=True, look for "total_pos" dictionary entry
    to normalize counts and plot percentages.

    pos_ra_dic, neg_ra_dic:
        Region label -> count dictionaries for the positive and negative
        set. Must contain a "total_pos" entry if perc=True.
    out_plot:
        Output plot file to store the figure in.
    plot_labels:
        Region labels (dictionary keys) to include in the plot.
    perc:
        If True, plot percentages (count / total_pos * 100) instead of
        raw position counts.
    theme:
        Plot theme (1: default light theme, 2: dark theme with
        transparent background).

    Create a dataframe using Pandas, and use seaborn for plotting.
    Store plot in out_plot.
    MV colors:
    #69e9f6, #f154b2
    """
    # Checker.
    assert pos_ra_dic, "given dictionary pos_ra_dic empty"
    assert neg_ra_dic, "given dictionary neg_ra_dic empty"
    assert plot_labels, "given labels to plot list empty"
    if perc:
        assert pos_ra_dic["total_pos"], "total_pos key missing in pos_ra_dic"
        assert neg_ra_dic["total_pos"], "total_pos key missing in neg_ra_dic"
    # Make pandas dataframe (long format: one row per set + region label).
    data = {'set': [], 'label': [], 'count': []}
    for set_label, ra_dic in (("Positives", pos_ra_dic), ("Negatives", neg_ra_dic)):
        for l in ra_dic:
            if l not in plot_labels:
                continue
            lc = ra_dic[l]
            if perc:
                # Normalize label count to percentage of all positions.
                lc = (lc / ra_dic["total_pos"]) * 100
            data['set'].append(set_label)
            data['label'].append(l)
            data['count'].append(lc)
    # Column order aligned with the data dict keys (seaborn selects
    # columns by name, so this only affects dataframe layout).
    df = pd.DataFrame(data, columns=['set', 'label', 'count'])
    y_label = "% positions" if perc else "# positions"
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        g = sns.catplot(x="label", y="count", hue="set", data=df,
                        kind="bar", palette=["#69e9f6", "#f154b2"],
                        edgecolor="lightgrey",
                        legend=False)
        g.fig.set_figwidth(8)
        g.fig.set_figheight(4)
        # Modify axes.
        ax = g.axes
        ax[0,0].set_ylabel(y_label,fontsize=22)
        ax[0,0].set(xlabel=None)
        ax[0,0].tick_params(axis='x', labelsize=22)
        ax[0,0].tick_params(axis='y', labelsize=17)
        # Add legend at specific position.
        plt.legend(loc=(1.01, 0.4), fontsize=17)
        g.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        # Dark theme colors (an unused box_color constant was removed).
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        # Make plot.
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        g = sns.catplot(x="label", y="count", hue="set", data=df,
                        kind="bar", palette=["blue", "darkblue"],
                        edgecolor="#fcc826",
                        legend=False)
        g.fig.set_figwidth(8)
        g.fig.set_figheight(4)
        # Modify axes.
        ax = g.axes
        ax[0,0].set_ylabel(y_label,fontsize=22)
        ax[0,0].set(xlabel=None)
        ax[0,0].tick_params(axis='x', labelsize=22)
        ax[0,0].tick_params(axis='y', labelsize=17)
        # Add legend at specific position, transparent for dark background.
        plt.legend(loc=(1.01, 0.4), fontsize=17, framealpha=0)
        g.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def create_conservation_scores_bar_plot(pos_con_dic, neg_con_dic, out_plot,
                                        con_type,
                                        disable_title=False,
                                        theme=1):
    """
    Create a bar plot, showing the mean conservation score with standard
    deviation error bar in the positive and negative set.

    pos_con_dic, neg_con_dic:
        Conservation score statistics dictionaries for the positive and
        negative set, with keys:
        mean, stdev, zero_pos, total_pos
        (only mean and stdev are used here).
    out_plot:
        Output plot file to store the figure in.
    con_type:
        Conservation score type string (e.g. "phastCons", "phyloP"),
        used in the y-axis label and plot title.
    disable_title:
        If True, do not set a plot title.
    theme:
        Plot theme (1: default light theme, 2: dark theme with
        transparent background).

    Create a dataframe using Pandas, and use seaborn for plotting.
    Store plot in out_plot.
    MV colors:
    #69e9f6, #f154b2
    """
    # Checker.
    assert pos_con_dic, "given dictionary pos_con_dic empty"
    assert neg_con_dic, "given dictionary neg_con_dic empty"
    # Make pandas dataframe with one row per set.
    data = {'set': ["Positives", "Negatives"],
            'mean': [pos_con_dic['mean'], neg_con_dic['mean']],
            'stdev': [pos_con_dic['stdev'], neg_con_dic['stdev']]}
    df = pd.DataFrame(data, columns=['set', 'mean', 'stdev'])
    y_label = "Mean " + con_type + " score"
    set_title = con_type + " scores distribution"
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        # NOTE(review): yerr is not a seaborn parameter; it is forwarded
        # to matplotlib's bar() — confirm the pinned seaborn version
        # still passes it through.
        sns.barplot(x="set", y="mean", data=df, yerr=df['stdev'], ecolor="darkgrey",
                    palette=["#69e9f6", "#f154b2"],
                    edgecolor="lightgrey")
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label,fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=14)
        if not disable_title:
            ax.axes.set_title(set_title, fontsize=20)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        # Dark theme colors (an unused box_color constant was removed).
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        # Make plot.
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.barplot(x="set", y="mean", data=df, yerr=df['stdev'], ecolor="#fcc826",
                    palette=["blue", "darkblue"],
                    edgecolor="#fcc826")
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label,fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=14)
        if not disable_title:
            ax.axes.set_title(set_title, fontsize=20)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def decimal_ceil(a, prec):
    """
    Round up a given decimal number at a certain precision.

    a:
        Number to round up.
    prec:
        Decimal precision (number of decimal places).

    >>> a = 0.002489
    >>> decimal_ceil(a, 3)
    0.003
    >>> decimal_ceil(a, 2)
    0.01
    """
    # Adding half a unit at the target precision before rounding turns
    # np.round's rounding into a ceiling operation at that precision.
    half_unit = 0.5 * 10 ** (-prec)
    return np.round(a + half_unit, prec)
################################################################################
def generate_top_kmer_md_table(pos_kmer_dic, neg_kmer_dic,
                               top=5,
                               val_type="c"):
    """
    Given k-mer count dictionaries for positive and negative set, generate
    a markdown table with top k-mers (sorted by descending dictionary value).

    NOTE: for val_type "p" and "r", the values of pos_kmer_dic and
    neg_kmer_dic get reformatted (in place!) to fixed-precision strings.

    pos_kmer_dic:
        k-mer -> value dictionary for the positive set.
    neg_kmer_dic:
        k-mer -> value dictionary for the negative set.
    top:
        Number of top k-mers to list in the table (dictionaries must
        contain at least this many k-mers).
    val_type:
        Specify type of stored dictionary value.
        c : count (count of k-mer)
        r : ratio (k-mer count / total k-mer count)
        p : percentage ( (k-mer count / total k-mer count) * 100)

    Returns the markdown table as a string.
    """
    assert pos_kmer_dic, "given dictionary pos_kmer_dic empty"
    assert neg_kmer_dic, "given dictionary neg_kmer_dic empty"
    assert re.search("^[c|p|r]$", val_type), "invalid val_type given"
    # Get size of k from first dictionary key.
    k = len(next(iter(pos_kmer_dic)))
    # Expected k-mer number (full 4-letter alphabet).
    exp_kmer_nr = pow(4,k)
    # Count k-mers with non-zero values (== distinct k-mers present).
    pos_kmer_nr = 0
    neg_kmer_nr = 0
    for kmer in pos_kmer_dic:
        if pos_kmer_dic[kmer]:
            pos_kmer_nr += 1
    for kmer in neg_kmer_dic:
        if neg_kmer_dic[kmer]:
            neg_kmer_nr += 1
    pos_kmer_perc = "%.2f " %((pos_kmer_nr / exp_kmer_nr) * 100) + " %"
    neg_kmer_perc = "%.2f " %((neg_kmer_nr / exp_kmer_nr) * 100) + " %"
    # Select top k-mers on the NUMERIC values, BEFORE string formatting.
    # (Fix: previously the formatted strings were sorted, which compares
    # lexicographically, e.g. "9.50" > "10.25", giving wrong rankings
    # for val_type "p" and "r".)
    pos_topk_list = [kmer for kmer, v in
                     sorted(pos_kmer_dic.items(),
                            key=lambda item: item[1], reverse=True)[:top]]
    neg_topk_list = [kmer for kmer, v in
                     sorted(neg_kmer_dic.items(),
                            key=lambda item: item[1], reverse=True)[:top]]
    # Adjust decimal places based on k-mer size (values shrink as k grows).
    dc_p = 2
    dc_r = 4
    if k > 3:
        dc_p += k - 3
        dc_r += k - 3
    dc_p_str = "%."+str(dc_p)+"f"
    dc_r_str = "%."+str(dc_r)+"f"
    add_ch = ""
    if val_type == "p":
        add_ch = " %"
        # Format percentages (in place) with dc_p decimal places.
        for kmer in pos_kmer_dic:
            pos_kmer_dic[kmer] = dc_p_str % pos_kmer_dic[kmer]
        for kmer in neg_kmer_dic:
            neg_kmer_dic[kmer] = dc_p_str % neg_kmer_dic[kmer]
    elif val_type == "r":
        # Format ratios (in place) with dc_r decimal places.
        for kmer in pos_kmer_dic:
            pos_kmer_dic[kmer] = dc_r_str % pos_kmer_dic[kmer]
        for kmer in neg_kmer_dic:
            neg_kmer_dic[kmer] = dc_r_str % neg_kmer_dic[kmer]
    # Generate markdown table.
    mdtable = "| Rank | Positives | Negatives |\n"
    mdtable += "| :-: | :-: | :-: |\n"
    for i in range(top):
        pos_kmer = pos_topk_list[i]
        neg_kmer = neg_topk_list[i]
        pos = i + 1
        mdtable += "| %i | %s (%s%s) | %s (%s%s) |\n" %(pos, pos_kmer, str(pos_kmer_dic[pos_kmer]), add_ch, neg_kmer, str(neg_kmer_dic[neg_kmer]), add_ch)
    mdtable += "| ... | | |\n"
    mdtable += "| # distinct k-mers | %i (%s) | %i (%s) |\n" %(pos_kmer_nr, pos_kmer_perc, neg_kmer_nr, neg_kmer_perc)
    # Return markdown table.
    return mdtable
################################################################################
def convert_seq_to_kmer_embedding(seq, k, kmer2idx_dic,
                                  l2d=False):
    """
    Convert RNA sequence to k-mer embedding (mapping k-mers to numbers).
    Slides a window of size k (stride 1) over seq and returns the list of
    k-mer indices, with length LL = LS - k + 1 (LS = sequence length).

    l2d:
        Instead of returning list of indices, return list of lists of indices.

    >>> seq = "ACGUA"
    >>> kmer2idx_dic = {'AA': 1, 'AC': 2, 'AG': 3, 'AU': 4, 'CA': 5, 'CC': 6, 'CG': 7, 'CU': 8, 'GA': 9, 'GC': 10, 'GG': 11, 'GU': 12, 'UA': 13, 'UC': 14, 'UG': 15, 'UU': 16}
    >>> k = 2
    >>> convert_seq_to_kmer_embedding(seq, k, kmer2idx_dic)
    [2, 7, 12, 13]
    >>> convert_seq_to_kmer_embedding(seq, k, kmer2idx_dic, l2d=True)
    [[2], [7], [12], [13]]
    """
    assert seq, "invalid seq given"
    kmer_idx_list = []
    # Number of full k-length windows that fit into the sequence.
    n_windows = len(seq) - k + 1
    for pos in range(max(n_windows, 0)):
        kmer = seq[pos:pos+k]
        assert kmer in kmer2idx_dic, "k-mer %s not in kmer2idx_dic" %(kmer)
        k_idx = kmer2idx_dic[kmer]
        kmer_idx_list.append([k_idx] if l2d else k_idx)
    assert kmer_idx_list, "kmer_idx_list empty"
    return kmer_idx_list
################################################################################
def get_kmer_dic(k,
                 fill_idx=False,
                 rna=False):
    """
    Return a dictionary of all k-mers over a 4-letter nucleotide alphabet.
    By default, DNA alphabet is used (ACGT) and every k-mer value is 0.
    Keys are inserted in lexicographic order.

    fill_idx:
        Instead of 0, assign each k-mer its 1-based rank in sorted
        (alphabetical) key order.
    rna:
        Use RNA alphabet (ACGU).

    >>> get_kmer_dic(1)
    {'A': 0, 'C': 0, 'G': 0, 'T': 0}
    >>> get_kmer_dic(2, rna=True)
    {'AA': 0, 'AC': 0, 'AG': 0, 'AU': 0, 'CA': 0, 'CC': 0, 'CG': 0, 'CU': 0, 'GA': 0, 'GC': 0, 'GG': 0, 'GU': 0, 'UA': 0, 'UC': 0, 'UG': 0, 'UU': 0}
    >>> get_kmer_dic(1, fill_idx=True)
    {'A': 1, 'C': 2, 'G': 3, 'T': 4}
    >>> get_kmer_dic(2, rna=True, fill_idx=True)
    {'AA': 1, 'AC': 2, 'AG': 3, 'AU': 4, 'CA': 5, 'CC': 6, 'CG': 7, 'CU': 8, 'GA': 9, 'GC': 10, 'GG': 11, 'GU': 12, 'UA': 13, 'UC': 14, 'UG': 15, 'UU': 16}
    """
    assert k, "invalid k given"
    assert k > 0, "invalid k given"
    # Select alphabet.
    nts = ["A", "C", "G", "U"] if rna else ["A", "C", "G", "T"]
    # Build all k-mers iteratively, extending by one letter per round.
    # This yields lexicographic insertion order.
    kmers = [""]
    for _ in range(k):
        kmers = [mer + nt for mer in kmers for nt in nts]
    mer2c_dic = {mer: 0 for mer in kmers}
    if fill_idx:
        # Overwrite zeros with 1-based alphabetical ranks.
        for idx, mer in enumerate(sorted(mer2c_dic), start=1):
            mer2c_dic[mer] = idx
    return mer2c_dic
################################################################################
def seqs_dic_count_kmer_freqs(seqs_dic, k,
                              rna=False,
                              perc=False,
                              return_ratios=False,
                              report_key_error=True,
                              convert_to_uc=False):
    """
    Given a dictionary with sequences seqs_dic, count how many times each
    k-mer is found over all sequences (== get k-mer frequencies).
    Return k-mer frequencies count dictionary.
    By default, a DNA dictionary is used, and key errors will be reported.

    rna:
        Instead of DNA dictionary, use RNA dictionary (ACGU) for counting
        k-mers.
    perc:
        If True, make percentages out of ratios (*100).
    return_ratios:
        Return k-mer ratios instead of frequencies (== counts).
    report_key_error:
        If True, report key error (k-mer not in count_dic).
    convert_to_uc:
        Convert sequences to uppercase before counting.

    >>> seqs_dic = {'seq1': 'AACGTC', 'seq2': 'GGACT'}
    >>> seqs_dic_count_kmer_freqs(seqs_dic, 2)
    {'AA': 1, 'AC': 2, 'AG': 0, 'AT': 0, 'CA': 0, 'CC': 0, 'CG': 1, 'CT': 1, 'GA': 1, 'GC': 0, 'GG': 1, 'GT': 1, 'TA': 0, 'TC': 1, 'TG': 0, 'TT': 0}
    >>> seqs_dic = {'seq1': 'AAACGT'}
    >>> seqs_dic_count_kmer_freqs(seqs_dic, 2, return_ratios=True, perc=True)
    {'AA': 40.0, 'AC': 20.0, 'AG': 0.0, 'AT': 0.0, 'CA': 0.0, 'CC': 0.0, 'CG': 20.0, 'CT': 0.0, 'GA': 0.0, 'GC': 0.0, 'GG': 0.0, 'GT': 20.0, 'TA': 0.0, 'TC': 0.0, 'TG': 0.0, 'TT': 0.0}
    """
    assert seqs_dic, "given dictinary seqs_dic empty"
    assert k, "invalid k given"
    assert k > 0, "invalid k given"
    # Initialize full k-mer dictionary (all counts 0).
    count_dic = get_kmer_dic(k, rna=rna)
    # Slide a k-sized window over every sequence and count.
    total_c = 0
    for seq in seqs_dic.values():
        if convert_to_uc:
            seq = seq.upper()
        for i in range(len(seq)-k+1):
            kmer = seq[i:i+k]
            if report_key_error:
                assert kmer in count_dic, "k-mer \"%s\" not in count_dic" %(kmer)
            if kmer in count_dic:
                count_dic[kmer] += 1
                total_c += 1
    assert total_c, "no k-mers counted for given seqs_dic (sequence lengths < set k ?)"
    # Optionally convert counts to ratios (or percentages).
    if return_ratios:
        for kmer in count_dic:
            ratio = count_dic[kmer] / total_c
            count_dic[kmer] = ratio*100 if perc else ratio
    return count_dic
################################################################################
def phylop_norm_test_scores(test_pp_con_out,
                            dec_round=4,
                            int_whole_nr=True):
    """
    Read in phyloP .pp.con file scores for test set,
    normalize values to -1 ... 1 and overwrite (!) existing phyloP .pp.con file.
    Normalization is min-max for negative and positive phyloP scores
    separately.

    test_pp_con_out:
        phyloP .pp.con scores file (">seq_id" header lines, one score per
        following line). Gets overwritten with normalized scores.
    dec_round:
        Number of decimal places to round normalized scores to.
    int_whole_nr:
        If True, output whole numbers without decimal places.
    """
    # Min-max normalization of phyloP scores (negative and positive score
    # ranges normalized separately, preserving sign).
    test_con_dic = {}  # seq_id -> list of per-position phyloP scores.
    seq_id = ""
    pp_max = -1000  # sentinel; assumes scores > -1000.
    pp_min = 1000   # sentinel; assumes scores < 1000.
    # Read in positive set phyloP scores, tracking global min + max.
    with open(test_pp_con_out) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                seq_id = m.group(1)
                test_con_dic[seq_id] = []
            else:
                pp_sc = float(line.strip())
                test_con_dic[seq_id].append(pp_sc)
                if pp_sc > pp_max:
                    pp_max = pp_sc
                if pp_sc < pp_min:
                    pp_min = pp_sc
    f.closed  # no-op; file already closed by the with block.
    assert test_con_dic, "no entries read into test_con_dic dictionary"
    # Individual max values for min-max of positive+negative values.
    # Negative scores are normalized on their absolute values.
    pp_neg_max = abs(pp_min)
    pp_neg_min = 0
    pp_pos_max = pp_max
    pp_pos_min = 0
    # Min-max normalize test phyloP scores and overwrite original file.
    OUTP = open(test_pp_con_out,"w")
    for seq_id in test_con_dic:
        OUTP.write(">%s\n" %(seq_id))
        for pp_sc in test_con_dic[seq_id]:
            if pp_sc == 0:
                OUTP.write("0\n")
            else:
                if pp_sc < 0:
                    # Normalize absolute value, then restore the sign.
                    # min_max_normalize() is defined elsewhere in this module.
                    pp_sc_abs = abs(pp_sc)
                    pp_sc_norm = min_max_normalize(pp_sc_abs, pp_neg_max, pp_neg_min)
                    pp_sc_norm = round(pp_sc_norm, dec_round)
                    pp_sc_norm = -1*pp_sc_norm
                else:
                    pp_sc_norm = min_max_normalize(pp_sc, pp_pos_max, pp_pos_min)
                    pp_sc_norm = round(pp_sc_norm, dec_round)
                if pp_sc_norm == 0:
                    pp_sc_norm = 0  # map -0.0 / 0.0 to integer 0 for output.
                if int_whole_nr and not pp_sc_norm % 1:
                    OUTP.write("%i\n" %(int(pp_sc_norm)))
                else:
                    OUTP.write("%s\n" %(str(pp_sc_norm)))
    OUTP.close()
################################################################################
def phylop_norm_train_scores(pos_pp_con_out, neg_pp_con_out,
                             dec_round=4,
                             int_whole_nr=True):
    """
    Read in phyloP .pp.con file scores for positive and negative set,
    normalize values to -1 ... 1 and overwrite (!) existing phyloP .pp.con files.
    Normalization is min-max for negative and positive phyloP scores
    separately. Min + max values are extracted from the union of positive
    and negative set.

    pos_pp_con_out:
        Positive set phyloP .pp.con scores file (">seq_id" header lines,
        one score per following line). Gets overwritten.
    neg_pp_con_out:
        Negative set phyloP .pp.con scores file. Gets overwritten.
    dec_round:
        Number of decimal places to round normalized scores to.
    int_whole_nr:
        If True, output whole numbers without decimal places.
    """
    # Min-max normalization of phyloP scores (negative and positive score
    # ranges normalized separately, preserving sign).
    pos_con_dic = {}  # seq_id -> list of per-position phyloP scores.
    neg_con_dic = {}
    seq_id = ""
    pp_max = -1000  # sentinel; assumes scores > -1000.
    pp_min = 1000   # sentinel; assumes scores < 1000.
    # Read in positive set phyloP scores, tracking min + max over BOTH sets.
    with open(pos_pp_con_out) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                seq_id = m.group(1)
                pos_con_dic[seq_id] = []
            else:
                pp_sc = float(line.strip())
                pos_con_dic[seq_id].append(pp_sc)
                if pp_sc > pp_max:
                    pp_max = pp_sc
                if pp_sc < pp_min:
                    pp_min = pp_sc
    f.closed  # no-op; file already closed by the with block.
    assert pos_con_dic, "no entries read into pos_con_dic dictionary"
    # Read in negative set phyloP scores (min + max continue accumulating).
    with open(neg_pp_con_out) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                seq_id = m.group(1)
                neg_con_dic[seq_id] = []
            else:
                pp_sc = float(line.strip())
                neg_con_dic[seq_id].append(pp_sc)
                if pp_sc > pp_max:
                    pp_max = pp_sc
                if pp_sc < pp_min:
                    pp_min = pp_sc
    f.closed  # no-op; file already closed by the with block.
    assert neg_con_dic, "no entries read into neg_con_dic dictionary"
    # Individual max values for min-max of positive+negative values.
    # Negative scores are normalized on their absolute values.
    pp_neg_max = abs(pp_min)
    pp_neg_min = 0
    pp_pos_max = pp_max
    pp_pos_min = 0
    # Min-max normalize positive phyloP scores and overwrite original file.
    OUTP = open(pos_pp_con_out,"w")
    for seq_id in pos_con_dic:
        OUTP.write(">%s\n" %(seq_id))
        for pp_sc in pos_con_dic[seq_id]:
            if pp_sc == 0:
                OUTP.write("0\n")
            else:
                if pp_sc < 0:
                    # Normalize absolute value, then restore the sign.
                    # min_max_normalize() is defined elsewhere in this module.
                    pp_sc_abs = abs(pp_sc)
                    pp_sc_norm = min_max_normalize(pp_sc_abs, pp_neg_max, pp_neg_min)
                    pp_sc_norm = round(pp_sc_norm, dec_round)
                    pp_sc_norm = -1*pp_sc_norm
                else:
                    pp_sc_norm = min_max_normalize(pp_sc, pp_pos_max, pp_pos_min)
                    pp_sc_norm = round(pp_sc_norm, dec_round)
                if pp_sc_norm == 0:
                    pp_sc_norm = 0  # map -0.0 / 0.0 to integer 0 for output.
                if int_whole_nr and not pp_sc_norm % 1:
                    OUTP.write("%i\n" %(int(pp_sc_norm)))
                else:
                    OUTP.write("%s\n" %(str(pp_sc_norm)))
    OUTP.close()
    # Min-max normalize negative phyloP scores and overwrite original file.
    OUTN = open(neg_pp_con_out,"w")
    for seq_id in neg_con_dic:
        OUTN.write(">%s\n" %(seq_id))
        for pp_sc in neg_con_dic[seq_id]:
            if pp_sc == 0:
                OUTN.write("0\n")
            else:
                if pp_sc < 0:
                    # Normalize absolute value, then restore the sign.
                    pp_sc_abs = abs(pp_sc)
                    pp_sc_norm = min_max_normalize(pp_sc_abs, pp_neg_max, pp_neg_min)
                    pp_sc_norm = round(pp_sc_norm, dec_round)
                    pp_sc_norm = -1*pp_sc_norm
                else:
                    pp_sc_norm = min_max_normalize(pp_sc, pp_pos_max, pp_pos_min)
                    pp_sc_norm = round(pp_sc_norm, dec_round)
                if pp_sc_norm == 0:
                    pp_sc_norm = 0  # map -0.0 / 0.0 to integer 0 for output.
                if int_whole_nr and not pp_sc_norm % 1:
                    OUTN.write("%i\n" %(int(pp_sc_norm)))
                else:
                    OUTN.write("%s\n" %(str(pp_sc_norm)))
    OUTN.close()
################################################################################
def feat_min_max_norm_test_scores(test_feat_out,
                                  p_values=False,
                                  dec_round=4,
                                  int_whole_nr=True):
    """
    Read in feature file test_feat_out, min max normalize values,
    and overwrite (!) existing test_feat_out file.
    Min max normalization resulting in new scores from 0 to 1.

    test_feat_out:
        Feature scores file (">site_id" header lines, one score per
        following line). Gets overwritten with normalized scores.
    p_values:
        If True, treat scores as p-values, i.e., normalized score
        == 1 - score
    dec_round:
        Number of decimal places to round normalized scores to.
    int_whole_nr:
        If True, output whole numbers without decimal places.
    """
    sc_dic = {}  # site_id -> list of per-position scores.
    site_id = ""
    sc_max = -1000000  # sentinel; assumes scores > -1000000.
    sc_min = 1000000   # sentinel; assumes scores < 1000000.
    # Read in test set scores, tracking global min + max.
    with open(test_feat_out) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                site_id = m.group(1)
                sc_dic[site_id] = []
            else:
                sc = float(line.strip())
                sc_dic[site_id].append(sc)
                if sc > sc_max:
                    sc_max = sc
                if sc < sc_min:
                    sc_min = sc
    f.closed  # no-op; file already closed by the with block.
    assert sc_dic, "no entries read into sc_dic dictionary"
    # Min max normalize and output to original file.
    OUTF = open(test_feat_out,"w")
    for site_id in sc_dic:
        OUTF.write(">%s\n" %(site_id))
        for sc in sc_dic[site_id]:
            if sc == 0:
                # Zero scores map to 1 for p-values (1 - 0), else stay 0.
                if p_values:
                    OUTF.write("1\n")
                else:
                    OUTF.write("0\n")
            else:
                if p_values:
                    sc_norm = 1 - sc
                else:
                    # min_max_normalize() is defined elsewhere in this module.
                    sc_norm = min_max_normalize(sc, sc_max, sc_min)
                sc_norm = round(sc_norm, dec_round)
                if int_whole_nr and not sc_norm % 1:
                    OUTF.write("%i\n" %(int(sc_norm)))
                else:
                    OUTF.write("%s\n" %(str(sc_norm)))
    OUTF.close()
################################################################################
def gp_seq_only_generate_feat_annot(feat_file, test_out, seq_len_dic):
    """
    Generate --feat-in annotation files for rnaprot gp and sequences-only
    input.
    --feat-in files need to have following format:
    Input format:
    test_id1<tab>-1,0.3,0.2,...,0.1
    ...
    Output is FASTA-like: ">seq_id" header followed by one value per line.

    seq_len_dic:
        Sequence lengths dictionary for checking.

    >>> seq_len_dic = {'t1': 4, 't2': 3}
    >>> feat_file = "test_data/test_seq_feat_gp.in"
    >>> test_exp_out = "test_data/test_seq_feat_gp.exp.out"
    >>> test_tmp_out = "test_data/test_seq_feat_gp.tmp.out"
    >>> gp_seq_only_generate_feat_annot(feat_file, test_tmp_out, seq_len_dic)
    >>> diff_two_files_identical(test_tmp_out, test_exp_out)
    True
    """
    # Read in per-sequence feature value lists, validating as we go.
    feat_dic = {}
    with open(feat_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            assert len(cols) == 2, "# columns != 2 for --feat-in file \"%s\", line \"%s\"" %(feat_file, line)
            seq_id = cols[0]
            assert seq_id not in feat_dic, "non-unique header \"%s\" in \"%s\"" % (seq_id, feat_file)
            assert seq_id in seq_len_dic, "sequence ID \"%s\" from --feat-in file \"%s\" not part of training sequences set" %(seq_id, feat_file)
            value_list = cols[1].strip().split(",")
            assert value_list, "no feat_list extracted from file \"%s\", line \"%s\"" %(feat_file, line)
            assert len(value_list) == seq_len_dic[seq_id], "length of feature vector for sequence ID %s != sequence length (%i != %i)" %(seq_id, len(value_list), seq_len_dic[seq_id])
            feat_dic[seq_id] = value_list
    assert feat_dic, "no feature values read in (input file \"%s\" empty or mal-formatted?)" %(feat_file)
    assert len(seq_len_dic) == len(feat_dic), "len(seq_len_dic) != len(feat_dic) (%i != %i)" %(len(seq_len_dic), len(feat_dic))
    # Write out feature annotations, one value per row.
    with open(test_out, "w") as out_fh:
        for seq_id, value_list in feat_dic.items():
            out_fh.write(">%s\n" %(seq_id))
            for v in value_list:
                out_fh.write("%s\n" %(v))
################################################################################
def gt_seq_only_generate_feat_annot(feat_file, pos_out, neg_out,
                                    seq_len_dic, seq_label_dic):
    """
    Generate --feat-in annotation files for rnaprot gt and sequences-only
    input.
    --feat-in files need to have following format:
    Input format:
    pos_id1<tab>-1,0.3,0.2,...,0.1
    ...
    neg_id1<tab>-1,0.3,0.2,...,0.1
    Output is FASTA-like: ">seq_id" header followed by one value per line,
    split into pos_out / neg_out by class label.

    seq_len_dic:
        Sequence lengths dictionary for checking.
    seq_label_dic:
        Sequence ID -> class label dictionary (1: positives, 2: negatives).

    >>> seq_len_dic = {'p1': 4, 'p2': 4, 'n1': 4, 'n2': 3}
    >>> seq_label_dic = {'p1': 1, 'p2': 1, 'n1': 0, 'n2': 0}
    >>> feat_file = "test_data/test_seq_feat.in"
    >>> pos_exp_out = "test_data/test_seq_feat_pos.exp.out"
    >>> neg_exp_out = "test_data/test_seq_feat_neg.exp.out"
    >>> pos_tmp_out = "test_data/test_seq_feat_pos.tmp.out"
    >>> neg_tmp_out = "test_data/test_seq_feat_neg.tmp.out"
    >>> gt_seq_only_generate_feat_annot(feat_file, pos_tmp_out, neg_tmp_out, seq_len_dic, seq_label_dic)
    >>> diff_two_files_identical(pos_tmp_out, pos_exp_out)
    True
    >>> diff_two_files_identical(neg_tmp_out, neg_exp_out)
    True
    """
    # Read in per-sequence feature value lists, validating as we go.
    feat_dic = {}
    with open(feat_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            assert len(cols) == 2, "# columns != 2 for --feat-in file \"%s\", line \"%s\"" %(feat_file, line)
            seq_id = cols[0]
            assert seq_id not in feat_dic, "non-unique header \"%s\" in \"%s\"" % (seq_id, feat_file)
            assert seq_id in seq_len_dic, "sequence ID \"%s\" from --feat-in file \"%s\" not part of training sequences set" %(seq_id, feat_file)
            value_list = cols[1].strip().split(",")
            assert value_list, "no feat_list extracted from file \"%s\", line \"%s\"" %(feat_file, line)
            assert len(value_list) == seq_len_dic[seq_id], "length of feature vector for sequence ID %s != sequence length (%i != %i)" %(seq_id, len(value_list), seq_len_dic[seq_id])
            feat_dic[seq_id] = value_list
    assert feat_dic, "no feature values read in (input file \"%s\" empty or mal-formatted?)" %(feat_file)
    assert len(seq_len_dic) == len(feat_dic), "len(seq_len_dic) != len(feat_dic) (%i != %i)" %(len(seq_len_dic), len(feat_dic))
    # Write positives and negatives feature files, routing each sequence
    # to its class file based on seq_label_dic.
    with open(pos_out, "w") as pos_fh, open(neg_out, "w") as neg_fh:
        for seq_id, value_list in feat_dic.items():
            out_fh = pos_fh if seq_label_dic[seq_id] == 1 else neg_fh
            out_fh.write(">%s\n" %(seq_id))
            for v in value_list:
                out_fh.write("%s\n" %(v))
################################################################################
def feat_min_max_norm_train_scores(pos_feat_out, neg_feat_out,
                                   p_values=False,
                                   dec_round=4,
                                   int_whole_nr=True):
    """
    Read in feature files for positive and negative set, min max normalize
    values, and overwrite (!) existing feature files.
    Min max normalization resulting in new scores from 0 to 1.
    Min + max values are taken from the union of both sets.

    pos_feat_out:
        Positive set feature scores file (">site_id" header lines, one
        score per following line). Gets overwritten.
    neg_feat_out:
        Negative set feature scores file. Gets overwritten.
    p_values:
        If True, treat scores as p-values, i.e., normalized score
        == 1 - score
    dec_round:
        Number of decimal places to round normalized scores to.
    int_whole_nr:
        If True, output whole numbers without decimal places.
    """
    pos_sc_dic = {}  # site_id -> list of per-position scores.
    neg_sc_dic = {}
    site_id = ""
    sc_max = -1000000  # sentinel; assumes scores > -1000000.
    sc_min = 1000000   # sentinel; assumes scores < 1000000.
    # Read in positive scores, tracking min + max over BOTH sets.
    with open(pos_feat_out) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                site_id = m.group(1)
                pos_sc_dic[site_id] = []
            else:
                sc = float(line.strip())
                pos_sc_dic[site_id].append(sc)
                if sc > sc_max:
                    sc_max = sc
                if sc < sc_min:
                    sc_min = sc
    f.closed  # no-op; file already closed by the with block.
    assert pos_sc_dic, "no entries read into pos_sc_dic dictionary"
    # Read in negative scores (min + max continue accumulating).
    with open(neg_feat_out) as f:
        for line in f:
            if re.search(">.+", line):
                m = re.search(">(.+)", line)
                site_id = m.group(1)
                neg_sc_dic[site_id] = []
            else:
                sc = float(line.strip())
                neg_sc_dic[site_id].append(sc)
                if sc > sc_max:
                    sc_max = sc
                if sc < sc_min:
                    sc_min = sc
    f.closed  # no-op; file already closed by the with block.
    assert neg_sc_dic, "no entries read into neg_sc_dic dictionary"
    # Min max normalize positive scores and output to original file.
    OUTP = open(pos_feat_out,"w")
    for site_id in pos_sc_dic:
        OUTP.write(">%s\n" %(site_id))
        for sc in pos_sc_dic[site_id]:
            if sc == 0:
                # Zero scores map to 1 for p-values (1 - 0), else stay 0.
                if p_values:
                    OUTP.write("1\n")
                else:
                    OUTP.write("0\n")
            else:
                if p_values:
                    sc_norm = 1 - sc
                else:
                    # min_max_normalize() is defined elsewhere in this module.
                    sc_norm = min_max_normalize(sc, sc_max, sc_min)
                sc_norm = round(sc_norm, dec_round)
                if int_whole_nr and not sc_norm % 1:
                    OUTP.write("%i\n" %(int(sc_norm)))
                else:
                    OUTP.write("%s\n" %(str(sc_norm)))
    OUTP.close()
    # Min max normalize negative scores and output to original file.
    OUTN = open(neg_feat_out,"w")
    for site_id in neg_sc_dic:
        OUTN.write(">%s\n" %(site_id))
        for sc in neg_sc_dic[site_id]:
            if sc == 0:
                if p_values:
                    OUTN.write("1\n")
                else:
                    OUTN.write("0\n")
            else:
                if p_values:
                    sc_norm = 1 - sc
                else:
                    sc_norm = min_max_normalize(sc, sc_max, sc_min)
                sc_norm = round(sc_norm, dec_round)
                if int_whole_nr and not sc_norm % 1:
                    OUTN.write("%i\n" %(int(sc_norm)))
                else:
                    OUTN.write("%s\n" %(str(sc_norm)))
    OUTN.close()
################################################################################
def bed_get_feature_annotations(in_bed, feat_bed, feat_out,
                                feat_type="C",
                                stats_dic=None,
                                split_size=60,
                                disable_pol=False):
    """
    Overlap in_bed with feat_bed, and annotate overlapping regions
    depending on set feat_type (C, N).
    C: categorical, one-hot, store 1 for overlapping position and
    0 for not overlapping position.
    N: numerical, i.e., use column 5 feat_bed score to store as score
    for each overlapping position, and 0 for not overlapping position.
    Store feature file to feat_out.
    Format of feat_out file depends on feat_type:
    if "C":
    >id1
    00000111111
    11110000000
    ...
    if "N":
    >id1
    value1
    value2
    ...

    in_bed:
        Input BED regions, annotate each position.
    feat_bed:
        Feature BED regions, use for annotating in_bed positions.
    feat_out:
        Output feature annotation file. Format depends on feat_type.
    feat_type:
        "C" for categorical, or "N" for numerical output annotations.
    stats_dic:
        If set (dictionary), store annotation statistics in it.
    split_size:
        Split size for outputting C labels (FASTA style row width).
    disable_pol:
        If yes, disable strandedness (== do not set -s in intersectBed),
        i.e., do not differentiate between strands when adding
        annotations.

    >>> in_bed = "test_data/feat_in.bed"
    >>> feat_bed_old_nick = "test_data/feat_old_nick.bed"
    >>> feat_bed_feat_666 = "test_data/feat_666.bed"
    >>> old_nick_exp1 = "test_data/feat_old_nick_1.exp.out"
    >>> old_nick_exp2 = "test_data/feat_old_nick_2.exp.out"
    >>> feat_666_exp1 = "test_data/feat_666_1.exp.out"
    >>> feat_666_exp2 = "test_data/feat_666_2.exp.out"
    >>> old_nick_out = "test_data/test.tmp.old_nick"
    >>> feat_666_out = "test_data/test.tmp.feat_666"
    >>> bed_get_feature_annotations(in_bed, feat_bed_old_nick, old_nick_out, feat_type="C", disable_pol=True)
    >>> diff_two_files_identical(old_nick_out, old_nick_exp1)
    True
    >>> bed_get_feature_annotations(in_bed, feat_bed_feat_666, feat_666_out, feat_type="N", disable_pol=True)
    >>> diff_two_files_identical(feat_666_out, feat_666_exp1)
    True
    >>> bed_get_feature_annotations(in_bed, feat_bed_old_nick, old_nick_out, feat_type="C", disable_pol=False)
    >>> diff_two_files_identical(old_nick_out, old_nick_exp2)
    True
    >>> bed_get_feature_annotations(in_bed, feat_bed_feat_666, feat_666_out, feat_type="N", disable_pol=False)
    >>> diff_two_files_identical(feat_666_out, feat_666_exp2)
    True
    """
    # Checks.
    ftl = ["C", "N"]
    assert feat_type in ftl, "invalid feat_type given (allowed: C,N)"
    # Temp overlap results file (random name to avoid collisions).
    random_id = uuid.uuid1()
    tmp_out = str(random_id) + ".tmp.out"
    # Initialize stats keys (value stats keys depend on feat_type).
    if stats_dic is not None:
        stats_dic["total_pos"] = 0
        stats_dic["feat_type"] = feat_type
        stats_dic["zero_sites"] = 0
        stats_dic["total_sites"] = 0
        stats_dic["mean_l"] = 0
        stats_dic["median_l"] = 0
        stats_dic["min_l"] = 0
        stats_dic["max_l"] = 0
        stats_dic["stdev_l"] = 0
        if feat_type == "C":
            stats_dic["0"] = 0
            stats_dic["1"] = 0
        else:
            stats_dic["mean"] = 0
            stats_dic["stdev"] = 0
            stats_dic["zero_pos"] = 0
    # Value list.
    v_list = []
    # BED region lengths list.
    len_list = []
    # Read in in_bed, store start + end coordinates.
    id2s_dic = {}
    id2e_dic = {}
    # Store positional values list for each site in dic.
    id2vl_dic = {}
    with open(in_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            assert site_id not in id2s_dic, "non-unique site ID \"%s\" in in_bed" %(site_id)
            id2s_dic[site_id] = site_s
            id2e_dic[site_id] = site_e
            site_l = site_e - site_s
            # Init every position with "0" (== no overlap).
            id2vl_dic[site_id] = ["0"]*site_l
    f.closed  # no-op; file already closed by the with block.
    assert id2s_dic, "given in_bed \"%s\" empty?" %(in_bed)
    # Store feature region lengths.
    if stats_dic:
        with open(feat_bed) as f:
            for line in f:
                row = line.strip()
                cols = line.strip().split("\t")
                site_s = int(cols[1])
                site_e = int(cols[2])
                site_l = site_e - site_s
                len_list.append(site_l)
        f.closed  # no-op; file already closed by the with block.
    # Run overlap calculation to get overlapping regions.
    intersect_params = "-s -wb"
    if disable_pol:
        intersect_params = "-wb"
    # intersect_bed_files() is defined elsewhere in this module.
    intersect_bed_files(in_bed, feat_bed, intersect_params, tmp_out)
    # Get annotations from the intersect output (in_bed cols + feat_bed cols).
    with open(tmp_out) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            s = int(cols[1]) + 1 # Make one-based.
            e = int(cols[2])
            site_id = cols[3]
            site_s = id2s_dic[site_id] + 1 # Make one-based.
            site_e = id2e_dic[site_id]
            site_pol = cols[5]
            score = cols[10]  # feat_bed column 5 score.
            # + case.
            if site_pol == "+" or disable_pol:
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index.
                        li = i - site_s
                        if feat_type == "C":
                            id2vl_dic[site_id][li] = "1"
                        else:
                            id2vl_dic[site_id][li] = score
            else:
                # Minus strand: reverse index so lists run 5' -> 3'.
                for i in range(site_s, site_e+1):
                    if i >= s and i <= e:
                        # Get list index.
                        li = site_e - i
                        if feat_type == "C":
                            id2vl_dic[site_id][li] = "1"
                        else:
                            id2vl_dic[site_id][li] = score
    f.closed  # no-op; file already closed by the with block.
    # Output annotations to file.
    OUTLAB = open(feat_out,"w")
    # Output labels for each site.
    for site_id in id2vl_dic:
        if feat_type == "C":
            # List to string.
            label_str = "".join(id2vl_dic[site_id])
            OUTLAB.write(">%s\n" %(site_id))
            # FASTA-style wrapping at split_size characters per row.
            for i in range(0, len(label_str), split_size):
                OUTLAB.write("%s\n" %((label_str[i:i+split_size])))
            #OUTLAB.write("%s\t%s\n" %(site_id, label_str))
            if stats_dic:
                stats_dic["total_sites"] += 1
                site_0 = True
                for v in id2vl_dic[site_id]:
                    stats_dic[v] += 1
                    if v == "1":
                        site_0 = False
                        stats_dic["total_pos"] += 1
                if site_0:
                    stats_dic["zero_sites"] += 1
        else:
            OUTLAB.write(">%s\n" %(site_id))
            site_0 = True
            for v in id2vl_dic[site_id]:
                OUTLAB.write("%s\n" %(v))
                if stats_dic:
                    v_list.append(float(v))
                    if v == "0":
                        stats_dic["zero_pos"] += 1
                    else:
                        site_0 = False
            if stats_dic:
                stats_dic["total_sites"] += 1
                if site_0:
                    stats_dic["zero_sites"] += 1
    OUTLAB.close()
    # Additional stats if feat_type numerical.
    if stats_dic:
        if feat_type == "N":
            assert v_list, "no values stored in v_list"
            stats_dic["mean"] = statistics.mean(v_list)
            stats_dic["stdev"] = statistics.stdev(v_list)
            stats_dic["total_pos"] = len(v_list)
        assert len_list, "no lengths stored in length list"
        stats_dic["mean_l"] = statistics.mean(len_list)
        stats_dic["median_l"] = statistics.median(len_list)
        stats_dic["stdev_l"] = statistics.stdev(len_list)
        stats_dic["max_l"] = max(len_list)
        stats_dic["min_l"] = min(len_list)
    # Take out the trash (remove temporary intersect output).
    litter_street = True
    if litter_street:
        if os.path.exists(tmp_out):
            os.remove(tmp_out)
################################################################################
def get_valid_file_ending(s):
    r"""
    Turn string s into a valid file name / file ending.

    Modified after:
    https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename
    def get_valid_filename(s):
        s = str(s).strip().replace(' ', '_')
        return re.sub(r'(?u)[^-\w.]', '', s)
    In addition, start and end of file ending should start with word or
    number.

    NOTE: patterns (and this docstring) are raw strings now; the previous
    non-raw '\W' / '\w' escapes trigger SyntaxWarning on Python >= 3.12.

    >>> s = "___.hallole123.so_hallole123.___"
    >>> get_valid_file_ending(s)
    'hallole123.so_hallole123'
    >>> get_valid_file_ending("john's new arctic warfare")
    'johns_new_arctic_warfare'
    """
    assert s, "given s empty"
    # Strip and replace spaces with _.
    s = str(s).strip().replace(' ', '_')
    # Trim non-word characters from start and end (inner chars kept).
    m = re.search(r'\W*([a-zA-Z0-9].+[a-zA-Z0-9])\W*', s)
    if m:
        return re.sub(r'(?u)[^-\w.]', '', m.group(1))
    else:
        return re.sub(r'(?u)[^-\w.]', '', s)
################################################################################
def get_length_stats_from_seqs_dic(seqs_dic):
    """
    Get length stats from set of sequences stored in dictionary.
    Return dictionary with stats (keys: mean, median, max, min, stdev
    of sequence lengths).

    seqs_dic:
        Sequence ID -> sequence dictionary.

    NOTE: statistics.stdev() requires >= 2 sequences.
    """
    assert seqs_dic, "given seqs_dic empty"
    seq_len_list = [len(seq) for seq in seqs_dic.values()]
    seq_stats_dic = {}
    seq_stats_dic["mean"] = statistics.mean(seq_len_list)
    seq_stats_dic["median"] = int(statistics.median(seq_len_list))
    seq_stats_dic["max"] = int(max(seq_len_list))
    # Fix: original used max() here, reporting the maximum as the minimum.
    seq_stats_dic["min"] = int(min(seq_len_list))
    seq_stats_dic["stdev"] = statistics.stdev(seq_len_list)
    return seq_stats_dic
################################################################################
def load_eval_data(args,
                   load_negatives=False,
                   store_tensors=True,
                   train_folder=False,
                   kmer2idx_dic=False,
                   num_features=False,
                   embed=None,
                   embed_k=False,
                   str_mode=False):
    """
    Load training data for rnaprot eval, to generate motifs and profiles.

    args:
        Command line arguments object. Expected to provide in_gt_folder
        and out_folder, plus (as fallbacks for the keyword arguments
        below) in_train_folder, n_feat, str_mode, embed, embed_k.
    load_negatives:
        Load negatives.fa / negatives.* feature files instead of the
        positives files.
    store_tensors:
        Store site features as torch tensors in returned all_features
        (otherwise as plain lists).
    train_folder, num_features, embed, embed_k, str_mode:
        Override the corresponding args values (args values are used if
        these are left at their defaults).
    kmer2idx_dic:
        k-mer to index mapping dictionary, required if embed enabled.

    Returns:
        seqs_dic, idx2id_dic, all_features, ch_info_dic
    """
    # Use args values for any model parameters not explicitly given.
    if not train_folder:
        train_folder = args.in_train_folder
    if not num_features:
        num_features = args.n_feat
    if not str_mode:
        str_mode = args.str_mode
    if embed is None:
        embed = args.embed
    if not embed_k:
        embed_k = args.embed_k
    if embed:
        assert kmer2idx_dic, "embed enabled but missing kmer2idx_dic"
    assert os.path.isdir(args.in_gt_folder), "--gt-in folder does not exist"
    # Feature file containing info for features used for model training.
    assert os.path.isdir(train_folder), "model training folder %s does not exist" %(train_folder)
    feat_file = train_folder + "/" + "features.out"
    assert os.path.exists(feat_file), "%s features file expected but does not exist" %(feat_file)
    # rnaprot predict output folder.
    out_folder = args.out_folder
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    # Channel info output file.
    channel_infos_out = out_folder + "/" + "channel_infos.out"
    channel_info_list = []
    channel_nr = 0
    # Read in feature info (format: feat_id, feat_type (C/N),
    # comma-separated categories / score IDs, normalization mode).
    fid2type_dic = {}
    fid2cat_dic = {} # Store category labels or numerical score IDs in list.
    fid2norm_dic = {}
    fid2row_dic = {}
    print("Read in feature infos from %s ... " %(feat_file))
    with open(feat_file) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            feat_id = cols[0]
            feat_type = cols[1]
            feat_cat_list = cols[2].split(",")
            feat_cat_list.sort()
            feat_norm = cols[3]
            fid2row_dic[feat_id] = row
            assert feat_id not in fid2type_dic, "feature ID \"%s\" found twice in feature file" %(feat_id)
            fid2type_dic[feat_id] = feat_type
            fid2cat_dic[feat_id] = feat_cat_list
            fid2norm_dic[feat_id] = feat_norm
    assert fid2type_dic, "no feature infos read in from rnaprot train feature file %s" %(feat_file)
    # Read in features.out from rnaprot gt and check.
    gt_feat_file = args.in_gt_folder + "/" + "features.out"
    assert os.path.exists(gt_feat_file), "%s features file expected but does not exist" %(gt_feat_file)
    gt_fid2row_dic = {}
    print("Read in feature infos from %s ... " %(gt_feat_file))
    with open(gt_feat_file) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            feat_id = cols[0]
            gt_fid2row_dic[feat_id] = row
    assert gt_fid2row_dic, "no feature infos found in --gt-in feature file %s" %(gt_feat_file)
    # Pairwise feature comparison (note that --gt-in can have more features, but should have the same!).
    for fid in fid2row_dic:
        assert fid in gt_fid2row_dic, "rnaprot train feature ID \"%s\" not found in %s" %(fid, gt_feat_file)
        # Structure can differ between gt and train features.out (depending on set --str-mode in train).
        if fid != "str":
            # Bug fix: error message formerly referenced undefined
            # gp_fid_dic / train_fid_dic (NameError on assert failure).
            assert fid2row_dic[fid] == gt_fid2row_dic[fid], "feature ID \"%s\" annotation varies between --gt-in and model folder %s features.out (\"%s\" vs \"%s\"). Features that a model was trained on need to be present in --gt-in" %(fid, train_folder, fid2row_dic[fid], gt_fid2row_dic[fid])
    # Check sequence feature.
    assert "fa" in fid2type_dic, "feature ID \"fa\" not in feature file"
    assert fid2cat_dic["fa"] == ["A", "C", "G", "U"], "sequence feature alphabet != A,C,G,U"
    # Read in FASTA sequences.
    pos_fa_in = args.in_gt_folder + "/" + "positives.fa"
    if load_negatives:
        pos_fa_in = args.in_gt_folder + "/" + "negatives.fa"
    assert os.path.exists(pos_fa_in), "--gt-in folder does not contain %s" %(pos_fa_in)
    print("Read in sequences ... ")
    seqs_dic = read_fasta_into_dic(pos_fa_in, all_uc=True)
    assert seqs_dic, "no sequences read in from FASTA file \"%s\"" %(pos_fa_in)
    # Data dictionaries.
    feat_dic = {}
    # Init feat_dic (storing node feature vector data) with sequence one-hot encodings.
    for seq_id in seqs_dic:
        seq = seqs_dic[seq_id]
        if embed:
            feat_dic[seq_id] = convert_seq_to_kmer_embedding(seq, embed_k, kmer2idx_dic,
                                                            l2d=True)
        else:
            feat_dic[seq_id] = string_vectorizer(seq, custom_alphabet=fid2cat_dic["fa"])
    # Channel info dictionary (feat_id -> [type, channel idxs, channel IDs, encoding]).
    ch_info_dic = {}
    # Add sequence one-hot channels.
    ch_info_dic["fa"] = ["C", [], [], "-"]
    if embed:
        channel_nr += 1
        # Add sequence embedding channel.
        channel_id = "embed"
        channel_info = "%i\t%s\tfa\tC\tembedding" %(channel_nr, channel_id)
        channel_info_list.append(channel_info)
        ch_info_dic["fa"][1].append(channel_nr-1)
        ch_info_dic["fa"][2].append(channel_id)
        ch_info_dic["fa"][3] = "embed"
    else:
        for c in fid2cat_dic["fa"]:
            channel_nr += 1
            channel_id = c
            channel_info = "%i\t%s\tfa\tC\tone_hot" %(channel_nr, channel_id)
            channel_info_list.append(channel_info)
            ch_info_dic["fa"][1].append(channel_nr-1)
            ch_info_dic["fa"][2].append(channel_id)
            ch_info_dic["fa"][3] = "one_hot"
    # Check and read in more data.
    for fid, ftype in sorted(fid2type_dic.items()): # fid e.g. fa, ftype: C,N.
        if fid == "fa": # already added to feat_dic (first item).
            continue
        # All features (additional to .fa) like .elem_p.str, .con, .eia, .tra, .rra, or user defined.
        feat_alphabet = fid2cat_dic[fid]
        pos_feat_in = args.in_gt_folder + "/positives." + fid
        if load_negatives:
            pos_feat_in = args.in_gt_folder + "/negatives." + fid
        assert os.path.exists(pos_feat_in), "--in folder does not contain %s" %(pos_feat_in)
        print("Read in .%s annotations ... " %(fid))
        # Load structure data according to set str_mode in train.
        if fid == "str":
            # Deal with structure data.
            feat_dic = read_str_feat_into_dic(pos_feat_in,
                                              str_mode=str_mode,
                                              feat_dic=feat_dic)
            assert feat_dic, "no .%s information read in (feat_dic empty)" %(fid)
            if str_mode == 1:
                encoding = fid2norm_dic[fid]
                ch_info_dic[fid] = ["N", [], [], encoding]
                # Same as read in.
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            elif str_mode == 2:
                ch_info_dic[fid] = ["C", [], [], "one_hot"]
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tC\tone_hot" %(channel_nr, channel_id, fid)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            elif str_mode == 3:
                # Single unpaired probability channel.
                channel_nr += 1
                channel_id = "up"
                encoding = "prob"
                ch_info_dic[fid] = ["N", [], [], encoding]
                channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                channel_info_list.append(channel_info)
                ch_info_dic[fid][1].append(channel_nr-1)
                ch_info_dic[fid][2].append(channel_id)
            elif str_mode == 4:
                # Two one-hot channels: paired (P) + unpaired (U).
                ch_info_dic[fid] = ["C", [], [], "one_hot"]
                channel_nr += 1
                channel_info = "%i\tP\tstr\tC\tone_hot" %(channel_nr)
                channel_info_list.append(channel_info)
                ch_info_dic[fid][1].append(channel_nr-1)
                ch_info_dic[fid][2].append("P")
                channel_nr += 1
                channel_info = "%i\tU\tstr\tC\tone_hot" %(channel_nr)
                channel_info_list.append(channel_info)
                ch_info_dic[fid][1].append(channel_nr-1)
                ch_info_dic[fid][2].append("U")
            else:
                assert False, "invalid str_mode given"
        else:
            """
            All features (additional to .fa and .str) like
            .pc.con, .pp.con, .eia, .tra, .rra, or user-defined.
            """
            feat_dic = read_feat_into_dic(pos_feat_in, ftype,
                                          feat_dic=feat_dic,
                                          label_list=feat_alphabet)
            assert feat_dic, "no .%s information read in (feat_dic empty)" %(fid)
            encoding = fid2norm_dic[fid]
            ch_info_dic[fid] = [ftype, [], [], encoding]
            if ftype == "N":
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            elif ftype == "C":
                for c in feat_alphabet:
                    channel_nr += 1
                    #channel_id = fid + "_" + c
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tC\tone_hot" %(channel_nr, channel_id, fid)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            else:
                assert False, "invalid feature type given (%s) for feature %s" %(ftype,fid)
    # Output channel infos.
    CIOUT = open(channel_infos_out, "w")
    CIOUT.write("ch\tch_id\tfeat_id\tfeat_type\tencoding\n")
    for ch_info in channel_info_list:
        CIOUT.write("%s\n" %(ch_info))
    CIOUT.close()
    """
    Generate list of feature lists all_features and more stuff to return.
    """
    # Sequence ID list + index mappings (sorted by sequence ID).
    seq_ids_list = []
    label_list = []
    idx2id_dic = {}
    id2idx_dic = {}
    i = 0
    for seq_id,seq in sorted(seqs_dic.items()):
        seq_ids_list.append(seq_id)
        label_list.append(i)
        id2idx_dic[seq_id] = i
        idx2id_dic[i] = seq_id
        i += 1
    # Store node data in list of 2d lists.
    all_features = []
    for idx, label in enumerate(label_list):
        seq_id = seq_ids_list[idx]
        # Check loaded feature vector length against model parameter.
        check_num_feat = len(feat_dic[seq_id][0])
        # Bug fix: error message formerly referenced undefined model_num_feat.
        assert num_features == check_num_feat, "# features (num_features) from model parameter file != loaded number of node features (%i != %i)" %(num_features, check_num_feat)
        # Add to all_features list as tensor.
        if store_tensors:
            all_features.append(torch.tensor(feat_dic[seq_id], dtype=torch.float))
        else:
            all_features.append(feat_dic[seq_id])
    # Return loaded data.
    assert all_features, "all_features empty"
    return seqs_dic, idx2id_dic, all_features, ch_info_dic
################################################################################
def load_predict_data(args,
                      kmer2idx_dic=False,
                      list_site_ids=False,
                      store_tensors=True):
    """
    Load prediction data from RNAProt predict output folder
    and return either as list of graphs or list of feature lists.
    kmer2idx_dic:
        Provide k-mer to index mapping dictionary for k-mer embedding.
    list_site_ids:
        Optional list of site IDs to restrict loading to (all IDs must
        be part of the --in test set).
    store_tensors:
        Store data as tensors in returned all_features.

    Returns:
        seqs_dic, idx2id_dic, all_features, ch_info_dic
        (restricted to list_site_ids if given)
    """
    # Checks.
    assert os.path.isdir(args.in_folder), "--in folder does not exist"
    assert os.path.isdir(args.in_train_folder), "--train-in model folder does not exist"
    if args.embed:
        assert kmer2idx_dic, "embed enabled but missing kmer2idx_dic"
        assert args.embed_k, "embed enabled but missing embed_k"
    # Feature file containing info for features used for model training.
    feat_file = args.in_train_folder + "/" + "features.out"
    assert os.path.exists(feat_file), "%s features file expected but does not exist" %(feat_file)
    # Read in model parameters.
    params_file = args.in_train_folder + "/final.params"
    assert os.path.isfile(params_file), "missing model training parameter file %s" %(params_file)
    params_dic = read_settings_into_dic(params_file)
    assert "n_feat" in params_dic, "num_features info missing in model parameters file %s" %(params_file)
    model_num_feat = int(params_dic["n_feat"])
    # rnaprot predict output folder.
    out_folder = args.out_folder
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    # Channel info output file.
    channel_infos_out = out_folder + "/" + "channel_infos.out"
    channel_info_list = []
    channel_nr = 0
    # Read in feature info (format: feat_id, feat_type (C/N),
    # comma-separated categories / score IDs, normalization mode).
    fid2type_dic = {}
    fid2cat_dic = {} # Store category labels or numerical score IDs in list.
    fid2norm_dic = {}
    fid2row_dic = {}
    print("Read in feature infos from %s ... " %(feat_file))
    with open(feat_file) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            feat_id = cols[0]
            feat_type = cols[1]
            feat_cat_list = cols[2].split(",")
            feat_cat_list.sort()
            feat_norm = cols[3]
            fid2row_dic[feat_id] = row
            assert feat_id not in fid2type_dic, "feature ID \"%s\" found twice in feature file" %(feat_id)
            fid2type_dic[feat_id] = feat_type
            fid2cat_dic[feat_id] = feat_cat_list
            fid2norm_dic[feat_id] = feat_norm
    assert fid2type_dic, "no feature infos read in from rnaprot train feature file %s" %(feat_file)
    # Check sequence feature.
    assert "fa" in fid2type_dic, "feature ID \"fa\" not in feature file"
    assert fid2cat_dic["fa"] == ["A", "C", "G", "U"], "sequence feature alphabet != A,C,G,U"
    # Read in FASTA sequences.
    test_fa_in = args.in_folder + "/" + "test.fa"
    assert os.path.exists(test_fa_in), "--in folder does not contain %s" %(test_fa_in)
    print("Read in sequences ... ")
    test_seqs_dic = read_fasta_into_dic(test_fa_in, all_uc=True)
    assert test_seqs_dic, "no sequences read in from FASTA file \"%s\"" %(test_fa_in)
    # Check for 4 distinct nucleotides.
    cc_dic = seqs_dic_count_chars(test_seqs_dic)
    allowed_nt_dic = {'A': 1, 'C': 1, 'G': 1, 'U': 1}
    c_nts = 4
    for nt in cc_dic:
        if nt not in allowed_nt_dic:
            assert False, "sequences with invalid character \"%s\" encountered (allowed characters: ACGU)" %(nt)
    assert len(cc_dic) == c_nts, "# of distinct nucleotide characters in sequences != expected # (%i != %i)" %(len(cc_dic), c_nts)
    # Data dictionaries.
    feat_dic = {}
    # Init feat_dic (storing node feature vector data) with sequence one-hot encodings.
    for seq_id in test_seqs_dic:
        seq = test_seqs_dic[seq_id]
        if args.embed:
            feat_dic[seq_id] = convert_seq_to_kmer_embedding(seq, args.embed_k, kmer2idx_dic,
                                                            l2d=True)
        else:
            feat_dic[seq_id] = string_vectorizer(seq, custom_alphabet=fid2cat_dic["fa"])
    # Channel info dictionary (feat_id -> [type, channel idxs, channel IDs, encoding]).
    ch_info_dic = {}
    # Add sequence one-hot channels.
    ch_info_dic["fa"] = ["C", [], [], "-"]
    if args.embed:
        channel_nr += 1
        # Add sequence embedding channel.
        # Bug fix: channel_id was formerly undefined in this branch,
        # raising NameError whenever embedding was enabled.
        channel_id = "embed"
        channel_info = "%i\t%s\tfa\tC\tembedding" %(channel_nr, channel_id)
        channel_info_list.append(channel_info)
        ch_info_dic["fa"][1].append(channel_nr-1)
        ch_info_dic["fa"][2].append(channel_id)
        ch_info_dic["fa"][3] = "embed"
    else:
        # Add sequence one-hot channels.
        for c in fid2cat_dic["fa"]:
            channel_nr += 1
            channel_id = c
            channel_info = "%i\t%s\tfa\tC\tone_hot" %(channel_nr, channel_id)
            channel_info_list.append(channel_info)
            ch_info_dic["fa"][1].append(channel_nr-1)
            ch_info_dic["fa"][2].append(channel_id)
            ch_info_dic["fa"][3] = "one_hot"
    # Check and read in more data.
    for fid, ftype in sorted(fid2type_dic.items()): # fid e.g. fa, ftype: C,N.
        if fid == "fa": # already added to feat_dic (first item).
            continue
        feat_alphabet = fid2cat_dic[fid]
        test_feat_in = args.in_folder + "/test." + fid
        assert os.path.exists(test_feat_in), "--in folder does not contain %s" %(test_feat_in)
        print("Read in .%s annotations ... " %(fid))
        if fid == "str":
            # Deal with structure data.
            feat_dic = read_str_feat_into_dic(test_feat_in,
                                              str_mode=args.str_mode,
                                              feat_dic=feat_dic)
            assert feat_dic, "no .%s information read in (feat_dic empty)" %(fid)
            # Set channel infos depending on str_mode.
            if args.str_mode == 1:
                encoding = fid2norm_dic[fid]
                ch_info_dic[fid] = ["N", [], [], encoding]
                # Same as read in.
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            elif args.str_mode == 2:
                ch_info_dic[fid] = ["C", [], [], "one_hot"]
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tC\tone_hot" %(channel_nr, channel_id, fid)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            elif args.str_mode == 3:
                # Single unpaired probability channel.
                channel_nr += 1
                channel_id = "up"
                encoding = "prob"
                ch_info_dic[fid] = ["N", [], [], encoding]
                channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                channel_info_list.append(channel_info)
                ch_info_dic[fid][1].append(channel_nr-1)
                ch_info_dic[fid][2].append(channel_id)
            elif args.str_mode == 4:
                # Two one-hot channels: paired (P) + unpaired (U).
                ch_info_dic[fid] = ["C", [], [], "one_hot"]
                channel_nr += 1
                channel_info = "%i\tP\tstr\tC\tone_hot" %(channel_nr)
                channel_info_list.append(channel_info)
                ch_info_dic[fid][1].append(channel_nr-1)
                ch_info_dic[fid][2].append("P")
                channel_nr += 1
                channel_info = "%i\tU\tstr\tC\tone_hot" %(channel_nr)
                channel_info_list.append(channel_info)
                ch_info_dic[fid][1].append(channel_nr-1)
                ch_info_dic[fid][2].append("U")
            else:
                assert False, "invalid str_mode given"
        else:
            """
            All features (additional to .fa and .str) like
            .pc.con, .pp.con, .eia, .tra, .rra, or user-defined.
            """
            feat_dic = read_feat_into_dic(test_feat_in, ftype,
                                          feat_dic=feat_dic,
                                          label_list=feat_alphabet)
            assert feat_dic, "no .%s information read in (feat_dic empty)" %(fid)
            encoding = fid2norm_dic[fid]
            ch_info_dic[fid] = [ftype, [], [], encoding]
            if ftype == "N":
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            elif ftype == "C":
                for c in feat_alphabet:
                    channel_nr += 1
                    #channel_id = fid + "_" + c
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tC\tone_hot" %(channel_nr, channel_id, fid)
                    channel_info_list.append(channel_info)
                    ch_info_dic[fid][1].append(channel_nr-1)
                    ch_info_dic[fid][2].append(channel_id)
            else:
                assert False, "invalid feature type given (%s) for feature %s" %(ftype,fid)
    # Output channel infos.
    CIOUT = open(channel_infos_out, "w")
    CIOUT.write("ch\tch_id\tfeat_id\tfeat_type\tencoding\n")
    for ch_info in channel_info_list:
        CIOUT.write("%s\n" %(ch_info))
    CIOUT.close()
    """
    Generate list of feature lists all_features and index mappings
    to return.
    """
    # Sequence ID list + index mappings (sorted by sequence ID).
    seq_ids_list = []
    label_list = []
    idx2id_dic = {}
    id2idx_dic = {}
    i = 0
    for seq_id,seq in sorted(test_seqs_dic.items()):
        seq_ids_list.append(seq_id)
        label_list.append(i)
        id2idx_dic[seq_id] = i
        idx2id_dic[i] = seq_id
        i += 1
    # Construct features list.
    all_features = []
    select_ids_dic = {}
    if list_site_ids:
        for site_id in list_site_ids:
            assert site_id in test_seqs_dic, "provided --site-id %s not part of --in set" %(site_id)
            select_ids_dic[site_id] = 1
    new_idx2id_dic = {}
    new_seqs_dic = {}
    new_idx_c = 0
    for idx, label in enumerate(label_list):
        seq_id = seq_ids_list[idx]
        seq = test_seqs_dic[seq_id]
        # Check loaded feature vector length against model parameter.
        check_num_feat = len(feat_dic[seq_id][0])
        assert model_num_feat == check_num_feat, "# features (num_features) from model parameter file != loaded number of node features (%i != %i)" %(model_num_feat, check_num_feat)
        if select_ids_dic:
            if seq_id in select_ids_dic:
                new_seqs_dic[seq_id] = seq
                new_idx2id_dic[new_idx_c] = seq_id
                new_idx_c += 1
                if store_tensors:
                    all_features.append(torch.tensor(feat_dic[seq_id], dtype=torch.float))
                else:
                    all_features.append(feat_dic[seq_id])
        else:
            if store_tensors:
                all_features.append(torch.tensor(feat_dic[seq_id], dtype=torch.float))
            else:
                all_features.append(feat_dic[seq_id])
    assert all_features, "no features stored in all_features (all_features empty)"
    # Return loaded data (restricted set if site IDs were given).
    if select_ids_dic:
        return new_seqs_dic, new_idx2id_dic, all_features, ch_info_dic
    else:
        return test_seqs_dic, idx2id_dic, all_features, ch_info_dic
################################################################################
def read_str_feat_into_dic(str_feat_file,
                           str_mode=1,
                           feat_dic=False):
    """
    Read in .str feature data (structural element probabilities E H I M S,
    tab-separated, one row per sequence position) into dictionary.
    Depending on set --str-mode from rnaprot train, store data differently:
    1: keep probabilities as read in, 2: one-hot encode most probable
    element, 3: single unpaired probability (1 - p(S), clamped to [0,1]),
    4: one-hot paired/unpaired. If a populated feat_dic is supplied,
    values are appended position-wise to the existing per-sequence lists.

    Example:
    >CLIP_01
    0.1 0.2 0.4 0.2 0.1
    0.2 0.3 0.2 0.1 0.2

    >>> str_feat_file = "test_data/test.elem_p.str"
    >>> read_str_feat_into_dic(str_feat_file, str_mode=1)
    {'CLIP_01': [[0.1, 0.2, 0.4, 0.2, 0.1], [0.2, 0.3, 0.2, 0.1, 0.2]]}
    >>> read_str_feat_into_dic(str_feat_file, str_mode=2)
    {'CLIP_01': [[0, 0, 1, 0, 0], [0, 1, 0, 0, 0]]}
    >>> read_str_feat_into_dic(str_feat_file, str_mode=3)
    {'CLIP_01': [[0.9], [0.8]]}
    >>> read_str_feat_into_dic(str_feat_file, str_mode=4)
    {'CLIP_01': [[0, 1], [0, 1]]}
    """
    # Append to given dictionary or build a fresh one.
    append_mode = bool(feat_dic)
    if not append_mode:
        feat_dic = {}
    assert str_mode in (1, 2, 3, 4), "invalid str_mode given"
    cur_id = ""
    pos_i = 0
    with open(str_feat_file) as f:
        for line in f:
            m = re.search(">(.+)", line)
            if m:
                cur_id = m.group(1)
                # Init only necessary if no populated feat_dic given.
                if not append_mode:
                    feat_dic[cur_id] = []
                pos_i = 0
                continue
            # One row of E H I M S probabilities.
            vals = [float(v) for v in line.strip().split('\t')]
            if str_mode == 1:
                new_vals = vals
            elif str_mode == 2:
                new_vals = convert_prob_list_to_1h(vals)
            elif str_mode == 3:
                # Unpaired probability = 1 - p(S), clamped to [0, 1].
                new_vals = [min(max(1 - vals[4], 0.0), 1.0)]
            else:  # str_mode == 4.
                u_p = min(max(1 - vals[4], 0.0), 1.0)
                new_vals = [0, 1] if u_p > vals[4] else [1, 0]
            if append_mode:
                feat_dic[cur_id][pos_i].extend(new_vals)
            else:
                feat_dic[cur_id].append(new_vals)
            pos_i += 1
    assert feat_dic, "feat_dic empty"
    return feat_dic
################################################################################
def get_peak_region(pos, sc_list,
reverse=False,
set_mean=False):
"""
For a list of scores get peak region around score position which
lies above the mean score. Return 0-based start, 1-based end.
reverse:
Instead of region above, get region below the mean. Set this True
if negative scores indicate stronger signal.
set_mean:
Instead of using mean of scores list, use provided mean to get
the peak region.
>>> sc_list = [1,2,3,2,0.5,1]
>>> get_peak_region(2, sc_list, set_mean=1)
(0, 4)
>>> sc_list = [-1,-2,-3,-2,-2,-0.5, 0]
>>> get_peak_region(2, sc_list, reverse=True, set_mean=-1)
(0, 5)
>>> sc_list = [-5,-2,-3,-2,-4,-0.5, 0]
>>> get_peak_region(0, sc_list, reverse=True, set_mean=-4)
(0, 1)
>>> sc_list = [5]
>>> get_peak_region(0, sc_list, set_mean=5)
(0, 1)
"""
assert sc_list, "sc_list empty"
assert len(sc_list) > pos, "len(sc_list) <= pos, but needs to be at least pos+1"
# Mean score.
mean_sc = statistics.mean(sc_list)
if set_mean:
mean_sc = set_mean
# Init start + end of > mean region.
s = pos
e = pos
# Get start + end positions above (or below if reverse) mean.
for i in reversed(range(pos)):
if reverse:
if sc_list[i] <= mean_sc:
s = i
else:
break
else:
if sc_list[i] >= mean_sc:
s = i
else:
break
for i in range(pos+1, len(sc_list)):
if reverse:
if sc_list[i] <= mean_sc:
e = i
else:
break
else:
if sc_list[i] >= mean_sc:
e = i
else:
break
# Make end 1-based.
e = e + 1
return s, e
################################################################################
def get_top_sc_list_pos(scores_list,
                        get_lowest=False,
                        padding=0):
    """
    Get highest (or if get_lowest lowest) scoring list position (zero-based).
    Return position and score at this position.
    scores_list:
        1d list with scores.
    get_lowest:
        Set True to get lowest scoring position.
    padding:
        Do not look at first #padding and last #padding positions.
    >>> sc_list = [0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
    >>> get_top_sc_list_pos(sc_list)
    (0, 0.6)
    >>> get_top_sc_list_pos(sc_list, padding=2)
    (2, 0.4)
    >>> get_top_sc_list_pos(sc_list, get_lowest=True)
    (5, 0.1)
    """
    assert scores_list, "scores_list empty"
    l_sc = len(scores_list)
    assert l_sc >= (padding*2+1), "scores_list length needs to be >= padding*2+1 !"
    # Start inside the padded window.
    top_pos = padding
    # Bug fix: use +-infinity sentinels, so score lists lying entirely
    # outside the former hard-coded [-1000, 1000] range are handled
    # correctly.
    top_sc = float("-inf")
    if get_lowest:
        top_sc = float("inf")
    for idx in range(padding, l_sc-padding):
        pos_sc = scores_list[idx]
        if get_lowest:
            if pos_sc < top_sc:
                top_sc = pos_sc
                top_pos = idx
        else:
            if pos_sc > top_sc:
                top_sc = pos_sc
                top_pos = idx
    return top_pos, top_sc
################################################################################
def get_fid2fidx_mappings(ch_info_dic):
    """
    Scan channel feature infos for numerical features occupying exactly
    one channel (these need standard deviation calculation for plotting),
    and map each such feature ID to its channel index. Return the
    mappings in both directions (feature ID -> index, index -> feature ID).
    >>> ch_info_dic = {'fa': ['C', [0], ['embed'], 'embed'], 'pc.con': ['N', [3], ['phastcons_score'], 'prob']}
    >>> get_fid2fidx_mappings(ch_info_dic)
    ({'pc.con': 3}, {3: 'pc.con'})
    """
    fid_to_idx = {}
    idx_to_fid = {}
    for feat_id, infos in ch_info_dic.items():
        # infos[0]: feature type (C or N); infos[1]: channel indices
        # occupied by the feature.
        if infos[0] == "N" and len(infos[1]) == 1:
            ch_idx = infos[1][0]
            fid_to_idx[feat_id] = ch_idx
            idx_to_fid[ch_idx] = feat_id
    return fid_to_idx, idx_to_fid
################################################################################
def gp_eval_make_motif_plots(args, motif_plots_folder,
                             ch_info_dic, all_features,
                             si2sc_dic, idx2id_dic, seqs_dic,
                             worst_win_pert_dic, worst_win_pos_dic,
                             got_saliencies=False,
                             calc_stdev=True,
                             motif_file_dic=None,
                             motif_mode=1):
    """
    Make some motif plots.

    For every combination of motif size (args.list_motif_sizes) and number
    of top sites (args.list_nr_top_sites), extract a fixed-size window
    around each top site's peak position, accumulate the windows into a
    motif matrix, average (or weight) the matrix, and plot it via
    make_motif_plot(). Output files go to motif_plots_folder, in
    png or pdf format (args.plot_format).

    args:
        Needs n_feat, embed, plot_format, list_motif_sizes,
        list_nr_top_sites.
    ch_info_dic:
        Channel info dictionary (feat_id -> [type, idxs, IDs, encoding]).
    all_features:
        Per-site feature matrices, indexed by site index si.
    si2sc_dic:
        Site index -> site score; sites are processed in score order.
    idx2id_dic, seqs_dic:
        Site index -> sequence ID, sequence ID -> sequence.
    got_saliencies:
        This means position-wise scores for generating motif are positive,
        not negative like when worst windows are given.
        worst_win_pert_dic stores saliency score lists,
        worst_win_pos_dic stores saliency peak positions.
    calc_stdev:
        If standard deviations for numerical features should be calculated
        for plotting.
    motif_file_dic:
        If given, records each written motif plot file path (as key).
    motif_mode:
        1 : take top whole-site scores, and generate weighted motif.
        2 : take top whole-site scores, and generate non-weighted motif.
        3 : take top window mutation scores, and generate non-weighted
            motif.
    """
    # Number of sequence channels.
    # NOTE(review): assumes k-mer embedding occupies 1 channel which gets
    # expanded to 4 nucleotide channels here (+3) — TODO confirm against
    # conv_embed_feature_list().
    n_feat_matrix = args.n_feat
    if args.embed:
        n_feat_matrix = args.n_feat + 3
    weighted_motif = False
    if motif_mode == 1:
        weighted_motif = True
    map_nt2idx_dix = {"A" : 0, "C" : 1, "G" : 2, "U" : 3}
    # Plot format.
    plot_format = "png"
    if args.plot_format == 2:
        plot_format = "pdf"
    # rev_sort meaning from high to low scores.
    rev_sort = True
    if motif_mode == 3:
        rev_sort = False
    # Numerical features with standard deviations.
    stdev_fid2fidx_dic, stdev_fidx2fid_dic = get_fid2fidx_mappings(ch_info_dic)
    # For each motif size.
    for motif_size in args.list_motif_sizes:
        # Motif center position extension.
        motif_extlr = int(motif_size / 2)
        # For each number of top sites.
        for nr_top_sites in args.list_nr_top_sites:
            print("Create motif (size %i) from top %i sites ... " %(motif_size, nr_top_sites))
            site_count = 0
            c_min_len_skipped = 0
            # Init motif score matrix (motif_size x n_feat_matrix, zeros).
            motif_matrix = []
            for i in range(motif_size):
                motif_matrix.append([0.0]*n_feat_matrix)
            l_mm = len(motif_matrix) # Motif length.
            l_sv = len(motif_matrix[0]) # site vector length.
            # Standard deviations for numerical features:
            # feature ID -> per-motif-position list of observed scores.
            fid2sc_dic = {}
            if calc_stdev:
                for fid in stdev_fid2fidx_dic:
                    fid2sc_dic[fid] = []
                    for i in range(motif_size):
                        fid2sc_dic[fid].append([])
            # Brick by brick: accumulate windows from best-scoring sites.
            for si, sc in sorted(si2sc_dic.items(), key=lambda item: item[1], reverse=rev_sort):
                # Get feature list for site.
                if args.embed:
                    feat_list = conv_embed_feature_list(all_features[si])
                else:
                    feat_list = all_features[si]
                seq_id = idx2id_dic[si]
                seq = seqs_dic[seq_id]
                seq_list = list(seq)
                l_seq = len(seq)
                worst_win_pert_list = worst_win_pert_dic[seq_id]
                max_pos = worst_win_pos_dic[seq_id] # 0-based.
                # Minimum sequence length for extracting motifs.
                site_l = len(worst_win_pert_list)
                assert site_l == l_seq, "site_l != l_seq (%i != %i)" %(site_l, l_seq)
                if site_l < (motif_extlr*2 + 1):
                    c_min_len_skipped += 1
                    continue # should be captured by if not max_pos check already.
                # Extract max_pos motif brick (window around peak position).
                s_brick = max_pos - motif_extlr
                e_brick = max_pos + motif_extlr + 1 # 1-based, thus + 1.
                worst_sc_brick = worst_win_pert_list[s_brick:e_brick]
                seq_brick = seq_list[s_brick:e_brick]
                assert s_brick >= 0, "s_brick <= 0 (s_brick = %i) which should not happen since max_pos selection takes care of this" %(s_brick)
                assert e_brick <= site_l, "e_brick > site length (%i > %i) which should not happen since max_pos selection takes care of this" %(e_brick, site_l)
                start_j = 0
                if weighted_motif:
                    # Update sequence features in weigthed fashion:
                    # add (saliency) or subtract (worst window) the
                    # position score onto the observed nucleotide channel.
                    for i in range(l_mm):
                        worst_sc = worst_sc_brick[i]
                        worst_sc_nt = seq_brick[i]
                        j = map_nt2idx_dix[worst_sc_nt]
                        if got_saliencies:
                            motif_matrix[i][j] = motif_matrix[i][j] + worst_sc
                        else:
                            motif_matrix[i][j] = motif_matrix[i][j] - worst_sc
                    start_j = 4
                for i in range(l_mm):
                    # If weighted_motif=True, just update additional features.
                    for j in range(start_j, l_sv):
                        motif_matrix[i][j] += feat_list[s_brick:e_brick][i][j]
                        # For one-channel numerical features, store scores to calculate stdev.
                        if calc_stdev:
                            if j in stdev_fidx2fid_dic:
                                fid2sc_dic[stdev_fidx2fid_dic[j]][i].append(feat_list[s_brick:e_brick][i][j])
                # Increment site count.
                site_count += 1
                if site_count >= nr_top_sites:
                    break
            assert site_count, "no top motif information extracted for motif_size %i and nr_top_sites %i (site_count = 0)" %(motif_size, nr_top_sites)
            print("# motif sites extracted: %i" %(site_count))
            if c_min_len_skipped:
                print("# sites skipped (min_len): %i" %(c_min_len_skipped))
            # Average motif matrix values.
            start_j = 0
            if weighted_motif:
                for i in range(l_mm):
                    # Sum of nucleotide scores at motif position i.
                    # NOTE(review): assumes pos_sum != 0; an all-zero
                    # nucleotide column would raise ZeroDivisionError.
                    pos_sum = sum(motif_matrix[i][0:4])
                    # Normalize each nucleotide score by sum of scores.
                    for j in range(0,4):
                        motif_matrix[i][j] = motif_matrix[i][j] / pos_sum
                start_j = 4
            for i in range(l_mm):
                for j in range(start_j, l_sv):
                    motif_matrix[i][j] = motif_matrix[i][j] / site_count
            # Calculate standard deviations for one-channel numerical features at each motif position.
            # NOTE(review): statistics.stdev() needs >= 2 data points, so
            # this presumes at least 2 extracted sites — confirm callers.
            fid2stdev_dic = {}
            if calc_stdev:
                for fid in fid2sc_dic:
                    fid2stdev_dic[fid] = []
                    for i in range(l_mm):
                        fid2stdev_dic[fid].append(statistics.stdev(fid2sc_dic[fid][i]))
            # Plot motif.
            if got_saliencies:
                motif_out_file = motif_plots_folder + "/" + "top_motif_l" + str(motif_size) + "_top" + str(nr_top_sites) + "." + plot_format
            else:
                motif_out_file = motif_plots_folder + "/" + "top_motif_l" + str(motif_size) + "_top" + str(nr_top_sites) + ".win." + plot_format
            make_motif_plot(motif_matrix, ch_info_dic, motif_out_file,
                            fid2stdev_dic=fid2stdev_dic)
            if motif_file_dic is not None:
                motif_file_dic[motif_out_file] = 1
################################################################################
def load_training_data(args,
                       store_tensors=True,
                       kmer2idx_dic=False,
                       feat_info_dic=None,
                       load_only_pos=False,
                       load_only_neg=False,
                       li2label_dic=None):
    """
    Load training data from data folder generated by rnaprot gt and
    return training data as:
    seqs_dic, idx2id_dic, label_list, all_features

    args:
        Command line arguments object. Reads args.in_folder,
        args.out_folder, args.embed, args.embed_k, args.only_seq,
        args.use_* feature selection flags, args.use_add_feat,
        args.str_mode and args.gm_cv.
    store_tensors:
        Store data as torch tensors in returned all_features
        (otherwise plain lists of per-position feature vectors).
    kmer2idx_dic:
        Provide k-mer to index mapping dictionary for k-mer embedding
        (required if args.embed is set).
    feat_info_dic:
        If given, store feature infos ("type;categories" strings keyed
        by feature ID) in this dictionary.
    li2label_dic:
        Class label to RBP label/name dictionary. For associating the
        positive class label to the RBP name in generic model cross
        validation (if --gm-cv is set).
    load_only_pos:
        Return only positive instances.
    load_only_neg:
        NOTE(review): currently unused inside the function body.
    """
    # Checks.
    assert os.path.exists(args.in_folder), "--in folder does not exist"
    if args.embed:
        assert kmer2idx_dic, "embed enabled but missing kmer2idx_dic"
        assert args.embed_k, "embed enabled but missing embed_k"
    # Feature file containing info for features inside --in folder.
    feat_file = args.in_folder + "/" + "features.out"
    assert os.path.exists(feat_file), "%s features file expected but not does not exist" %(feat_file)
    # rnaprot train output folder.
    if not os.path.exists(args.out_folder):
        os.makedirs(args.out_folder)
    # Example features.out content (feat_id, type, categories/IDs, normalization):
    """
    fa C A,C,G,U -
    str N E,H,I,M,S prob
    pc.con N phastcons_score prob
    pp.con N phylop_score minmax2
    tra C C,F,N,T -
    eia C E,I -
    rra C N,R -
    """
    # Channel info output file.
    channel_infos_out = args.out_folder + "/" + "channel_infos.out"
    channel_info_list = []
    channel_nr = 0
    fid2type_dic = {}
    fid2cat_dic = {} # Store category labels or numerical score IDs in list.
    fid2norm_dic = {}
    print("Read in feature infos from %s ... " %(feat_file))
    with open(feat_file) as f:
        for line in f:
            # NOTE(review): row is assigned but unused in this loop.
            row = line.strip()
            cols = line.strip().split("\t")
            feat_id = cols[0]
            feat_type = cols[1] # C,N
            feat_cat_list = cols[2].split(",")
            feat_cat_list.sort()
            feat_norm = cols[3]
            assert feat_id not in fid2type_dic, "feature ID \"%s\" found twice in feature file" %(feat_id)
            fid2type_dic[feat_id] = feat_type
            fid2cat_dic[feat_id] = feat_cat_list
            fid2norm_dic[feat_id] = feat_norm
    # No-op attribute access (the with statement already closed the file).
    f.closed
    assert fid2type_dic, "no feature IDs read in from feature file %s" %(feat_file)
    # Read in FASTA sequences.
    pos_fa_in = args.in_folder + "/" + "positives.fa"
    neg_fa_in = args.in_folder + "/" + "negatives.fa"
    assert os.path.exists(pos_fa_in), "--in folder does not contain %s" %(pos_fa_in)
    assert os.path.exists(neg_fa_in), "--in folder does not contain %s" %(neg_fa_in)
    # Check sequence feature.
    assert "fa" in fid2type_dic, "feature ID \"fa\" not in feature file"
    assert fid2cat_dic["fa"] == ["A", "C", "G", "U"], "sequence feature alphabet != A,C,G,U"
    # Read in sequences.
    print("Read in sequences ... ")
    seqs_dic = read_fasta_into_dic(pos_fa_in, all_uc=True)
    pos_ids_dic = {}
    for seq_id in seqs_dic:
        pos_ids_dic[seq_id] = 1
    seqs_dic = read_fasta_into_dic(neg_fa_in, all_uc=True,
                                   seqs_dic=seqs_dic)
    assert seqs_dic, "no sequences read from FASTA files"
    # Negative IDs = everything in seqs_dic not already seen in positives.
    neg_ids_dic = {}
    for seq_id in seqs_dic:
        if seq_id not in pos_ids_dic:
            neg_ids_dic[seq_id] = 1
    # Check for 4 (8) distinct nucleotides.
    cc_dic = seqs_dic_count_chars(seqs_dic)
    allowed_nt_dic = {'A': 1, 'C': 1, 'G': 1, 'U': 1}
    c_nts = 4
    for nt in cc_dic:
        if nt not in allowed_nt_dic:
            assert False, "sequences with invalid character \"%s\" encountered (allowed characters: ACGU" %(nt)
    assert len(cc_dic) == c_nts, "# of distinct nucleotide characters in sequences != expected # (%i != %i)" %(len(cc_dic), c_nts)
    # Check for individually selected features.
    indiv_feat_dic = {}
    if args.use_pc_con:
        indiv_feat_dic["pc.con"] = 1
    if args.use_pp_con:
        indiv_feat_dic["pp.con"] = 1
    if args.use_eia:
        indiv_feat_dic["eia"] = 1
    if args.use_tra:
        indiv_feat_dic["tra"] = 1
    if args.use_rra:
        indiv_feat_dic["rra"] = 1
    if args.use_str:
        indiv_feat_dic["str"] = 1
    # Looking for additional features.
    std_fid_dic = {"pc.con" : 1,
                   "pp.con" : 1,
                   "eia" : 1,
                   "tra" : 1,
                   "rra" : 1,
                   "fa" : 1,
                   "str" : 1}
    add_fid_dic = {}
    for fid in fid2type_dic:
        if fid not in std_fid_dic:
            add_fid_dic[fid] = 1
    if args.use_add_feat:
        for fid in add_fid_dic:
            indiv_feat_dic[fid] = 1
    # Remove features from fid2type_dic.
    # If any feature was individually selected, keep only "fa" plus the
    # selected ones; everything else is dropped.
    if indiv_feat_dic:
        del_feat_list = []
        for fid in fid2type_dic:
            if fid == "fa":
                continue
            if fid not in indiv_feat_dic:
                del_feat_list.append(fid)
        for fid in del_feat_list:
            del fid2type_dic[fid]
    # If only_seq, remove all other found features.
    if args.only_seq:
        fid2type_dic = {}
        fid2type_dic["fa"] = "C"
    # Data dictionaries.
    feat_dic = {}
    # features.out str row.
    feat_out_str_row = False
    # Init feat_dic (storing node feature vector data) with sequence one-hot encodings.
    for seq_id in seqs_dic:
        seq = seqs_dic[seq_id]
        if args.embed:
            feat_dic[seq_id] = convert_seq_to_kmer_embedding(seq, args.embed_k, kmer2idx_dic,
                                                             l2d=True)
        else:
            feat_dic[seq_id] = string_vectorizer(seq, custom_alphabet=fid2cat_dic["fa"])
    if args.embed:
        channel_nr += 1
        # Add sequence embedding channel.
        channel_info = "%i\tembed\tfa\tC\tembedding" %(channel_nr)
        channel_info_list.append(channel_info)
        if feat_info_dic is not None:
            feat_info_dic["fa"] = "C;embed"
    else:
        # Add sequence one-hot channels.
        for c in fid2cat_dic["fa"]:
            channel_nr += 1
            channel_id = c
            channel_info = "%i\t%s\tfa\tC\tone_hot" %(channel_nr, channel_id)
            channel_info_list.append(channel_info)
        if feat_info_dic is not None:
            feat_info_dic["fa"] = "C;A,C,G,U"
    # Check and read in more data.
    for fid, ftype in sorted(fid2type_dic.items()): # fid e.g. fa, ftype: C,N.
        if fid == "fa": # already added to feat_dic (first item).
            continue
        feat_alphabet = fid2cat_dic[fid]
        pos_feat_in = args.in_folder + "/positives." + fid
        neg_feat_in = args.in_folder + "/negatives." + fid
        assert os.path.exists(pos_feat_in), "--in folder does not contain %s" %(pos_feat_in)
        assert os.path.exists(neg_feat_in), "--in folder does not contain %s" %(neg_feat_in)
        print("Read in .%s annotations ... " %(fid))
        if fid == "str":
            # Deal with structure data.
            feat_dic = read_str_feat_into_dic(pos_feat_in,
                                              str_mode=args.str_mode,
                                              feat_dic=feat_dic)
            feat_dic = read_str_feat_into_dic(neg_feat_in,
                                              str_mode=args.str_mode,
                                              feat_dic=feat_dic)
            assert feat_dic, "no .%s information read in (feat_dic empty)" %(fid)
            # str_mode decides how structure is encoded (channels differ).
            if args.str_mode == 1:
                # Same as read in.
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    encoding = fid2norm_dic[fid]
                    channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                    channel_info_list.append(channel_info)
                feat_out_str_row = "str\tN\tE,H,I,M,S\tprob"
                if feat_info_dic is not None:
                    feat_info_dic["str"] = "N;E,H,I,M,S"
            elif args.str_mode == 2:
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tC\tone_hot" %(channel_nr, channel_id, fid)
                    channel_info_list.append(channel_info)
                feat_out_str_row = "str\tC\tE,H,I,M,S\t-"
                if feat_info_dic is not None:
                    feat_info_dic["str"] = "C;E,H,I,M,S"
            elif args.str_mode == 3:
                channel_nr += 1
                channel_id = "up"
                encoding = "prob"
                channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                channel_info_list.append(channel_info)
                feat_out_str_row = "str\tN\tup\tprob"
                if feat_info_dic is not None:
                    feat_info_dic["str"] = "N;up"
            elif args.str_mode == 4:
                channel_nr += 1
                channel_info = "%i\tP\tstr\tC\tone_hot" %(channel_nr)
                channel_info_list.append(channel_info)
                channel_nr += 1
                channel_info = "%i\tU\tstr\tC\tone_hot" %(channel_nr)
                channel_info_list.append(channel_info)
                feat_out_str_row = "str\tC\tP,U\t-"
                if feat_info_dic is not None:
                    feat_info_dic["str"] = "C;P,U"
            else:
                assert False, "invalid str_mode given"
        else:
            """
            All features (additional to .fa and .str) like
            .pc.con, .pp.con, .eia, .tra, .rra, or user-defined.
            """
            feat_dic = read_feat_into_dic(pos_feat_in, ftype,
                                          feat_dic=feat_dic,
                                          label_list=feat_alphabet)
            feat_dic = read_feat_into_dic(neg_feat_in, ftype,
                                          feat_dic=feat_dic,
                                          label_list=feat_alphabet)
            assert feat_dic, "no .%s information read in (feat_dic empty)" %(fid)
            ch_id_str = ",".join(feat_alphabet)
            if ftype == "N":
                for c in feat_alphabet:
                    channel_nr += 1
                    channel_id = c
                    encoding = fid2norm_dic[fid]
                    channel_info = "%i\t%s\t%s\tN\t%s" %(channel_nr, channel_id, fid, encoding)
                    channel_info_list.append(channel_info)
                if feat_info_dic is not None:
                    feat_info_dic[fid] = "N;" + ch_id_str
            elif ftype == "C":
                for c in feat_alphabet:
                    channel_nr += 1
                    #channel_id = fid + "_" + c
                    channel_id = c
                    channel_info = "%i\t%s\t%s\tC\tone_hot" %(channel_nr, channel_id, fid)
                    channel_info_list.append(channel_info)
                if feat_info_dic is not None:
                    feat_info_dic[fid] = "C;" + ch_id_str
            else:
                assert False, "invalid feature type given (%s) for feature %s" %(ftype,fid)
    # Check for same feature vector lengths.
    # All sites must have per-position feature vectors of identical size.
    fvl_dic = {}
    for seq_id in feat_dic:
        fvl_dic[len(feat_dic[seq_id][0])] = 1
    len_fvl_dic = len(fvl_dic)
    assert len_fvl_dic == 1, "various feature vector lengths (%i) encountered in feat_dic" %(len_fvl_dic)
    # Write used features.out file to rnaprot train output folder.
    feat_table_out = args.out_folder + "/" + "features.out"
    FEATOUT = open(feat_table_out, "w")
    with open(feat_file) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            feat_id = cols[0]
            if feat_id in fid2type_dic:
                if feat_id == "str":
                    # str row may differ from input depending on str_mode.
                    FEATOUT.write("%s\n" %(feat_out_str_row))
                else:
                    FEATOUT.write("%s\n" %(row))
    f.closed
    FEATOUT.close()
    # Output channel infos.
    CIOUT = open(channel_infos_out, "w")
    CIOUT.write("ch\tch_id\tfeat_id\tfeat_type\tencoding\n")
    for ch_info in channel_info_list:
        CIOUT.write("%s\n" %(ch_info))
    CIOUT.close()
    """
    Generate list of feature lists all_features.
    """
    # Sequence ID list + label list.
    # Sorted positives first (label 1), then sorted negatives (label 0).
    seq_ids_list = []
    sorted_pos_ids_list = []
    label_list = []
    idx2id_dic = {}
    id2idx_dic = {}
    i = 0
    for seq_id,c in sorted(pos_ids_dic.items()):
        seq_ids_list.append(seq_id)
        sorted_pos_ids_list.append(seq_id)
        label_list.append(1)
        id2idx_dic[seq_id] = i
        idx2id_dic[i] = seq_id
        i += 1
    for seq_id,c in sorted(neg_ids_dic.items()):
        seq_ids_list.append(seq_id)
        label_list.append(0)
        id2idx_dic[seq_id] = i
        idx2id_dic[i] = seq_id
        i += 1
    """
    In case of generic model cross validation (--gm-cv), create new label_list.
    Use n labels for n RBPs + + "0" label for negatives.
    """
    if args.gm_cv:
        # Seen labels (== RBP names) dictionary.
        label_dic = {}
        # Site ID to label dictionary.
        id2l_dic = {}
        # Label index.
        li = 0
        for seq_id in sorted_pos_ids_list:
            # RBP name = site ID prefix up to the first underscore.
            m = re.search("(.+?)_", seq_id)
            if m:
                label = m.group(1)
                if not label in label_dic:
                    li += 1
                    label_dic[label] = li
                id2l_dic[seq_id] = li
                if li2label_dic is not None:
                    li2label_dic[li] = label
            else:
                assert False, "Generic data RBP label extraction failed for \"%s\"" % (seq_id)
        # Construct label list for positives.
        label_list = []
        for seq_id in sorted_pos_ids_list:
            label = id2l_dic[seq_id]
            label_list.append(label)
        # Add negatives to label vector.
        label_list = label_list + [0]*len(neg_ids_dic)
        assert len(label_list) == len(seqs_dic), "len(label_list) != len(seqs_dic)"
    # Construct features list.
    all_features = []
    for idx, label in enumerate(label_list):
        seq_id = seq_ids_list[idx]
        if store_tensors:
            all_features.append(torch.tensor(feat_dic[seq_id], dtype=torch.float))
        else:
            all_features.append(feat_dic[seq_id])
    assert all_features, "no features stored in all_features (all_features empty)"
    """
    ~~~ RETURNS ~~~
    seqs_dic:
        Sequences dictionary.
    idx2id_dic:
        list index to sequence ID mapping.
    label_list:
        Class label list (indices correspond to all_features list)
    all_features:
        List of feature matrices / tensors for positive + negative
        dataset, with order as in label_list. Get sequence ID with
        idx2id_dic, using the list index.
    """
    if load_only_pos:
        # Filter everything down to positive instances only.
        new_all_feat = []
        new_label_list = []
        new_idx2id_dic = {}
        new_seqs_dic = {}
        new_idx = 0
        for idx in idx2id_dic:
            seq_id = idx2id_dic[idx]
            if seq_id in pos_ids_dic:
                new_all_feat.append(all_features[idx])
                new_seqs_dic[seq_id] = seqs_dic[seq_id]
                new_label_list.append(1)
                new_idx2id_dic[new_idx] = seq_id
                new_idx += 1
        return new_seqs_dic, new_idx2id_dic, new_label_list, new_all_feat
    else:
        return seqs_dic, idx2id_dic, label_list, all_features
################################################################################
def shuffle_idx_feat_labels(labels, features,
                            random_seed=False,
                            idx2id_dic=False):
    """
    Shuffle features list and return shuffled features, together with
    a new labels list corresponding to the new order in the features
    list. If idx2id_dic is given, it is updated IN PLACE so that the
    new (post-shuffle) indices map to the original identifiers
    (e.g. sequence IDs).

    random_seed:
        If set, call random.seed(random_seed) for a reproducible shuffle.
    idx2id_dic:
        If given, update dictionary with new indices (after shuffling)
        to ID mapping.

    Returns:
        new_labels (list), features (tuple, shuffled).
    """
    assert labels, "given labels empty"
    assert features, "given features empty"
    if random_seed:
        random.seed(random_seed)
    # Pair each label with its original index so the index-to-ID mapping
    # can be reconstructed after shuffling.
    labels_add = []
    for idx,label in enumerate(labels):
        labels_add.append([label,idx])
    features_labels = list(zip(features, labels_add))
    random.shuffle(features_labels)
    features, labels_add = zip(*features_labels)
    idx2id_dic_new = {}
    new_labels = []
    for idx,lst in enumerate(labels_add):
        label = lst[0]
        old_idx = lst[1]
        new_labels.append(label)
        if idx2id_dic:
            assert old_idx in idx2id_dic, "index %i is not a key in idx2id_dic" %(old_idx)
            seq_id = idx2id_dic[old_idx]
            idx2id_dic_new[idx] = seq_id
    assert new_labels, "resulting new_labels list empty"
    if idx2id_dic:
        # BUGFIX: the previous version rebound the local name
        # (idx2id_dic = idx2id_dic_new), which never propagated the new
        # mapping to the caller's dictionary. Update it in place instead.
        idx2id_dic.clear()
        idx2id_dic.update(idx2id_dic_new)
    return new_labels, features
################################################################################
def read_settings_into_dic(settings_file,
                           val_col2=False):
    """
    Read a tab-separated settings file into a dictionary mapping
    str(column 1) -> str(column 2) (or column 3 if val_col2 is set).
    Duplicate setting IDs trigger an assertion error.

    >>> test_in = "test_data/test_settings.out"
    >>> read_settings_into_dic(test_in)
    {'peyote': '20.5', 'china_white': '43.1', 'bolivian_marching_powder': '1000.0'}
    """
    assert settings_file, "file name expected"
    assert os.path.isfile(settings_file), "file %s does not exist" %(settings_file)
    set_dic = {}
    with open(settings_file) as in_fh:
        for row in in_fh:
            fields = row.strip().split("\t")
            sid = fields[0]
            # Pick the requested value column.
            sval = fields[2] if val_col2 else fields[1]
            assert sid not in set_dic, "settings ID %s appears > 1 in given settings file" %(sid)
            set_dic[sid] = sval
    assert set_dic, "set_dic empty (nothing read in?)"
    return set_dic
################################################################################
def read_feat_into_dic(feat_file, feat_type,
                       feat_dic=False,
                       n_to_1h=False,
                       label_list=False):
    """
    Read in feature data from feat_file into dictionary of lists.
    Mapping: sequence ID -> list of labels
    feat_type:
        Type of feature, set "C" for categorical and "N" for numerical
    feat_dic:
        If given, APPEND the new feature values to the existing
        per-position feature vectors instead of creating new ones.
        Existing and new lists must have identical lengths per site.
    label_list:
        Needed for C feature, supply label_list to do one-hot encoding
    n_to_1h:
        For structural elements probabilities, to convert them into
        one-hot encodings.
    1) Categorical data (C)
    Categorical (feat_type == C) data example, with label_list = ['E', 'I']:
    Old format:
    CLIP_1	EI
    CLIP_2	IE
    Generated one-hot lists:
    [[1, 0], [0, 1]]
    [[0, 1], [1, 0]]
    Generated dictionary:
    {'CLIP_1': [[1, 0], [0, 1]], 'CLIP_2': [[0, 1], [1, 0]]}
    New format (like FASTA):
    >CLIP_1
    EI
    >CLIP_2
    IE
    2) Numerical data (N)
    Numerical (feat_type == N) data example:
    >CLIP_1
    0.1
    -0.2
    >CLIP_2
    0.4
    0.2
    Generated lists:
    [[0.1], [-0.2]]
    [[0.4], [0.2]]
    Generated dictionary:
    {'CLIP_1': [[0.1], [-0.2]], 'CLIP_2': [[0.4], [0.2]]}
    test.pp.con:
    >CLIP_01
    0.1
    0.2
    >CLIP_02
    0.4
    0.5
    test2.pp.con:
    >CLIP_01
    0.1	0.2
    0.3	0.4
    >CLIP_02
    0.5	0.6
    0.7	0.8
    >>> num_test_in = "test_data/test.pp.con"
    >>> read_feat_into_dic(num_test_in, "N")
    {'CLIP_01': [[0.1], [0.2]], 'CLIP_02': [[0.4], [0.5]]}
    >>> num_test_in = "test_data/test2.pp.con"
    >>> read_feat_into_dic(num_test_in, "N")
    {'CLIP_01': [[0.1, 0.2], [0.3, 0.4]], 'CLIP_02': [[0.5, 0.6], [0.7, 0.8]]}
    >>> add_feat_dic = {'CLIP_01': [[0.1], [0.2]], 'CLIP_02': [[0.4], [0.5]]}
    >>> num_test_in = "test_data/test.pp.con"
    >>> read_feat_into_dic(num_test_in, "N", feat_dic=add_feat_dic)
    {'CLIP_01': [[0.1, 0.1], [0.2, 0.2]], 'CLIP_02': [[0.4, 0.4], [0.5, 0.5]]}
    >>> cat_test_in = "test_data/test.tra"
    >>> tra_labels = ['C', 'F', 'N', 'T']
    >>> read_feat_into_dic(cat_test_in, "C", label_list=tra_labels)
    {'site1': [[0, 1, 0, 0], [0, 0, 0, 1]], 'site2': [[1, 0, 0, 0], [0, 0, 1, 0]]}
    >>> cat_test_in = "test_data/test_new_format.tra"
    >>> read_feat_into_dic(cat_test_in, "C", label_list=tra_labels)
    {'site1': [[0, 1, 0, 0], [0, 0, 0, 1]], 'site2': [[1, 0, 0, 0], [0, 0, 1, 0]]}
    """
    feat_dic_given = False
    if not feat_dic:
        feat_dic = {}
    else:
        feat_dic_given = True
    types = ['C', 'N']
    assert feat_type in types, "invalid feature type given (expects C or N)"
    if feat_type == 'C':
        assert label_list, "label_list needed if feat_type == C"
        old_cat_format = True
        # Check which file format is present.
        # Only the FIRST line is inspected (the break exits after it):
        # a ">" header means new FASTA-like format, an "ID<tab>labels"
        # row means old two-column format.
        with open(feat_file) as f:
            for line in f:
                if re.search("^>.+", line):
                    old_cat_format = False
                elif re.search(".+?\t.+?", line):
                    old_cat_format = True
                else:
                    assert False, "invalid format encountered in feat_file %s" %(feat_file)
                break
        f.closed
        if old_cat_format:
            with open(feat_file) as f:
                for line in f:
                    cols = line.strip().split("\t")
                    seq_id = cols[0]
                    if seq_id not in feat_dic:
                        feat_dic[seq_id] = string_vectorizer(cols[1], custom_alphabet=label_list)
                    else:
                        # feat_dic already populated / initialized.
                        add_list = string_vectorizer(cols[1], custom_alphabet=label_list)
                        assert add_list, "add_list empty (feat_file: %s, seq_id: %s)" %(feat_file, seq_id)
                        # Check.
                        l_old = len(feat_dic[seq_id])
                        l_add = len(add_list)
                        assert l_old == l_add, "existing list length in feat_dic != list length from feat_file to add (feat_file: %s, seq_id: %s)" %(feat_file, seq_id)
                        # Append new one-hot columns to each position vector.
                        for i in range(l_old):
                            feat_dic[seq_id][i] += add_list[i]
            f.closed
        else:
            # Read in feature sequences into dic.
            id2featstr_dic = read_cat_feat_into_dic(feat_file)
            #{'site1': 'EEIIIIIIIIIIIIIIEEEE', 'site2': 'EEEEIIII'}
            for seq_id in id2featstr_dic:
                seq = id2featstr_dic[seq_id]
                if seq_id not in feat_dic:
                    feat_dic[seq_id] = string_vectorizer(seq, custom_alphabet=label_list)
                else:
                    # feat_dic already populated / initialized.
                    add_list = string_vectorizer(seq, custom_alphabet=label_list)
                    assert add_list, "add_list empty (feat_file: %s, seq_id: %s)" %(feat_file, seq_id)
                    # Check.
                    l_old = len(feat_dic[seq_id])
                    l_add = len(add_list)
                    assert l_old == l_add, "existing list length in feat_dic != list length from feat_file to add (feat_file: %s, seq_id: %s)" %(feat_file, seq_id)
                    # Append new one-hot columns to each position vector.
                    for i in range(l_old):
                        feat_dic[seq_id][i] += add_list[i]
    else:
        # Numerical (N) features: FASTA-like format, one tab-separated
        # value row per position. pos_i tracks the position inside the
        # current site.
        seq_id = ""
        pos_i = 0
        with open(feat_file) as f:
            for line in f:
                if re.search(">.+", line):
                    m = re.search(">(.+)", line)
                    seq_id = m.group(1)
                    # Init only necessary if no populated / initialized feat_dic given.
                    if not feat_dic_given:
                        feat_dic[seq_id] = []
                    pos_i = 0
                else:
                    vl = line.strip().split('\t')
                    for i,v in enumerate(vl):
                        vl[i] = float(v)
                    if n_to_1h:
                        vl_1h = convert_prob_list_to_1h(vl)
                        vl = vl_1h
                    if feat_dic_given:
                        # Extend the existing position vector in place.
                        for v in vl:
                            feat_dic[seq_id][pos_i].append(v)
                    else:
                        feat_dic[seq_id].append(vl)
                    pos_i += 1
        f.closed
    assert feat_dic, "feat_dic empty"
    return feat_dic
################################################################################
def revise_in_sites(in_bed, out_bed,
                    chr_len_dic, id2pl_dic, args,
                    transcript_regions=False):
    """
    Revise positive or negative sites as part of rnaprot gt.
    Write every input BED row to out_bed with its score (column 5)
    replaced by "0", and return a dictionary mapping site ID to the
    original row (original score preserved).
    Zero scores are necessary since twoBitToFa despises decimal scores.
    id2pl_dic:
        If given, store part lengths (lower case, uppercase, lowercase) in
        given dictionary.
    """
    # Checks.
    assert chr_len_dic, "chr_len_dic empty"
    assert id2pl_dic, "id2pl_dic empty"
    # Site ID -> original row (with original score).
    id2row_dic = {}
    with open(out_bed, "w") as bed_out, open(in_bed) as bed_in:
        for bed_row in bed_in:
            fields = bed_row.strip().split("\t")
            ref_id = fields[0]
            start = int(fields[1])
            end = int(fields[2])
            site_id = fields[3]
            score = fields[4]
            strand = fields[5]
            # Keep the original row (with score) for later restoration.
            id2row_dic[site_id] = "%s\t%i\t%i\t%s\t%s\t%s" %(ref_id, start, end, site_id, score, strand)
            # Output row gets score "0" (twoBitToFa despises decimal scores).
            bed_out.write("%s\t%i\t%i\t%s\t0\t%s\n" %(ref_id, start, end, site_id, strand))
    return id2row_dic
################################################################################
def process_test_sites(in_bed, out_bed, chr_len_dic, args,
                       check_ids=False,
                       transcript_regions=False,
                       count_dic=None,
                       id_prefix=False):
    """
    Process --in sites from rnaprot gp.

    Reads BED rows from in_bed, optionally recenters/extends them
    depending on args.mode and args.seq_ext, truncates them at
    reference ends, and writes them to out_bed with score column set
    to "0". Returns a dictionary mapping (possibly renamed) site ID
    to the processed row with the ORIGINAL score.

    chr_len_dic:
        Reference (chromosome/transcript) ID to sequence length mapping.
    check_ids:
        Replace whitespaces in site IDs with "_".
    transcript_regions:
        Treat references as transcripts (skip chromosome ID filtering,
        force "+" strand).
    count_dic:
        If given, store counts under keys 'c_in', 'c_filt_ref', 'c_out'.
    id_prefix:
        If set, rename sites to "<id_prefix>_<running number>".
    """
    # Checks.
    assert chr_len_dic, "chr_len_dic empty"
    # Store BED rows with scores.
    id2row_dic = {}
    # Filtered output BED file, with "0" scores for sequence extraction.
    BEDOUT = open(out_bed, "w")
    # Counts.
    c_in = 0
    c_filt_ref = 0
    c_out = 0
    with open(in_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            chr_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_sc = float(cols[4])
            site_pol = cols[5]
            # NOTE(review): site_len is computed but not used below.
            site_len = site_e - site_s
            # Some checks.
            assert chr_id in chr_len_dic, "chromosome ID \"%s\" not in chr_len_dic" %(chr_id)
            assert site_pol == "+" or site_pol == "-", "invalid strand info (column 6) given in --in line \"%s\"" %(row)
            assert site_e > site_s, "--in site end <= site start (%i <= %i, site_id: %s)" %(site_e, site_s, site_id)
            # Check for valid coordinates (not outside chromosome).
            assert site_s >= 0, "--in site start < 0 (site_id: %s)" %(site_id)
            assert site_e <= chr_len_dic[chr_id], "--in site end > reference sequence length (%i > %i, site_id: %s, ref_id: %s)" %(site_e, chr_len_dic[chr_id], site_id, chr_id)
            c_in += 1
            # Restrict to standard chromosomes.
            if not transcript_regions:
                new_chr_id = check_convert_chr_id(chr_id)
                if not new_chr_id:
                    c_filt_ref += 1
                    continue
                else:
                    chr_id = new_chr_id
            # Make site polarities "+" for transcript sites.
            if transcript_regions:
                site_pol = "+"
            # 1: Take the center of each site.
            # 2: Take the complete site.
            # 3: Take the upstream end for each site.
            new_s = site_s
            new_e = site_e
            if args.mode == 1:
                # Take center position.
                new_e = get_center_position(site_s, site_e)
                new_s = new_e - 1
            elif args.mode == 3:
                new_s = site_s
                new_e = site_s + 1
                # Upstream end of a minus-strand site is its 3' coordinate.
                if site_pol == "-":
                    new_s = site_e - 1
                    new_e = site_e
            # NOTE(review): unlike process_in_sites, there is no else branch
            # here — any other args.mode value silently keeps the full site.
            # Extend.
            if args.seq_ext:
                new_s = new_s - args.seq_ext
                new_e = new_e + args.seq_ext
            # Truncate sites at reference ends.
            if new_s < 0:
                new_s = 0
            if new_e > chr_len_dic[chr_id]:
                new_e = chr_len_dic[chr_id]
            # Check length.
            new_len = new_e - new_s
            assert new_len > 1, "sites with length 1 encountered. Please provide longer sites, or use --seq-ext to prolong them (or change --mode setting). Ideally site lengths should be >= median training site length"
            # IDs.
            c_out += 1
            # Remove white spaces from IDs.
            if check_ids:
                site_id = site_id.strip().replace(" ", "_")
            # New IDs.
            if id_prefix:
                new_site_id = id_prefix + "_" + str(c_out)
            else:
                new_site_id = site_id
            # Check whether score is whole number.
            # Whole-number floats are printed without decimal point (7.0 -> "7").
            if not site_sc % 1:
                site_sc = int(site_sc)
            # Convert to string.
            site_sc = str(site_sc)
            new_sc = "0"
            # Store and print out sites.
            old_row = "%s\t%i\t%i\t%s\t%s\t%s" %(chr_id, new_s, new_e, new_site_id, site_sc, site_pol)
            new_row = "%s\t%i\t%i\t%s\t%s\t%s" %(chr_id, new_s, new_e, new_site_id, new_sc, site_pol)
            id2row_dic[new_site_id] = old_row
            BEDOUT.write("%s\n" %(new_row))
    # No-op attribute access (the with statement already closed the file).
    f.closed
    BEDOUT.close()
    # Count stats dic.
    if count_dic is not None:
        count_dic['c_in'] = c_in
        count_dic['c_filt_ref'] = c_filt_ref
        count_dic['c_out'] = c_out
    assert id2row_dic, "id2row_dic empty"
    return id2row_dic
################################################################################
def process_in_sites(in_bed, out_bed, chr_len_dic, args,
                     transcript_regions=False,
                     id2pl_dic=None,
                     count_dic=None,
                     id_prefix="CLIP"):
    """
    Process --in or --neg-in sites as part of rnaprot gt.
    Return dictionary of lists, with
    upstream lowercase length, uppercase length, downstream lowercase length
    for every site ID. E.g.
    {'id1' : [150, 61, 150], 'id2' : [100, 61, 150]}

    Sites are filtered (args.max_len, args.sc_thr/args.rev_filter,
    non-standard chromosomes), recentered/extended according to
    args.mode, args.min_len and args.seq_ext, truncated at reference
    ends and written to out_bed.

    chr_len_dic:
        Reference (chromosome/transcript) ID to sequence length mapping.
    transcript_regions:
        Treat references as transcripts (skip chromosome ID filtering,
        force "+" strand).
    id2pl_dic:
        Site ID to part lengths list dictionary.
    count_dic:
        Count stats dictionary (keys 'c_in', 'c_filt_max_len',
        'c_filt_thr', 'c_filt_ref', 'c_out').
    id_prefix:
        Prefix for renamed site IDs (ignored if args.keep_ids is set).
    """
    # Checks.
    assert chr_len_dic, "chr_len_dic empty"
    # Filtered output BED file.
    BEDOUT = open(out_bed, "w")
    # Part lengths dictionary.
    if id2pl_dic is None:
        id2pl_dic = {}
    # Min length ext.
    # One-sided extension so a recentered site reaches args.min_len
    # (assumes odd args.min_len for an exact match).
    min_len_ext = int( (args.min_len - 1) / 2)
    # Counts.
    c_in = 0
    c_filt_max_len = 0
    c_filt_ref = 0
    c_filt_thr = 0
    # NOTE(review): c_chr_ends is never incremented below.
    c_chr_ends = 0
    c_out = 0
    with open(in_bed) as f:
        for line in f:
            row = line.strip()
            cols = line.strip().split("\t")
            chr_id = cols[0]
            site_s = int(cols[1])
            site_e = int(cols[2])
            site_id = cols[3]
            site_sc = float(cols[4])
            site_pol = cols[5]
            site_len = site_e - site_s
            # Checks.
            assert chr_id in chr_len_dic, "chromosome ID \"%s\" not in chr_len_dic" %(chr_id)
            assert site_pol == "+" or site_pol == "-", "invalid strand info (column 6) given in --in line \"%s\"" %(row)
            assert site_e > site_s, "--in site end <= site start (%i <= %i, site_id: %s)" %(site_e, site_s, site_id)
            # Check for valid coordinates (not outside chromosome).
            assert site_s >= 0, "--in site start < 0 (site_id: %s)" %(site_id)
            assert site_e <= chr_len_dic[chr_id], "--in site end > reference sequence length (%i > %i, site_id: %s, ref_id: %s)" %(site_e, chr_len_dic[chr_id], site_id, chr_id)
            c_in += 1
            # Filter by max_len.
            if site_len > args.max_len:
                c_filt_max_len += 1
                continue
            # Filter by score.
            # rev_filter inverts the threshold (keep LOW-scoring sites).
            if args.sc_thr is not None:
                if args.rev_filter:
                    if site_sc > args.sc_thr:
                        c_filt_thr += 1
                        continue
                else:
                    if site_sc < args.sc_thr:
                        c_filt_thr += 1
                        continue
            # Restrict to standard chromosomes.
            if not transcript_regions:
                new_chr_id = check_convert_chr_id(chr_id)
                if not new_chr_id:
                    c_filt_ref += 1
                    continue
                else:
                    chr_id = new_chr_id
            # Make site polarities "+" for transcript sites.
            if transcript_regions:
                site_pol = "+"
            # Process site coordinates according to set parameters (mode, ...).
            new_s = site_s
            new_e = site_e
            # 1: Take the center of each site.
            # 2: Take the complete site.
            # 3: Take the upstream end for each site.
            if args.mode == 1:
                # Take center position.
                new_e = get_center_position(site_s, site_e)
                new_s = new_e - 1
            elif args.mode == 2:
                # Take complete site, unless --min-len or --max-len applies.
                if site_len < args.min_len:
                    # Recenter and extend to reach min_len.
                    new_e = get_center_position(site_s, site_e)
                    new_s = new_e - min_len_ext - 1
                    new_e = new_e + min_len_ext
            elif args.mode == 3:
                new_s = site_s
                new_e = site_s + 1
                # Upstream end of a minus-strand site is its 3' coordinate.
                if site_pol == "-":
                    new_s = site_e - 1
                    new_e = site_e
            else:
                assert False, "invalid mode set (args.mode value == %i)" %(args.mode)
            # Extend.
            if args.seq_ext:
                new_s = new_s - args.seq_ext
                new_e = new_e + args.seq_ext
            # Truncate sites at reference ends.
            if new_s < 0:
                new_s = 0
            if new_e > chr_len_dic[chr_id]:
                new_e = chr_len_dic[chr_id]
            # IDs.
            c_out += 1
            new_site_id = id_prefix + "_" + str(c_out)
            if args.keep_ids:
                new_site_id = site_id
            # Site lengths.
            seq_ext_len = new_e - new_s
            # Store future uppercase region length.
            # Part lengths format: [upstream lc, uppercase, downstream lc].
            id2pl_dic[new_site_id] = [0, seq_ext_len, 0]
            # Store site length in list.
            # NOTE(review): new_site_len is computed but not used below.
            new_site_len = new_e - new_s
            # Check whether score is whole number.
            # Whole-number floats are printed without decimal point (7.0 -> "7").
            if not site_sc % 1:
                site_sc = int(site_sc)
            # Convert to string.
            site_sc = str(site_sc)
            # Print out sites.
            BEDOUT.write("%s\t%i\t%i\t%s\t%s\t%s\n" %(chr_id, new_s, new_e, new_site_id, site_sc, site_pol) )
    # No-op attribute access (the with statement already closed the file).
    f.closed
    BEDOUT.close()
    # Count stats dic.
    if count_dic is not None:
        count_dic['c_in'] = c_in
        count_dic['c_filt_max_len'] = c_filt_max_len
        count_dic['c_filt_thr'] = c_filt_thr
        count_dic['c_filt_ref'] = c_filt_ref
        count_dic['c_out'] = c_out
    return id2pl_dic
################################################################################
def scores_to_plot_df(scores,
                      stdev=False):
    """
    Build a pandas dataframe from a list of scores, with a 0-based
    'pos' column and a 'score' column (plus a 'stdev' column if a
    standard deviations list is given).

    stdev:
        Optional list of standard deviations belonging to scores
        (must have the same length as scores).
    """
    assert scores, "given scores list empty"
    columns = ['pos', 'score']
    if stdev:
        assert len(scores) == len(stdev), "len(scores) != len(stdev)"
        columns.append('stdev')
    # Positions are 0-based list indices.
    data = {'pos': list(range(len(scores))), 'score': list(scores)}
    if stdev:
        data['stdev'] = list(stdev)
    return pd.DataFrame(data, columns=columns)
################################################################################
def convert_prob_list_to_1h(lst):
    """
    Convert list of probabilities or score values into one-hot encoding list,
    where the element with the highest prob./score gets 1, others 0.
    Ties are broken by taking the first (lowest-index) maximum.

    >>> lst = [0.3, 0.5, 0.2, 0.1, 0.1]
    >>> convert_prob_list_to_1h(lst)
    [0, 1, 0, 0, 0]
    >>> convert_prob_list_to_1h([-0.5, -0.1, -0.3])
    [0, 1, 0]
    """
    assert lst, "given lst empty"
    new_lst = [0]*len(lst)
    # BUGFIX: the previous version initialized the running maximum to 0,
    # so an all-negative score list always got its 1 at index 0 instead of
    # at the true maximum. Use a proper argmax (first index wins on ties,
    # matching the old behavior for non-negative input).
    max_i = max(range(len(lst)), key=lambda i: lst[i])
    new_lst[max_i] = 1
    return new_lst
################################################################################
def seq_to_plot_df(seq, alphabet,
                   default_score=1,
                   scores=False):
    """
    Given a sequence, generate a pandas dataframe from it.
    Format example:
    sequence = "AACGT"
    alphabet = ["A", "C", "G", "T"]
    Intermediate dictionary of lists:
    data = {'A' : [1,1,0,0,0], 'C' : [0,0,1,0,0], 'G' : [0,0,0,1,0], 'T' : [0,0,0,0,1]}
    Final dataframe:
         A  C  G  T
    pos
    0    1  0  0  0
    1    1  0  0  0
    2    0  1  0  0
    3    0  0  1  0
    4    0  0  0  1
    seq:
        Sequence string to generate pandas dataframe for logo
        generation from.
    alphabet:
        List of sequence characters to consider for logo generation.
        The dataframe columns are the sorted alphabet characters.
    scores:
        Scores list, to use instead of score of 1 for nucleotide.
    """
    assert seq, "empty sequence given"
    assert alphabet, "alphabet character list empty"
    if scores:
        assert len(seq) == len(scores), "length scores list != length sequence"
    # BUGFIX: sort a copy instead of alphabet.sort(), which mutated the
    # caller's list as a side effect.
    alphabet = sorted(alphabet)
    data = {}
    for c in alphabet:
        data[c] = []
    for i,sc in enumerate(seq):
        assert sc in alphabet, "sequence character \"%s\" not in given alphabet" %(sc)
        score = default_score
        if scores:
            score = scores[i]
        # One column per alphabet character: score at the matching
        # character, 0 everywhere else.
        for c in alphabet:
            if c == sc:
                data[c].append(score)
            else:
                data[c].append(0)
    plot_df = pd.DataFrame(data, columns = alphabet)
    plot_df.index.name = "pos"
    return plot_df
################################################################################
def perturb_to_plot_df(perturb_sc_list):
    """
    Convert a list of per-position perturbation score vectors (one
    [A, C, G, U] score vector per position) into a pandas dataframe
    with columns A, C, G, U and index name "pos".
    """
    assert perturb_sc_list, "empty perturb_sc_list given"
    nts = ["A", "C", "G", "U"]
    # One column per nucleotide, one row per sequence position.
    data = {nt: [vec[col] for vec in perturb_sc_list] for col, nt in enumerate(nts)}
    plot_df = pd.DataFrame(data, columns=nts)
    plot_df.index.name = "pos"
    return plot_df
################################################################################
def add_importance_scores_plot(df, fig, gs, i,
                               color_dict=False,
                               y_label="score",
                               y_label_size=9):
    """
    Add a nucleotide importance scores logo plot to row i of grid
    spec gs. Normalized profile scores range from -1 .. 1, so the
    y axis is fixed to ticks at -1, 0 and 1.
    """
    axis = fig.add_subplot(gs[i, :])
    logo_kwargs = {"ax": axis}
    if color_dict:
        logo_kwargs["color_scheme"] = color_dict
    logo = logomaker.Logo(df, **logo_kwargs)
    # Only keep the left spine, bounded to the score range.
    logo.style_spines(visible=False)
    logo.style_spines(spines=['left'], visible=True, bounds=[-1, 1])
    logo.style_spines(spines=['bottom'], visible=False)
    logo.ax.set_xticks([])
    logo.ax.set_yticks([-1, 0, 1])
    plt.yticks(fontsize=7)
    logo.ax.set_yticklabels(['-1', '0', '1'])
    logo.ax.set_ylabel(y_label, labelpad=10, fontsize=y_label_size)
################################################################################
def add_saliency_scores_plot(df, fig, gs, i,
                             color_dict=False,
                             y_label="saliency",
                             y_label_size=9):
    """
    Add a nucleotide saliency scores logo plot to row i of grid spec
    gs. Unlike add_importance_scores_plot, the y axis is not fixed to
    [-1, 1] — saliency values keep their own range and tick labels.
    """
    axis = fig.add_subplot(gs[i, :])
    logo_kwargs = {"ax": axis}
    if color_dict:
        logo_kwargs["color_scheme"] = color_dict
    logo = logomaker.Logo(df, **logo_kwargs)
    # Only keep the (unbounded) left spine.
    logo.style_spines(visible=False)
    logo.style_spines(spines=['left'], visible=True)
    logo.style_spines(spines=['bottom'], visible=False)
    logo.ax.set_xticks([])
    plt.yticks(fontsize=7)
    logo.ax.set_ylabel(y_label, labelpad=10, fontsize=y_label_size)
################################################################################
def round10up(x):
    """
    Round x up to the next multiple of 10.
    >>> round10up(110)
    110
    >>> round10up(111)
    120
    >>> round10up(0)
    0
    """
    # math.ceil returns an int in Python 3, so the result stays int.
    return 10 * ceil(x / 10)
################################################################################
def round10down(x):
    """
    Round x down to the nearest multiple of 10.
    >>> round10down(110)
    110
    >>> round10down(119)
    110
    """
    # math.floor returns an int in Python 3, so the result stays int.
    return 10 * floor(x / 10)
################################################################################
def add_label_plot(df, fig, gs, i,
                   color_dict=False,
                   y_label_size=9,
                   add_x_ticks=False,
                   x_tick_spacing=10,
                   x_tick_start=1,
                   ref_pol="+",
                   koma_sepp_1000=True,
                   y_label="exon-intron"):
    """
    Make exon-intron label plot (a one-row logomaker logo drawn into
    subplot row i of gridspec gs).

    df:
        Per-position label dataframe (one column per label character).
    color_dict:
        Optional character -> color mapping for the logo.
    add_x_ticks:
        If set, draw x tick labels every x_tick_spacing positions.
    x_tick_spacing:
        Spacing between x tick numbers.
        NOTE(review): the style_xticks call below hard-codes spacing=10,
        so values != 10 would desynchronize tick positions and labels —
        confirm whether non-default spacing is ever used.
    x_tick_start:
        One-based start coordinate for x ticks. For whole sites this
        equals to 1. For x_tick_start = 1, first tick would be at 10.
    ref_pol:
        Reference polarity ("+" or "-"); for "-" the tick numbers are
        anchored at the end coordinate and written in reverse order.
    koma_sepp_1000:
        Separate x tick coordinates > 999 with "," (comma separator
        for numbers > 999).
    """
    ax = fig.add_subplot(gs[i, :])
    # vpad adds vertical padding around the stacked characters.
    if color_dict:
        logo = logomaker.Logo(df, ax=ax, vpad=0.1, color_scheme=color_dict)
    else:
        logo = logomaker.Logo(df, ax=ax, vpad=0.1)
    logo.style_spines(visible=False)
    logo.style_spines(spines=['left'], visible=False, bounds=[0, 1])
    #logo.style_spines(spines=['bottom'], visible=False)
    logo.style_spines(spines=['bottom'], visible=False)
    #logo.style_xticks(rotation=90, fmt='%d', anchor=0)
    if add_x_ticks:
        l_seq = len(df)
        # First tick number: x_tick_start rounded up to the next 10.
        first_tick = round10up(x_tick_start)
        # Anchor = offset of the first tick position within the plot
        # (round10down(x) - x is <= 0).
        anchor = round10down(x_tick_start) - x_tick_start
        if ref_pol == "-":
            # Minus strand: anchor ticks relative to the end coordinate.
            x_tick_end = x_tick_start + l_seq - 1
            anchor = x_tick_end - round10up(x_tick_end)
        # End of the tick number range (exclusive); the anchor == 0 case
        # is handled separately.
        end_range = first_tick-x_tick_spacing+l_seq-anchor
        if not anchor:
            end_range = first_tick+l_seq+anchor
        # Fix xticks.
        logo.style_xticks(rotation=0, fmt='%d', anchor=anchor, spacing=10)
        x_tick_numbers = []
        for x in range(first_tick, end_range, x_tick_spacing):
            x_tick_numbers.append(x)
        if ref_pol == "-":
            # Minus strand coordinates decrease left to right.
            x_tick_numbers.reverse()
        x_tick_labels = []
        for x in x_tick_numbers:
            if koma_sepp_1000:
                x_tick_labels.append('{:,}'.format(x))
            else:
                x_tick_labels.append('%d'%x)
        #print("x_tick_start:", x_tick_start)
        #print("first_tick:", first_tick)
        #print("anchor:", anchor)
        #print("x_tick_labels:", x_tick_labels)
        #del x_tick_labels[0]
        logo.ax.set_xticklabels(tuple(x_tick_labels))
        #logo.ax.set_xticklabels('%d'%x for x in range(x_tick_start-1, l_seq, x_tick_spacing))
        #logo.style_xticks(anchor=0, spacing=x_tick_spacing, rotation=0)
        #logo.ax.set_xticklabels([])
        logo.ax.xaxis.set_ticks_position('none')
        # Negative pad pulls the small tick labels closer to the axis.
        logo.ax.xaxis.set_tick_params(pad=-4, labelsize=5)
        #logo.ax.set_xticklabels('%+d'%x for x in [-3, -2, -1, 1, 2, 3, 4, 5, 6])
        #logo.style_xticks(rotation=90, spacing=10, fmt='%d', anchor=0)
    else:
        logo.ax.set_xticks([])
    logo.ax.set_yticks([])
    #logo.ax.set_axis_off()
    #logo.ax.set_yticklabels(['0', '1'])
    plt.yticks(fontsize=7)
    logo.ax.set_ylabel(y_label, labelpad=24, fontsize=y_label_size)
################################################################################
def make_motif_label_plot_df(feat_id, ch_info_dic, motif_matrix):
    """
    Make a feature dataframe for label (categorical) data motif
    plotting (sequence, eia, tra, rra, elem_p.str, and additional
    categorical features).

    Columns are the feature's alphabet characters; each row holds the
    channel values of one motif position, extracted from motif_matrix
    at the channel indices recorded for feat_id in ch_info_dic.

    Format of ch_info_dic:
    ch_info_dic: {'fa': ['C', [0], ['embed'], 'embed'],
    'CTFC': ['C', [1, 2], ['0', '1'], 'one_hot'],
    'pc.con': ['N', [3], ['phastcons_score'], 'prob'],
    'pp.con': ['N', [4], ['phylop_score'], 'minmax2'],
    'rra': ['C', [5, 6], ['N', 'R'], '-'],
    'str': ['N', [7, 8, 9, 10, 11], ['E', 'H', 'I', 'M', 'S'], 'prob'],
    'tra': ['C', [12, 13, 14, 15, 16, 17, 18, 19, 20], ['A', 'B', 'C', 'E', 'F', 'N', 'S', 'T', 'Z'], '-']}
    """
    assert feat_id in ch_info_dic, "feat_id %s not in ch_info_dic" %(feat_id)
    feat_data = ch_info_dic[feat_id]
    feat_idxs = feat_data[1]
    feat_alphabet = feat_data[2]
    # Pair each alphabet character with its channel index and collect
    # the per-position values as one column per character.
    data = {ch: [row[ch_idx] for row in motif_matrix]
            for ch, ch_idx in zip(feat_alphabet, feat_idxs)}
    feat_plot_df = pd.DataFrame(data, columns=feat_alphabet)
    feat_plot_df.index.name = "pos"
    return feat_plot_df
################################################################################
def make_motif_scores_plot_df(feat_id, ch_info_dic, motif_matrix,
                              stdev=False):
    """
    Make a feature dataframe for scores data motif plotting
    (pc.con, pp.con, additional single-channel numerical features).

    stdev:
        Optional list of standard deviations (same length as the
        motif); when given, an extra 'stdev' column is added.
    """
    assert feat_id in ch_info_dic, "feat_id %s not in ch_info_dic" %(feat_id)
    assert motif_matrix, "motif_matrix empty"
    feat_idxs = ch_info_dic[feat_id][1]
    # Scores features occupy exactly one channel.
    assert len(feat_idxs) == 1, "len(feat_idxs) != 1 for feature %s" %(feat_id)
    feat_idx = feat_idxs[0]
    # Per-position scores from the single channel.
    scores = [row[feat_idx] for row in motif_matrix]
    columns = ['pos', 'score']
    data = {'pos': list(range(len(scores))), 'score': scores}
    if stdev:
        assert len(scores) == len(stdev), "len(scores) != len(stdev) for feature ID %s" %(feat_id)
        data['stdev'] = list(stdev)
        columns.append('stdev')
    return pd.DataFrame(data, columns=columns)
################################################################################
def add_motif_label_plot(df, fig, gs, i,
                         color_dict=False,
                         y_label_size=9,
                         y_label="exon-intron"):
    """
    Draw a label (e.g. exon-intron or nucleotide) logo for a motif
    into subplot row i of gridspec gs. No x or y ticks are shown.
    """
    subplot_ax = fig.add_subplot(gs[i, :])
    # vpad adds vertical padding around the stacked characters; pass
    # the color scheme only if one was supplied.
    logo_kwargs = {"ax": subplot_ax, "vpad": 0.1}
    if color_dict:
        logo_kwargs["color_scheme"] = color_dict
    logo = logomaker.Logo(df, **logo_kwargs)
    logo.style_spines(visible=False)
    logo.style_spines(spines=['left'], visible=False, bounds=[0, 1])
    logo.style_spines(spines=['bottom'], visible=False)
    logo.ax.set_xticks([])
    logo.ax.set_yticks([])
    plt.yticks(fontsize=7)
    logo.ax.set_ylabel(y_label, labelpad=24, fontsize=y_label_size)
################################################################################
def add_phastcons_scores_plot(df, fig, gs, i,
                              stdev=False,
                              y_label_size=9,
                              disable_y_labels=False,
                              y_label="phastCons score"):
    """
    Draw a phastCons conservation scores bar plot into subplot row i
    of gridspec gs. phastCons values range from 0 .. 1.

    stdev:
        Optional; when set, the dataframe's 'stdev' column is drawn as
        grey error bars.
    disable_y_labels:
        If set, do not fix the y axis to [0, 1] with '0'/'1' labels.
    """
    axis = fig.add_subplot(gs[i, :])
    # Bar plot of the scores; error bars only when stdev was given.
    plot_kwargs = dict(kind='bar', x='pos', y='score', ax=axis,
                       width=1, legend=False)
    if stdev:
        plot_kwargs.update(yerr=df['stdev'], ecolor='grey')
    df.plot(**plot_kwargs)
    axis.spines['right'].set_visible(False)
    axis.spines['top'].set_visible(False)
    axis.set_xlim(-0.5, max(df['pos'] + 0.5))
    if stdev:
        axis.errorbar(x=df['pos'], y=df['score'], yerr=df['stdev'],
                      ecolor='grey', ls='none')
    axis.set_ylabel(y_label, labelpad=12, fontsize=y_label_size)
    axis.set_xlabel('')
    if not disable_y_labels:
        axis.set_ylim([0, 1])
        axis.set_yticks([0, 1])
        axis.set_yticklabels(['0', '1'])
    axis.set_xticks([])
    axis.set_xticklabels([])
    plt.yticks(fontsize=7)
################################################################################
def add_phylop_scores_plot(df, fig, gs, i,
                           stdev=False,
                           y_label_size=9,
                           y_label="phyloP score"):
    """
    Draw a phyloP conservation scores bar plot into subplot row i of
    gridspec gs. Normalized phyloP values range from -1 .. 1 (y axis
    is fixed to that range).

    stdev:
        Optional; when set, the dataframe's 'stdev' column is drawn as
        grey error bars.
    """
    axis = fig.add_subplot(gs[i, :])
    # Bar plot of the scores; error bars only when stdev was given.
    plot_kwargs = dict(kind='bar', x='pos', y='score', ax=axis,
                       width=1, legend=False)
    if stdev:
        plot_kwargs.update(yerr=df['stdev'], ecolor='grey')
    df.plot(**plot_kwargs)
    for side in ('right', 'top', 'bottom'):
        axis.spines[side].set_visible(False)
    axis.set_xlim(-0.5, max(df['pos'] + 0.5))
    if stdev:
        axis.errorbar(x=df['pos'], y=df['score'], xerr=None,
                      yerr=df['stdev'], ecolor='grey', ls='none')
    axis.set_ylabel(y_label, labelpad=9, fontsize=y_label_size)
    axis.set_xlabel('')
    axis.set_ylim([-1, 1])
    axis.set_yticks([-1, 0, 1])
    axis.set_yticklabels(['-1', '0', '1'])
    axis.set_xticks([])
    axis.set_xticklabels([])
    plt.yticks(fontsize=7)
################################################################################
def make_feature_attribution_plot(seq, feat_list, ch_info_dic,
                                  plot_out_file,
                                  x_tick_start=1,
                                  koma_sepp_1000=False,
                                  ref_pol="+",
                                  sal_list=False,
                                  avg_sal_list=False,
                                  single_pert_list=False,
                                  best_win_pert_list=False,
                                  worst_win_pert_list=False):
    """
    Make a feature attribution plot, showing for each sequence position
    the importance score, as well as additional features in subplots,
    and store it in plot_out_file.
    logomaker (pip install logomaker) is used for plotting.

    seq:
        Site sequence string (same length as feat_list).
    feat_list:
        Per-position feature channel vectors; channel layout is
        described by ch_info_dic (see format example further below).
    x_tick_start, koma_sepp_1000, ref_pol:
        Passed through to add_label_plot() for x tick rendering.
    sal_list / avg_sal_list / single_pert_list / best_win_pert_list /
    worst_win_pert_list:
        Optional per-position score lists; each one given adds a
        subplot row.

    Dependencies:
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    import logomaker
    """
    # Checks.
    assert seq, "given seq empty"
    assert feat_list, "given feat_list list empty"
    assert ch_info_dic, "given ch_info_dic list empty"
    assert plot_out_file, "given plot_out_file empty"
    assert len(seq) == len(feat_list), "len(seq) != len(feat_list)"
    # Dataframe for importance scores.
    seq_alphabet = ["A", "C", "G", "U"]
    #is_df = seq_to_plot_df(seq, seq_alphabet, scores=profile_scores)
    sl_df = seq_to_plot_df(seq, seq_alphabet)
    # Number of plots. One extra subplot row (and height ratio entry)
    # per optional score list that was given.
    n_subplots = 1
    height_ratios = [1]
    if sal_list:
        assert len(seq) == len(sal_list), "len(seq) != len(sal_list)"
        sal_df = seq_to_plot_df(seq, seq_alphabet, scores=sal_list)
        n_subplots += 1
        height_ratios.append(1)
    if avg_sal_list:
        assert len(seq) == len(avg_sal_list), "len(seq) != len(avg_sal_list)"
        avg_sal_df = seq_to_plot_df(seq, seq_alphabet, scores=avg_sal_list)
        n_subplots += 1
        height_ratios.append(1)
    if single_pert_list:
        assert len(seq) == len(single_pert_list), "len(seq) != len(single_pert_list)"
        sing_pert_df = perturb_to_plot_df(single_pert_list)
        n_subplots += 1
        # Perturbation logo gets double height.
        height_ratios.append(2)
    if worst_win_pert_list:
        assert len(seq) == len(worst_win_pert_list), "len(seq) != len(worst_win_pert_list)"
        worst_win_pert_df = seq_to_plot_df(seq, seq_alphabet, scores=worst_win_pert_list)
        n_subplots += 1
        height_ratios.append(1)
    if best_win_pert_list:
        assert len(seq) == len(best_win_pert_list), "len(seq) != len(best_win_pert_list)"
        best_win_pert_df = seq_to_plot_df(seq, seq_alphabet, scores=best_win_pert_list)
        n_subplots += 1
        height_ratios.append(1)
    # Heights and number of additional plots (one per non-sequence
    # feature in ch_info_dic).
    for fid in ch_info_dic:
        if fid == "fa":
            continue
        n_subplots += 1
        height_ratios.append(1)
    # Init plot.
    fig_width = 8
    fig_height = 0.8 * n_subplots
    fig = plt.figure(figsize=(fig_width, fig_height))
    gs = gridspec.GridSpec(nrows=n_subplots, ncols=1, height_ratios=height_ratios)
    # Plot subplots.
    i_plot = 0
    color_dict = {'A' : '#008000', 'C': '#0000ff', 'G': '#ffa600', 'U': '#ff0000'}
    # Plot sequence label plot first.
    add_label_plot(sl_df, fig, gs, i_plot,
                   color_dict=color_dict,
                   y_label="sequence",
                   add_x_ticks=True,
                   koma_sepp_1000=koma_sepp_1000,
                   ref_pol=ref_pol,
                   x_tick_start=x_tick_start,
                   y_label_size=4)
    # add_importance_scores_plot(is_df, fig, gs, i_plot,
    #                            color_dict=color_dict,
    #                            y_label_size=5.5)
    # Saliency plot.
    if sal_list:
        i_plot += 1
        add_saliency_scores_plot(sal_df, fig, gs, i_plot,
                                 color_dict=color_dict,
                                 y_label="saliency",
                                 y_label_size=4)
    # Average saliency plot.
    if avg_sal_list:
        i_plot += 1
        add_saliency_scores_plot(avg_sal_df, fig, gs, i_plot,
                                 color_dict=color_dict,
                                 y_label="avg_saliency",
                                 y_label_size=4)
    # Single position perturbation scores plot.
    if single_pert_list:
        i_plot += 1
        add_saliency_scores_plot(sing_pert_df, fig, gs, i_plot,
                                 color_dict=color_dict,
                                 y_label="single_nt_mut",
                                 y_label_size=4)
    # Worst window perturbation scores plot.
    if worst_win_pert_list:
        i_plot += 1
        add_saliency_scores_plot(worst_win_pert_df, fig, gs, i_plot,
                                 color_dict=color_dict,
                                 y_label="worst_win_mut",
                                 y_label_size=4)
    # Best window perturbation scores plot.
    if best_win_pert_list:
        i_plot += 1
        add_saliency_scores_plot(best_win_pert_df, fig, gs, i_plot,
                                 color_dict=color_dict,
                                 y_label="best_win_mut",
                                 y_label_size=4)
    """
    Format of ch_info_dic:
    ch_info_dic: {'fa': ['C', [0], ['embed'], 'embed'],
    'CTFC': ['C', [1, 2], ['0', '1'], 'one_hot'],
    'pc.con': ['N', [3], ['phastcons_score'], 'prob'],
    'pp.con': ['N', [4], ['phylop_score'], 'minmax2'],
    'rra': ['C', [5, 6], ['N', 'R'], '-'],
    'str': ['N', [7, 8, 9, 10, 11], ['E', 'H', 'I', 'M', 'S'], 'prob'],
    'tra': ['C', [12, 13, 14, 15, 16, 17, 18, 19, 20], ['A', 'B', 'C', 'E', 'F', 'N', 'S', 'T', 'Z'], '-']}
    """
    # Plot additional plots, one per non-sequence feature, in
    # alphabetical feature ID order.
    for fid, fdt in sorted(ch_info_dic.items()):
        if fid == "fa":
            continue
        feat_type = fdt[0]
        feat_idxs = fdt[1]
        feat_alphabet = fdt[2]
        feat_encoding = fdt[3]
        l_idxs = len(feat_idxs)
        # Plot index.
        i_plot += 1
        if feat_type == "C":
            # Categorical data: reconstruct the per-position label
            # string from the one-hot feature channels.
            feat_str = ""
            for fv in feat_list:
                for i,fi in enumerate(feat_idxs):
                    if fv[fi] == 1:
                        feat_str += feat_alphabet[i]
                        break
            c_df = seq_to_plot_df(feat_str, feat_alphabet)
            # NOTE: rebinds color_dict, so any later "C"/"str" feature
            # uses logomaker's default colors, not the sequence colors.
            color_dict = False
            add_label_plot(c_df, fig, gs, i_plot, color_dict=color_dict, y_label=fid,
                           y_label_size=4)
        elif feat_type == "N":
            # Numerical data.
            data = {}
            color_dict = False
            # Check.
            for c in feat_alphabet:
                data[c] = []
            for fv in feat_list:
                for i,fi in enumerate(feat_idxs):
                    data[feat_alphabet[i]].append(fv[fi])
            #plot_df = pd.DataFrame(data, columns = feat_alphabet)
            #plot_df.index.name = "pos"
            if fid == "pc.con":
                assert l_idxs == 1, "len(feat_idxs) != 1 for pc.con feature (instead: %i)" %(l_idxs)
                pc_con_df = scores_to_plot_df(data[feat_alphabet[0]])
                add_phastcons_scores_plot(pc_con_df, fig, gs, i_plot,
                                          y_label="phastCons",
                                          y_label_size=4)
            elif fid == "pp.con":
                assert l_idxs == 1, "len(feat_idxs) != 1 for pp.con feature (instead: %i)" %(l_idxs)
                pp_con_df = scores_to_plot_df(data[feat_alphabet[0]])
                add_phylop_scores_plot(pp_con_df, fig, gs, i_plot,
                                       y_label="phyloP",
                                       y_label_size=4)
            elif fid == "str":
                # Structure element probabilities (E, H, I, M, S) are
                # drawn as a label logo.
                assert l_idxs == 5, "len(feat_idxs) != 5 for str feature (instead: %i)" %(l_idxs)
                elem_plot_df = pd.DataFrame(data, columns = feat_alphabet)
                elem_plot_df.index.name = "pos"
                add_label_plot(elem_plot_df, fig, gs, i_plot,
                               color_dict=color_dict,
                               y_label=fid,
                               y_label_size=4)
            else:
                # All other numerical values.
                assert l_idxs == 1, "len(feat_idxs) != 1 for additional numerical %s feature (instead: %i)" %(fid, l_idxs)
                add_n_df = scores_to_plot_df(data[feat_alphabet[0]])
                if feat_encoding == "-":
                    add_phastcons_scores_plot(add_n_df, fig, gs, i_plot,
                                              disable_y_labels=True,
                                              y_label=fid,
                                              y_label_size=4)
                elif feat_encoding == "prob": # 0..1
                    add_phastcons_scores_plot(add_n_df, fig, gs, i_plot,
                                              y_label=fid,
                                              y_label_size=4)
                else:
                    assert False, "invalid feature normalization string given for additional numerical %s feature (got: %s)" %(fid, feat_encoding)
    # Store plot.
    fig.savefig(plot_out_file, dpi=150, transparent=False)
    plt.close(fig)
################################################################################
def motif_seqs_to_plot_df(motif_seqs_ll, alphabet=None):
    """
    Given a list of sequence character lists (same lengths), make
    a position-wise character probability matrix in form of dataframe.
    Should work for RNA, eia, rra ... motifs.

    E.g.
    motif_seqs_ll = [["A", "A", "A", "A", "A"], ["A", "A", "A", "A", "C"], ["C", "C", "A", "A", "G"], ["C", "C", "A", "A", "U"]]
    alphabet = ["A", "C", "G", "U"]
    Intermediate dic of lists.
    data = {'A': [0.5, 0.5, 1.0, 1.0, 0.25], 'C': [0.5, 0.5, 0, 0, 0.25], 'G': [0, 0, 0, 0, 0.25], 'U': [0, 0, 0, 0, 0.25]}
    Final dataframe:
         A    C  G  T
    pos
    0    0.5  0.5 0  0
    ...

    motif_seqs_ll:
        list of sequence character lists (same lengths).
    alphabet:
        List of sequence characters to consider for logo generation.
        Defaults to ['A', 'C', 'G', 'U'] (None sentinel avoids a
        mutable default argument).
    """
    if alphabet is None:
        alphabet = ['A', 'C', 'G', 'U']
    assert motif_seqs_ll, "given motif_seqs_ll empty"
    # Number of motifs.
    c_motifs = len(motif_seqs_ll)
    # Length of motif (length of lists).
    motif_len = len(motif_seqs_ll[0])
    # Check for same lengths.
    for l in motif_seqs_ll:
        assert len(l) == motif_len, "differing motif list lengths encountered (%i != %i)" %(len(l), motif_len)
    data = {c: [] for c in alphabet}
    for i in range(motif_len):
        # Count character occurrences at motif position i.
        cc_dic = {c: 0 for c in alphabet}
        for motif in motif_seqs_ll:
            cc_dic[motif[i]] += 1
        # Store character probabilities at position i (zero counts stay
        # int 0, matching the documented intermediate dict).
        for c in alphabet:
            cc = cc_dic[c]
            data[c].append(cc / c_motifs if cc else 0)
    plot_df = pd.DataFrame(data, columns=alphabet)
    plot_df.index.name = "pos"
    return plot_df
################################################################################
def motif_scores_to_plot_df(motif_sc_ll):
    """
    Given a list of score lists (same length), calculate the mean and
    standard deviation of the scores at each position and return them
    as a dataframe with columns 'pos' (1-based), 'score' and 'stdev'.

    E.g. motif_sc_ll = [[1,1,1,1],[2,2,2,2],[3,3,3,3]] gives mean
    scores [2.0, 2.0, 2.0, 2.0].
    """
    assert motif_sc_ll, "given scores list empty"
    # Position-wise mean and standard deviation over all score lists.
    mean_scores = list(np.mean(motif_sc_ll, axis=0))
    stdev_scores = list(np.std(motif_sc_ll, axis=0))
    data = {'pos': [pos + 1 for pos in range(len(mean_scores))],
            'score': mean_scores,
            'stdev': stdev_scores}
    return pd.DataFrame(data, columns=['pos', 'score', 'stdev'])
################################################################################
def make_motif_plot(motif_matrix, ch_info_dic, motif_out_file,
                    fid2stdev_dic=False):
    """
    Plot motif using a 2D list with size motif_size*num_features, which
    stores the average values for all feature channels, and store the
    plot in motif_out_file.

    The sequence logo is drawn first (double height), followed by one
    subplot per additional feature in alphabetical feature ID order.

    motif_matrix:
        Per-position average feature channel vectors; channel layout
        is described by ch_info_dic.
    fid2stdev_dic:
        Feature ID to list of standard deviations (list length == motif size).
        Store score stdev at each motif position. For one-channel numerical
        features (pc.con, pp.con, additional numerical features).
    """
    # First make sequence plot, then alphabetically rest of features.
    seq_df = make_motif_label_plot_df("fa", ch_info_dic, motif_matrix)
    # Number of plots (sequence logo gets height ratio 2.5).
    n_subplots = 1
    height_ratios = [2.5]
    # Heights and number of additional plots.
    for fid in ch_info_dic:
        if fid == "fa":
            continue
        n_subplots += 1
        height_ratios.append(1)
    # Init plot.
    fig_width = 4.5
    fig_height = 1.5 * n_subplots
    if n_subplots == 1:
        # Sequence-only motif gets a fixed height.
        fig_height = 2.5
    fig = plt.figure(figsize=(fig_width, fig_height))
    gs = gridspec.GridSpec(nrows=n_subplots, ncols=1, height_ratios=height_ratios)
    # Plot subplots.
    i_plot = 0
    # Sequence motif plot.
    color_dict = {'A' : '#008000', 'C': '#0000ff', 'G': '#ffa600', 'U': '#ff0000'}
    add_motif_label_plot(seq_df, fig, gs, i_plot, color_dict=color_dict, y_label="sequence")
    # Plot additional plots, one per non-sequence feature.
    for fid, fdt in sorted(ch_info_dic.items()):
        if fid == "fa":
            continue
        feat_type = fdt[0]
        feat_idxs = fdt[1]
        feat_alphabet = fdt[2]
        feat_encoding = fdt[3]
        l_idxs = len(feat_idxs)
        # Non-sequence logos use logomaker's default colors.
        color_dict = False
        stdev = False
        # Plot index.
        i_plot += 1
        # Categorical data.
        if feat_type == "C":
            c_df = make_motif_label_plot_df(fid, ch_info_dic, motif_matrix)
            add_motif_label_plot(c_df, fig, gs, i_plot,
                                 color_dict=color_dict,
                                 y_label_size=7,
                                 y_label=fid)
        elif feat_type == "N":
            if fid == "pc.con":
                if fid2stdev_dic:
                    assert fid in fid2stdev_dic, "fid2stdev_dic given but missing feature ID %s" %(fid)
                    stdev = fid2stdev_dic[fid]
                pc_df = make_motif_scores_plot_df(fid, ch_info_dic, motif_matrix,
                                                  stdev=stdev)
                add_phastcons_scores_plot(pc_df, fig, gs, i_plot,
                                          stdev=stdev,
                                          y_label_size=4)
            elif fid == "pp.con":
                if fid2stdev_dic:
                    assert fid in fid2stdev_dic, "fid2stdev_dic given but missing feature ID %s" %(fid)
                    stdev = fid2stdev_dic[fid]
                pp_df = make_motif_scores_plot_df(fid, ch_info_dic, motif_matrix,
                                                  stdev=stdev)
                add_phylop_scores_plot(pp_df, fig, gs, i_plot,
                                       stdev=stdev,
                                       y_label_size=4)
            elif fid == "str":
                # Structure element probabilities are drawn as a logo.
                elem_df = make_motif_label_plot_df(fid, ch_info_dic, motif_matrix)
                add_motif_label_plot(elem_df, fig, gs, i_plot,
                                     color_dict=color_dict,
                                     y_label_size=7,
                                     y_label=fid)
            else:
                # Additional numerical features, treat like pc.con, pp.con.
                assert l_idxs == 1, "len(feat_idxs) != 1 for additional numerical %s feature (instead: %i)" %(fid, l_idxs)
                if fid2stdev_dic:
                    assert fid in fid2stdev_dic, "fid2stdev_dic given but missing feature ID %s" %(fid)
                    stdev = fid2stdev_dic[fid]
                add_n_df = make_motif_scores_plot_df(fid, ch_info_dic, motif_matrix,
                                                     stdev=stdev)
                if feat_encoding == "-":
                    add_phastcons_scores_plot(add_n_df, fig, gs, i_plot,
                                              stdev=stdev,
                                              disable_y_labels=True,
                                              y_label=fid,
                                              y_label_size=4)
                elif feat_encoding == "prob": # 0..1
                    add_phastcons_scores_plot(add_n_df, fig, gs, i_plot,
                                              stdev=stdev,
                                              y_label=fid,
                                              y_label_size=4)
                else:
                    assert False, "invalid feature normalization string given for additional numerical %s feature (got: %s)" %(fid, feat_encoding)
    # Store plot.
    fig.savefig(motif_out_file, dpi=150, transparent=False)
    plt.close(fig)
################################################################################
def create_test_set_lengths_plot(test_len_list, out_plot,
                                 theme=1,
                                 scale_zero_max=False):
    """
    Create a box plot showing the distribution of test set lengths and
    store it in out_plot (pandas dataframe + seaborn boxplot).

    test_len_list:
        List of test set sequence lengths.
    theme:
        1 : default seaborn darkgrid theme (cyan box).
        2 : midnight blue theme (yellowish text "#fcc826", pinkish grid
            "#fd3b9d", blue boxes "#2f19f3", transparent background).
    scale_zero_max:
        If set, scale the y axis from 0 up to the maximum length
        rounded up to the next multiple of 10.
    """
    assert test_len_list, "given list test_len_list empty"
    if scale_zero_max:
        # y-axis upper limit: maximum length rounded up to the next 10.
        max_y = max(test_len_list)
        while max_y % 10:
            max_y += 1
    # Dataframe with one "set" label per length value.
    test_label = "Test set"
    n_lengths = len(test_len_list)
    data = {'set': n_lengths * [test_label],
            'length': list(test_len_list)}
    df = pd.DataFrame(data, columns=['set', 'length'])
    if theme == 1:
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="length", data=df, palette=['cyan'],
                    width=0.7, linewidth=1.5, boxprops=dict(alpha=.7))
        ax.set_ylabel("Length (nt)", fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        if scale_zero_max:
            ax.set_ylim([0, max_y])
        ax.set(xlabel=None)
        fig.savefig(out_plot, dpi=125, bbox_inches='tight')
    elif theme == 2:
        # Theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Custom element props (outliers, box, median, mean, whiskers, caps).
        flierprops = dict(markersize=5, markerfacecolor=box_color, markeredgecolor=text_color)
        boxprops = dict(color=box_color, edgecolor=text_color)
        medianprops = dict(color=text_color)
        meanprops = dict(color=text_color)
        whiskerprops = dict(color=text_color)
        capprops = dict(color=text_color)
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="length", data=df,
                    flierprops=flierprops,
                    boxprops=boxprops,
                    meanprops=meanprops,
                    medianprops=medianprops,
                    whiskerprops=whiskerprops,
                    capprops=capprops,
                    width=0.7, linewidth=1.5)
        ax.set_ylabel("Length (nt)", fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        if scale_zero_max:
            ax.set_ylim([0, max_y])
        ax.set(xlabel=None)
        fig.savefig(out_plot, dpi=125, bbox_inches='tight', transparent=True)
################################################################################
def create_test_set_entropy_plot(test_entr_list, out_plot,
                                 theme=1):
    """
    Create a box plot showing the distribution of sequence entropies
    (complexities) for the test dataset and store it in out_plot
    (pandas dataframe + seaborn boxplot).

    test_entr_list:
        List of test set sequence entropies.
    theme:
        1 : default seaborn darkgrid theme (cyan box).
        2 : midnight blue theme (yellowish text "#fcc826", pinkish grid
            "#fd3b9d", blue boxes "#2f19f3", transparent background).
    """
    assert test_entr_list, "given list test_entr_list empty"
    # Dataframe with one "set" label per entropy value.
    test_label = "Test set"
    n_values = len(test_entr_list)
    data = {'set': n_values * [test_label],
            'entropy': list(test_entr_list)}
    df = pd.DataFrame(data, columns=['set', 'entropy'])
    if theme == 1:
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="entropy", data=df, palette=['cyan'],
                    width=0.7, linewidth=1.5, boxprops=dict(alpha=.7))
        ax.set_ylabel("Sequence complexity", fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        ax.set(xlabel=None)
        fig.savefig(out_plot, dpi=125, bbox_inches='tight')
    elif theme == 2:
        # Midnight Blue theme; boxplot element props customized via
        # flierprops / boxprops / medianprops / meanprops /
        # whiskerprops / capprops (see matplotlib.axes.Axes.boxplot).
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        box_color = "#2f19f3"
        # Custom flier (outlier) edge and face colors.
        flierprops = dict(markersize=5, markerfacecolor=box_color, markeredgecolor=text_color)
        boxprops = dict(color=box_color, edgecolor=text_color)
        medianprops = dict(color=text_color)
        meanprops = dict(color=text_color)
        whiskerprops = dict(color=text_color)
        capprops = dict(color=text_color)
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.boxplot(x="set", y="entropy", data=df,
                    flierprops=flierprops,
                    boxprops=boxprops,
                    meanprops=meanprops,
                    medianprops=medianprops,
                    whiskerprops=whiskerprops,
                    capprops=capprops,
                    width=0.7, linewidth=1.5)
        ax.set_ylabel("Sequence complexity", fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        ax.set(xlabel=None)
        fig.savefig(out_plot, dpi=125, bbox_inches='tight', transparent=True)
################################################################################
def create_test_set_dint_plot(test_dintp_dic, out_plot,
                              theme=1):
    """
    Create a bar plot showing the di-nucleotide percentages
    (16 classes) in the test dataset and store it in out_plot
    (pandas dataframe + seaborn barplot).

    test_dintp_dic:
        di-nucleotide -> percentage dictionary for the test dataset.
    theme:
        1 : default seaborn darkgrid theme (cyan "#69e9f6" bars).
        2 : midnight blue theme (transparent background).
    """
    assert test_dintp_dic, "given dictionary test_dintp_dic empty"
    # Dataframe with one row per di-nucleotide.
    data = {'dint': list(test_dintp_dic.keys()),
            'perc': [test_dintp_dic[dint] for dint in test_dintp_dic]}
    df = pd.DataFrame(data, columns=['dint', 'perc'])
    y_label = "Percentage (%)"
    if theme == 1:
        # One bar color entry per di-nucleotide.
        theme_palette = ["#69e9f6"] * len(test_dintp_dic)
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.barplot(x="dint", y="perc", data=df, ecolor="darkgrey",
                    palette=theme_palette,
                    edgecolor="lightgrey")
        fig.set_figwidth(11)
        fig.set_figheight(3.5)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label, fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        # Midnight blue theme colors.
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        theme_palette = ["blue"] * len(test_dintp_dic)
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.barplot(x="dint", y="perc", data=df, ecolor="#fcc826",
                    palette=theme_palette,
                    edgecolor="#fcc826")
        fig.set_figwidth(11)
        fig.set_figheight(3.5)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label, fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def generate_test_set_top_kmer_table(test_kmer_dic,
                                     top=5,
                                     val_type="c"):
    """
    Given a k-mer count dictionary for the test dataset, generate
    a markdown table with the top x k-mers (sorted by descending
    dictionary value) and return the table string.

    NOTE: for val_type "p" and "r", the given dictionary values are
    reformatted in place to fixed-decimal strings.

    test_kmer_dic:
        k-mer -> value dictionary (all keys assumed same length k).
    top:
        Number of top k-mers to report (capped at the number of
        k-mers present in test_kmer_dic).
    val_type:
        Specify type of stored dictionary value.
        c : count (count of k-mer)
        r : ratio (k-mer count / total k-mer count)
        p : percentage ( (k-mer count / total k-mer count) * 100)
    """
    assert test_kmer_dic, "given dictionary test_kmer_dic empty"
    # BUGFIX: old pattern "^[c|p|r]$" also accepted a literal "|".
    assert re.search("^[cpr]$", val_type), "invalid val_type given"
    # Get size of k from the first key.
    k = len(next(iter(test_kmer_dic)))
    # Expected k-mer number (4^k) and observed distinct k-mer count.
    exp_kmer_nr = pow(4, k)
    test_kmer_nr = sum(1 for kc in test_kmer_dic.values() if kc)
    test_kmer_perc = "%.2f " %((test_kmer_nr / exp_kmer_nr) * 100) + " %"
    # Adjust decimal places based on k-mer size.
    dc_p = 2
    dc_r = 4
    if k > 3:
        dc_p += k - 3
        dc_r += k - 3
    dc_p_str = "%."+str(dc_p)+"f"
    dc_r_str = "%."+str(dc_r)+"f"
    add_ch = ""
    if val_type == "p":
        add_ch = " %"
        # Format percentages in place to dc_p decimal places.
        for kmer in test_kmer_dic:
            test_kmer_dic[kmer] = dc_p_str % test_kmer_dic[kmer]
    elif val_type == "r":
        # Format ratios in place to dc_r decimal places.
        for kmer in test_kmer_dic:
            test_kmer_dic[kmer] = dc_r_str % test_kmer_dic[kmer]
    # Get top k-mers, sorted numerically by value.
    # BUGFIX: values may have been formatted to strings above; sorting
    # them directly compared strings lexicographically, ranking e.g.
    # "9.00" above "10.00". Compare as floats instead.
    sorted_kmers = sorted(test_kmer_dic.items(),
                          key=lambda item: float(item[1]), reverse=True)
    test_topk_list = [kmer for kmer, v in sorted_kmers[:top]]
    # Generate markdown table (capped at the number of k-mers present,
    # so top > len(test_kmer_dic) no longer raises an IndexError).
    mdtable = "| Rank | Test set |\n"
    mdtable += "| :-: | :-: |\n"
    for i, test_kmer in enumerate(test_topk_list):
        pos = i + 1
        mdtable += "| %i | %s (%s%s) |\n" %(pos, test_kmer, str(test_kmer_dic[test_kmer]), add_ch)
    mdtable += "| ... | |\n"
    mdtable += "| # distinct k-mers | %i (%s) |\n" %(test_kmer_nr, test_kmer_perc)
    # Return markdown table.
    return mdtable
################################################################################
def create_test_set_str_elem_plot(test_str_stats_dic, out_plot,
                                  theme=1):
    """
    Create a bar plot, showing average probabilities of secondary
    structure elements (U, E, H, I, M, S) in the test set.

    test_str_stats_dic contains statistics for test set (mean + stdev values).
    Create a dataframe using Pandas, and use seaborn for plotting.
    Store plot in out_plot.
    Stats dictionary content:
    stats_dic["U"] = [pu_mean, pu_stdev]
    stats_dic["S"] = [ps_mean, ps_stdev]
    stats_dic["E"] = [pe_mean, pe_stdev]
    stats_dic["H"] = [ph_mean, ph_stdev]
    stats_dic["I"] = [pi_mean, pi_stdev]
    stats_dic["M"] = [pm_mean, pm_stdev]
    theme:
        Plot color theme (1: light, 2: dark with custom rc colors).
    """
    # Checker.
    assert test_str_stats_dic, "given dictionary test_str_stats_dic empty"
    # Make pandas dataframe, keeping only structure element entries.
    # NOTE: fixed character class (was "^[U|S|E|H|I|M]$", which also
    # matched a literal "|" key).
    data = {'elem': [], 'mean_p': [], 'stdev_p': []}
    for el in test_str_stats_dic:
        if not re.search("^[USEHIM]$", el):
            continue
        data['elem'].append(el)
        data['mean_p'].append(test_str_stats_dic[el][0])
        data['stdev_p'].append(test_str_stats_dic[el][1])
    # One palette color per plotted element.
    n_elem = len(data['elem'])
    df = pd.DataFrame(data, columns=['elem', 'mean_p', 'stdev_p'])
    y_label = "Mean probability"
    if theme == 1:
        # Make plot.
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.barplot(x="elem", y="mean_p", data=df, ecolor="darkgrey",
                    palette=["#69e9f6"] * n_elem,  # yerr=df['stdev_p'],
                    edgecolor="lightgrey")
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label, fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=14)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        # Make plot (transparent background, custom rc colors).
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.barplot(x="elem", y="mean_p", data=df, ecolor="#fcc826",
                    palette=["blue"] * n_elem,  # yerr=df['stdev_p'],
                    edgecolor="#fcc826")
        fig.set_figwidth(5)
        fig.set_figheight(4)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label, fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=14)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def create_test_set_region_annot_plot(test_ra_dic, out_plot,
                                      plot_labels,
                                      perc=False,
                                      theme=1):
    """
    Create a bar plot for region labels. plot_labels selects which
    dictionary entries get plotted. With perc=True, counts are
    normalized by the "total_pos" dictionary entry and plotted as
    percentages.

    Input dictionary has following keys:
    labels, total_pos
    A Pandas dataframe is built from the selected labels, plotted with
    seaborn, and the figure is written to out_plot.
    theme:
        Plot color theme (1: light, 2: dark with custom rc colors).
    MV colors:
    #69e9f6, #f154b2
    """
    # Sanity checks.
    assert test_ra_dic, "given dictionary test_ra_dic empty"
    assert plot_labels, "given labels to plot list empty"
    if perc:
        assert test_ra_dic["total_pos"], "total_pos key missing in test_ra_dic"
    # Collect counts (or percentages) for the requested labels.
    sel_labels = []
    sel_counts = []
    for lbl, cnt in test_ra_dic.items():
        if lbl not in plot_labels:
            continue
        if perc:
            cnt = (cnt / test_ra_dic["total_pos"]) * 100
        sel_labels.append(lbl)
        sel_counts.append(cnt)
    df = pd.DataFrame({'label': sel_labels, 'count': sel_counts},
                      columns=['count', 'label'])
    y_label = "Percentage (%)" if perc else "# positions"
    if theme == 1:
        # Uniform palette (one entry per dictionary key, as before).
        bar_colors = ["#69e9f6" for _ in test_ra_dic]
        sns.set(style="darkgrid")
        fig, ax = plt.subplots()
        sns.barplot(x="label", y="count", data=df, ecolor="darkgrey",
                    palette=bar_colors,
                    edgecolor="lightgrey")
        fig.set_figwidth(8)
        fig.set_figheight(4)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label, fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight')
    elif theme == 2:
        text_color = "#fcc826"
        plot_color = "#fd3b9d"
        bar_colors = ["blue" for _ in test_ra_dic]
        # Dark theme with custom rc colors and transparent background.
        sns.set(style="darkgrid", rc={ "axes.labelcolor": text_color, "text.color": text_color, "xtick.color": text_color, "ytick.color": text_color, "grid.color": plot_color, "axes.edgecolor": plot_color})
        fig, ax = plt.subplots()
        sns.barplot(x="label", y="count", data=df, ecolor="#fcc826",
                    palette=bar_colors,
                    edgecolor="#fcc826")
        fig.set_figwidth(11)
        fig.set_figheight(3.5)
        ax.set(xlabel=None)
        ax.set_ylabel(y_label, fontsize=18)
        ax.tick_params(axis='x', labelsize=18)
        ax.tick_params(axis='y', labelsize=12)
        fig.savefig(out_plot, dpi=100, bbox_inches='tight', transparent=True)
################################################################################
def rp_gp_generate_html_report(test_seqs_dic, out_folder,
                               dataset_type, rplib_path,
                               html_report_out=False,
                               plots_subfolder="plots_rnaprot_gp",
                               test_str_stats_dic=False,
                               test_phastcons_stats_dic=False,
                               test_phylop_stats_dic=False,
                               test_eia_stats_dic=False,
                               test_tra_stats_dic=False,
                               test_rra_stats_dic=False,
                               add_feat_dic_list=False,
                               target_gbtc_dic=False,
                               all_gbtc_dic=False,
                               t2hc_dic=False,
                               t2i_dic=False,
                               theme=1,
                               kmer_top=10,
                               target_top=10,
                               rna=True,
                               ):
    """
    Generate HTML report for rnaprot gp, providing statistics for the
    generated prediction dataset.

    test_seqs_dic:
        Sequences dictionary (site ID -> sequence).
    out_folder:
        rnaprot gp results output folder, to store report in.
    dataset_type:
        Dataset type; one of "g", "t", "s" ("t" sites map to transcripts,
        "g" sites map to genes; "s" presumably plain sequence input --
        TODO confirm against caller).
    rplib_path:
        Path to rnaprot library folder (used here to locate report logos
        in rplib_path/content/).
    html_report_out:
        HTML report output file. If not set, report is written to
        out_folder/report.rnaprot_gp.html.
    plots_subfolder:
        Name of plots subfolder created inside out_folder.
    test_str_stats_dic:
        Structure statistics dictionary.
    test_phastcons_stats_dic:
        Phastcons scores statistics dictionary.
    test_phylop_stats_dic:
        phyloP scores statistics dictionary.
    test_eia_stats_dic:
        Exon-intron annotation statistics dictionary.
    test_tra_stats_dic:
        Transcript region annotation statistics dictionary.
    test_rra_stats_dic:
        Repeat region annotation statistics dictionary.
    add_feat_dic_list:
        List of additional BED feature statistics dictionaries
        (from --feat-in).
    target_gbtc_dic:
        Gene biotype counts for target set dictionary.
    all_gbtc_dic:
        Gene biotype counts for all genes dictionary (gene biotype -> count)
    t2hc_dic:
        Transcript ID to hit count (# sites on transcript) dictionary.
    t2i_dic:
        Transcript ID to info list dictionary.
    theme:
        HTML report color theme (1 or 2).
    kmer_top:
        Number of top k-mers shown in the k-mer statistics tables.
    target_top:
        Number of top target regions shown in the overlap statistics table.
    rna:
        Set True if input sequences are RNA.
    """
    # Checks.
    ds_types = {'s':1, 't':1, 'g':1}
    assert dataset_type in ds_types, "invalid dataset type given (expected g, s, or t)"
    # Import markdown to generate report.
    from markdown import markdown
    # Output subfolder for plots.
    plots_folder = plots_subfolder
    plots_out_folder = out_folder + "/" + plots_folder
    if not os.path.exists(plots_out_folder):
        os.makedirs(plots_out_folder)
    # Output files.
    html_out = out_folder + "/" + "report.rnaprot_gp.html"
    if html_report_out:
        html_out = html_report_out
    # Plot files.
    # NOTE(review): phastcons_plot / phylop_plot paths are defined but no
    # conservation plots are generated below (and the TOC anchor #con-plot
    # has no matching section) -- confirm whether these were intended.
    lengths_plot = "set_lengths_plot.png"
    entropy_plot = "sequence_complexity_plot.png"
    dint_plot = "dint_percentages_plot.png"
    str_elem_plot = "str_elem_plot.png"
    phastcons_plot = "phastcons_plot.png"
    phylop_plot = "phylop_plot.png"
    eia_plot = "exon_intron_region_plot.png"
    tra_plot = "transcript_region_plot.png"
    rra_plot = "repeat_region_plot.png"
    lengths_plot_out = plots_out_folder + "/" + lengths_plot
    entropy_plot_out = plots_out_folder + "/" + entropy_plot
    dint_plot_out = plots_out_folder + "/" + dint_plot
    str_elem_plot_out = plots_out_folder + "/" + str_elem_plot
    phastcons_plot_out = plots_out_folder + "/" + phastcons_plot
    phylop_plot_out = plots_out_folder + "/" + phylop_plot
    eia_plot_out = plots_out_folder + "/" + eia_plot
    tra_plot_out = plots_out_folder + "/" + tra_plot
    rra_plot_out = plots_out_folder + "/" + rra_plot
    print("Generate statistics for HTML report ... ")
    # Site numbers.
    c_test_out = len(test_seqs_dic)
    # Site lengths.
    test_len_list = get_seq_len_list_from_dic(test_seqs_dic)
    # Get entropy scores for sequences.
    test_entr_list = seqs_dic_calc_entropies(test_seqs_dic, rna=rna,
                                             uc_part_only=True)
    # Get set nucleotide frequencies.
    test_ntc_dic = seqs_dic_count_nt_freqs(test_seqs_dic, rna=rna,
                                           convert_to_uc=True)
    # Get nucleotide ratios.
    test_ntr_dic = ntc_dic_to_ratio_dic(test_ntc_dic, perc=True)
    # Get dinucleotide percentages.
    test_dintr_dic = seqs_dic_count_kmer_freqs(test_seqs_dic, 2, rna=rna,
                                               return_ratios=True,
                                               perc=True,
                                               report_key_error=True,
                                               convert_to_uc=True)
    # Get 3-mer percentages.
    test_3mer_dic = seqs_dic_count_kmer_freqs(test_seqs_dic, 3, rna=rna,
                                              return_ratios=True,
                                              perc=True,
                                              report_key_error=True,
                                              convert_to_uc=True)
    # Get 4-mer percentages.
    test_4mer_dic = seqs_dic_count_kmer_freqs(test_seqs_dic, 4, rna=rna,
                                              return_ratios=True,
                                              perc=True,
                                              report_key_error=True,
                                              convert_to_uc=True)
    # Get 5-mer percentages.
    test_5mer_dic = seqs_dic_count_kmer_freqs(test_seqs_dic, 5, rna=rna,
                                              return_ratios=True,
                                              perc=True,
                                              report_key_error=True,
                                              convert_to_uc=True)
    # Logo paths.
    logo1_path = rplib_path + "/content/logo1.png"
    logo2_path = rplib_path + "/content/logo2.png"
    # Create theme-specific HTML header.
    if theme == 1:
        mdtext = """
<head>
<title>RNAProt - Prediction Set Generation Report</title>
</head>
<img src="%s" alt="rp_logo"
	title="rp_logo" width="600" />
""" %(logo1_path)
    elif theme == 2:
        mdtext = """
<head>
<title>RNAProt - Prediction Set Generation Report</title>
<style>
h1 {color:#fd3b9d;}
h2 {color:#fd3b9d;}
h3 {color:#fd3b9d;}
</style>
</head>
<img src="%s" alt="rp_logo"
	title="rp_logo" width="500" />
<body style="font-family:sans-serif" bgcolor="#190250" text="#fcc826" link="#fd3b9d" vlink="#fd3b9d" alink="#fd3b9d">
""" %(logo2_path)
    else:
        assert False, "invalid theme ID given"
    # Add first section markdown (table of contents; entries below are
    # appended conditionally, matching the sections generated further down).
    mdtext += """
# Prediction set generation report
List of available statistics for the prediction dataset generated
by RNAProt (rnaprot gp):
- [Prediction dataset statistics](#set-stats)
- [Site length distribution](#len-plot)
- [Sequence complexity distribution](#ent-plot)
- [Di-nucleotide distribution](#dint-plot)
- [Top k-mer statistics](#kmer-stats)"""
    if test_str_stats_dic:
        mdtext += "\n"
        if 'S' in test_str_stats_dic:
            mdtext += "- [Structural elements distribution](#str-elem-plot)\n"
            mdtext += "- [Secondary structure statistics](#bp-stats)"
        else:
            # If only --bp-in selected.
            mdtext += "- [Secondary structure statistics](#bp-stats)"
    if test_phastcons_stats_dic or test_phylop_stats_dic:
        mdtext += "\n"
        mdtext += "- [Conservation scores distribution](#con-plot)\n"
        mdtext += "- [Conservation scores statistics](#con-stats)"
    if test_eia_stats_dic:
        # NOTE(review): occ_labels is assigned here and below but never
        # used in this function.
        occ_labels = ["F", "T"]
        mdtext += "\n"
        mdtext += "- [Exon-intron region distribution](#eia-plot)\n"
        mdtext += "- [Exon-intron region statistics](#eia-stats)"
    if test_tra_stats_dic:
        occ_labels = ["S", "E", "A", "Z", "B"]
        mdtext += "\n"
        mdtext += "- [Transcript region distribution](#tra-plot)\n"
        mdtext += "- [Transcript region statistics](#tra-stats)"
    if test_rra_stats_dic:
        mdtext += "\n"
        mdtext += "- [Repeat region distribution](#rra-plot)\n"
        mdtext += "- [Repeat region statistics](#rra-stats)"
    if target_gbtc_dic and all_gbtc_dic:
        mdtext += "\n"
        mdtext += "- [Target gene biotype statistics](#gbt-stats)"
    if t2hc_dic and t2i_dic:
        mdtext += "\n"
        mdtext += "- [Target region overlap statistics](#tro-stats)"
    if add_feat_dic_list:
        mdtext += "\n"
        mdtext += "- [BED feature statistics](#bed-stats)\n"
    mdtext += "\n \n"
    # Make general stats table.
    mdtext += """
## Prediction dataset statistics ### {#set-stats}
**Table:** Prediction dataset statistics regarding sequence lengths
(min, max, mean, and median length) in nucleotides (nt),
sequence complexity (mean Shannon entropy over all sequences in the set)
and nucleotide contents (A, C, G, U).
"""
    mdtext += "| Attribute | Prediction set | \n"
    mdtext += "| :-: | :-: |\n"
    mdtext += "| # sites | %i |\n" %(c_test_out)
    mdtext += "| min site length | %i |\n" %(min(test_len_list))
    mdtext += "| max site length | %i |\n" %(max(test_len_list))
    mdtext += "| mean site length | %.1f |\n" %(statistics.mean(test_len_list))
    mdtext += "| median site length | %i |\n" %(statistics.median(test_len_list))
    mdtext += "| mean complexity | %.3f |\n" %(statistics.mean(test_entr_list))
    mdtext += '| %A |' + " %.2f |\n" %(test_ntr_dic["A"])
    mdtext += '| %C |' + " %.2f |\n" %(test_ntr_dic["C"])
    mdtext += '| %G |' + " %.2f |\n" %(test_ntr_dic["G"])
    mdtext += '| %U |' + " %.2f |\n" %(test_ntr_dic["U"])
    mdtext += "\n \n \n"
    # Make site length distribution box plot.
    create_test_set_lengths_plot(test_len_list, lengths_plot_out,
                                 theme=theme)
    lengths_plot_path = plots_folder + "/" + lengths_plot
    mdtext += """
## Site length distribution ### {#len-plot}
Site length distribution in the prediction set. Lengths differences are due
to --in sequences or sites of various lengths.
"""
    mdtext += '<img src="' + lengths_plot_path + '" alt="Site length distribution"' + "\n"
    mdtext += 'title="Site length distribution" width="500" />' + "\n"
    mdtext += """
**Figure:** Site length distribution for the prediction dataset.
"""
    # Make sequence complexity box plot.
    create_test_set_entropy_plot(test_entr_list, entropy_plot_out,
                                 theme=theme)
    entropy_plot_path = plots_folder + "/" + entropy_plot
    mdtext += """
## Sequence complexity distribution ### {#ent-plot}
The Shannon entropy is calculated for each sequence to measure
its information content (i.e., its complexity). A sequence with
equal amounts of all four nucleotides has an entropy value of 1.0
(highest possible). A sequence with equal amounts of two nucleotides
has an entropy value of 0.5. Finally, the lowest possible entropy is
achieved by a sequence which contains only one type of nucleotide.
Find the formula used to compute Shannon's entropy
[here](https://www.ncbi.nlm.nih.gov/pubmed/15215465) (see CE formula).
"""
    mdtext += '<img src="' + entropy_plot_path + '" alt="Sequence complexity distribution"' + "\n"
    mdtext += 'title="Sequence complexity distribution" width="500" />' + "\n"
    mdtext += """
**Figure:** Sequence complexity (Shannon entropy
computed for each sequence) distributions for the prediction dataset.
"""
    # Make di-nucleotide bar plot.
    create_test_set_dint_plot(test_dintr_dic, dint_plot_out,
                              theme=theme)
    dint_plot_path = plots_folder + "/" + dint_plot
    mdtext += """
## Di-nucleotide distribution ### {#dint-plot}
Di-nucleotide percentages for the prediction dataset.
"""
    mdtext += '<img src="' + dint_plot_path + '" alt="Di-nucleotide distribution"' + "\n"
    mdtext += 'title="Di-nucleotide distribution" width="600" />' + "\n"
    mdtext += """
**Figure:** Di-nucleotide percentages for the prediction dataset.
"""
    # Make the k-mer tables (3-, 4-, and 5-mers, top kmer_top each).
    top3mertab = generate_test_set_top_kmer_table(test_3mer_dic,
                                                  top=kmer_top,
                                                  val_type="p")
    top4mertab = generate_test_set_top_kmer_table(test_4mer_dic,
                                                  top=kmer_top,
                                                  val_type="p")
    top5mertab = generate_test_set_top_kmer_table(test_5mer_dic,
                                                  top=kmer_top,
                                                  val_type="p")
    mdtext += """
## Top k-mer statistics ### {#kmer-stats}
**Table:** Top %i 3-mers for the prediction dataset and their percentages. In case of uniform distribution with all 3-mers present, each 3-mer would have a percentage = 1.5625.
""" %(kmer_top)
    mdtext += top3mertab
    mdtext += "\n \n"
    mdtext += """
**Table:** Top %i 4-mers for the prediction dataset and their percentages. In case of uniform distribution with all 4-mers present, each 4-mer would have a percentage = 0.390625.
""" %(kmer_top)
    mdtext += top4mertab
    mdtext += "\n \n"
    mdtext += """
**Table:** Top %i 5-mers for the prediction dataset and their percentages. In case of uniform distribution with all 5-mers present, each 5-mer would have a percentage = 0.09765625.
""" %(kmer_top)
    mdtext += top5mertab
    mdtext += "\n \n \n"
    # Secondary structure section (only if structure stats were computed).
    if test_str_stats_dic:
        # Checks.
        assert test_str_stats_dic['seqlen_sum'], "unexpected total sequence length of 0 encountered"
        if 'S' in test_str_stats_dic:
            # Make structural elements bar plot.
            create_test_set_str_elem_plot(test_str_stats_dic,
                                          str_elem_plot_out,
                                          theme=theme)
            str_elem_plot_path = plots_folder + "/" + str_elem_plot
            mdtext += """
## Structural elements distribution ### {#str-elem-plot}
Mean position-wise probabilities of the different loop context structural elements are shown
for the prediction dataset. U: unpaired, E: external loop, H: hairpin loop,
I: internal loop, M: multi-loop, S: paired.
"""
            mdtext += '<img src="' + str_elem_plot_path + '" alt="Structural elements distribution"' + "\n"
            mdtext += 'title="Structural elements distribution" width="400" />' + "\n"
            mdtext += """
**Figure:** Mean position-wise probabilities of different loop context structural
elements for the prediction dataset.
U: unpaired, E: external loop, H: hairpin loop,
I: internal loop, M: multi-loop, S: paired.
"""
        mdtext += """
## Secondary structure statistics ### {#bp-stats}
**Table:** Secondary structure statistics of the generated prediction set.
"""
        mdtext += "| Attribute | Prediction set | \n"
        mdtext += "| :-: | :-: |\n"
        mdtext += "| total sequence length | %i |\n" %(test_str_stats_dic['seqlen_sum'])
        if 'S' in test_str_stats_dic:
            mdtext += "| mean p(paired) | %.4f (+-%.4f) |\n" %(test_str_stats_dic['S'][0], test_str_stats_dic['S'][1])
            mdtext += "| mean p(unpaired) | %.4f (+-%.4f) |\n" %(test_str_stats_dic['U'][0], test_str_stats_dic['U'][1])
            mdtext += "| mean p(external loop) | %.4f (+-%.4f) |\n" %(test_str_stats_dic['E'][0], test_str_stats_dic['E'][1])
            mdtext += "| mean p(hairpin loop) | %.4f (+-%.4f) |\n" %(test_str_stats_dic['H'][0], test_str_stats_dic['H'][1])
            mdtext += "| mean p(internal loop) | %.4f (+-%.4f) |\n" %(test_str_stats_dic['I'][0], test_str_stats_dic['I'][1])
            mdtext += "| mean p(multi loop) | %.4f (+-%.4f) |\n" %(test_str_stats_dic['M'][0], test_str_stats_dic['M'][1])
        mdtext += "\n \n \n"
    # Conservation scores plots and stats.
    if test_phastcons_stats_dic or test_phylop_stats_dic:
        mdtext += """
## Conservation scores statistics ### {#con-stats}
**Table:** Conservation scores statistics. Note that phyloP statistics are
calculated before normalization (normalizing values to -1 .. 1).
"""
        mdtext += "| Attribute | Prediction set | \n"
        mdtext += "| :-: | :-: |\n"
        if test_phastcons_stats_dic:
            pc_zero_perc = "%.2f" % ((test_phastcons_stats_dic["zero_pos"] / test_phastcons_stats_dic["total_pos"]) * 100)
            mdtext += "| # phastCons scores | %i |\n" %(test_phastcons_stats_dic['total_pos'])
            mdtext += "| # zero scores | %i |\n" %(test_phastcons_stats_dic['zero_pos'])
            mdtext += '| % zero scores |' + " %s |\n" %(pc_zero_perc)
            mdtext += "| min score | %s |\n" %(str(test_phastcons_stats_dic['min']))
            mdtext += "| max score | %s |\n" %(str(test_phastcons_stats_dic['max']))
            mdtext += "| mean score | %.3f (+-%.3f) |\n" %(test_phastcons_stats_dic['mean'], test_phastcons_stats_dic['stdev'])
        if test_phylop_stats_dic:
            pp_zero_perc = "%.2f" % ((test_phylop_stats_dic["zero_pos"] / test_phylop_stats_dic["total_pos"]) * 100)
            mdtext += "| # phyloP scores | %i |\n" %(test_phylop_stats_dic['total_pos'])
            mdtext += "| # zero scores | %i |\n" %(test_phylop_stats_dic['zero_pos'])
            mdtext += '| % zero scores |' + " %s |\n" %(pp_zero_perc)
            mdtext += "| min score | %s |\n" %(str(test_phylop_stats_dic['min']))
            mdtext += "| max score | %s |\n" %(str(test_phylop_stats_dic['max']))
            mdtext += "| mean score | %.3f (+-%.3f) |\n" %(test_phylop_stats_dic['mean'], test_phylop_stats_dic['stdev'])
        mdtext += "\n \n \n"
    # Exon-intron region plots and stats.
    if test_eia_stats_dic:
        mdtext += """
## Exon-intron region distribution ### {#eia-plot}
Distribution of exon and intron regions for the prediction set.
"""
        # EIA plot.
        create_test_set_region_annot_plot(test_eia_stats_dic, eia_plot_out,
                                          ["E", "I", "N"],
                                          perc=True, theme=theme)
        eia_plot_path = plots_folder + "/" + eia_plot
        mdtext += '<img src="' + eia_plot_path + '" alt="Exon-intron region distribution"' + "\n"
        mdtext += 'title="Exon-intron region distribution" width="550" />' + "\n"
        mdtext += """
**Figure:** Percentages of exon (E) and intron (I) regions for the prediction set.
If --eia-n is set, also include regions not covered by introns or exons (N).
## Exon-intron region statistics ### {#eia-stats}
**Table:** Exon-intron region statistics for the prediction set.
If --eia-ib is set, also include statistics for sites containing intron
5' (F) and intron 3' (T) ends.
"""
        # EIA stats.
        if "F" in test_eia_stats_dic:
            test_perc_f_sites = "%.2f" % ((test_eia_stats_dic['F'] / c_test_out)*100) + " %"
            test_perc_t_sites = "%.2f" % ((test_eia_stats_dic['T'] / c_test_out)*100) + " %"
        test_perc_e = "%.2f" % ((test_eia_stats_dic['E'] / test_eia_stats_dic['total_pos'])*100)
        test_perc_i = "%.2f" % ((test_eia_stats_dic['I'] / test_eia_stats_dic['total_pos'])*100)
        if "N" in test_eia_stats_dic:
            test_perc_n = "%.2f" % ((test_eia_stats_dic['N'] / test_eia_stats_dic['total_pos'])*100)
        mdtext += "| Attribute | Prediction set | \n"
        mdtext += "| :-: | :-: |\n"
        mdtext += '| % E |' + " %s |\n" %(test_perc_e)
        mdtext += '| % I |' + " %s |\n" %(test_perc_i)
        if "N" in test_eia_stats_dic:
            mdtext += '| % N |' + " %s |\n" %(test_perc_n)
        if "F" in test_eia_stats_dic:
            mdtext += "| F sites | %i (%s) |\n" %(test_eia_stats_dic['F'], test_perc_f_sites)
            mdtext += "| T sites | %i (%s) |\n" %(test_eia_stats_dic['T'], test_perc_t_sites)
        mdtext += "\n \n \n"
    # Transcript region plots and stats.
    if test_tra_stats_dic:
        mdtext += """
## Transcript region distribution ### {#tra-plot}
Distribution of transcript regions for the prediction set.
"""
        # TRA plot.
        create_test_set_region_annot_plot(test_tra_stats_dic, tra_plot_out,
                                          ["F", "C", "T", "N"],
                                          perc=True, theme=theme)
        tra_plot_path = plots_folder + "/" + tra_plot
        mdtext += '<img src="' + tra_plot_path + '" alt="Transcript region distribution"' + "\n"
        mdtext += 'title="Transcript region distribution" width="400" />' + "\n"
        mdtext += """
**Figure:** Percentages of 5'UTR (F), CDS (C), and 3'UTR (T) positions as well as
positions not covered by these transcript regions (N) for the prediction set.
## Transcript region statistics ### {#tra-stats}
**Table:** Transcript region statistics for the prediction set.
Percentages of positions covered by 5'UTR (F), CDS (C), 3'UTR (T), or non
of these regions (N) are given.
If --tra-codons is set, also include statistics for start codons (S) and
stop codons (E) (sites which contain these).
If --tra-borders is set, also include statistics for transcript starts (A),
transcript ends (Z), exon borders (B) (sites which contain these).
"""
        # TRA stats.
        test_perc_f = "%.2f" % ((test_tra_stats_dic['F'] / test_tra_stats_dic['total_pos'])*100)
        test_perc_c = "%.2f" % ((test_tra_stats_dic['C'] / test_tra_stats_dic['total_pos'])*100)
        test_perc_t = "%.2f" % ((test_tra_stats_dic['T'] / test_tra_stats_dic['total_pos'])*100)
        test_perc_n = "%.2f" % ((test_tra_stats_dic['N'] / test_tra_stats_dic['total_pos'])*100)
        mdtext += "| Attribute | Prediction set | \n"
        mdtext += "| :-: | :-: |\n"
        mdtext += '| % F |' + " %s |\n" %(test_perc_f)
        mdtext += '| % C |' + " %s |\n" %(test_perc_c)
        mdtext += '| % T |' + " %s |\n" %(test_perc_t)
        mdtext += '| % N |' + " %s |\n" %(test_perc_n)
        # Start stop codon annotations.
        if "S" in test_tra_stats_dic:
            test_perc_s_sites = "%.2f" % ((test_tra_stats_dic['S'] / c_test_out)*100) + " %"
            test_perc_e_sites = "%.2f" % ((test_tra_stats_dic['E'] / c_test_out)*100) + " %"
            mdtext += "| S sites | %i (%s) |\n" %(test_tra_stats_dic['S'], test_perc_s_sites)
            mdtext += "| E sites | %i (%s) |\n" %(test_tra_stats_dic['E'], test_perc_e_sites)
        # Border annotations.
        if "A" in test_tra_stats_dic:
            test_perc_a_sites = "%.2f" % ((test_tra_stats_dic['A'] / c_test_out)*100) + " %"
            test_perc_b_sites = "%.2f" % ((test_tra_stats_dic['B'] / c_test_out)*100) + " %"
            test_perc_z_sites = "%.2f" % ((test_tra_stats_dic['Z'] / c_test_out)*100) + " %"
            mdtext += "| A sites | %i (%s) |\n" %(test_tra_stats_dic['A'], test_perc_a_sites)
            mdtext += "| B sites | %i (%s) |\n" %(test_tra_stats_dic['B'], test_perc_b_sites)
            mdtext += "| Z sites | %i (%s) |\n" %(test_tra_stats_dic['Z'], test_perc_z_sites)
        mdtext += "\n \n \n"
    # Repeat region plots and stats.
    if test_rra_stats_dic:
        mdtext += """
## Repeat region distribution ### {#rra-plot}
Distribution of repeat regions for the prediction set. Repeat
regions are annotated in the .2bit genomic sequences file as lowercase
sequences. These regions were identified by RepeatMasker and Tandem Repeats
Finder (with period of 12 or less).
"""
        # RRA plot.
        create_test_set_region_annot_plot(test_rra_stats_dic, rra_plot_out,
                                          ["R", "N"],
                                          perc=True, theme=theme)
        rra_plot_path = plots_folder + "/" + rra_plot
        mdtext += '<img src="' + rra_plot_path + '" alt="Repeat region distribution"' + "\n"
        mdtext += 'title="Repeat region distribution" width="400" />' + "\n"
        mdtext += """
**Figure:** Percentages of repeat (R) and no-repeat (N) regions for the
prediction set.
## Repeat region statistics ### {#rra-stats}
**Table:** Repeat region statistics for the prediction set.
Percentages of prediction set regions covered by repeat (R)
and non-repeat (N) regions are given.
"""
        # RRA stats.
        test_perc_r = "%.2f" % ((test_rra_stats_dic['R'] / test_rra_stats_dic['total_pos'])*100)
        test_perc_n = "%.2f" % ((test_rra_stats_dic['N'] / test_rra_stats_dic['total_pos'])*100)
        mdtext += "| Attribute | Prediction set |\n"
        mdtext += "| :-: | :-: |\n"
        mdtext += '| % R |' + " %s |\n" %(test_perc_r)
        mdtext += '| % N |' + " %s |\n" %(test_perc_n)
        mdtext += "\n \n \n"
    # Target gene biotype count stats.
    if target_gbtc_dic and all_gbtc_dic:
        mdtext += """
## Target gene biotype statistics ### {#gbt-stats}
**Table:** Target gene biotype counts for the prediction set and their percentages
(count normalized by total count for the respective gene biotype).
"""
        mdtext += "| Gene biotype | Target count | Total count | Percentage | \n"
        mdtext += "| :-: | :-: | :-: | :-: |\n"
        unit = " %"
        for bt, target_c in sorted(target_gbtc_dic.items(), key=lambda item: item[1], reverse=True):
            all_c = all_gbtc_dic[bt]
            perc_c = "%.2f" % ((target_c / all_c) * 100)
            mdtext += "| %s | %i | %i | %s%s |\n" %(bt, target_c, all_c, perc_c, unit)
        mdtext += "\n \n \n"
    # Target region overlap stats (top target_top transcripts or genes).
    if t2hc_dic and t2i_dic:
        mdtext += """
## Target region overlap statistics ### {#tro-stats}
**Table:** Target region overlap statistics, showing the top %i targeted
regions (transcript or genes), with the # overlaps == # of sites
overlapping with the region.
""" %(target_top)
        if dataset_type == "t":
            mdtext += "| # overlaps | Transcript ID | Transcript biotype | Gene ID | Gene name | Gene biotype | \n"
            mdtext += "| :-: | :-: | :-: | :-: | :-: | :-: |\n"
            i = 0
            for tr_id, ol_c in sorted(t2hc_dic.items(), key=lambda item: item[1], reverse=True):
                i += 1
                if i > target_top:
                    break
                tr_bt = t2i_dic[tr_id][0]
                gene_id = t2i_dic[tr_id][1]
                gene_name = t2i_dic[tr_id][2]
                gene_bt = t2i_dic[tr_id][3]
                mdtext += "| %i | %s | %s | %s | %s | %s |\n" %(ol_c, tr_id, tr_bt, gene_id, gene_name, gene_bt)
            mdtext += "| ... | | | | | |\n"
            mdtext += "\n \n \n"
        elif dataset_type == "g":
            mdtext += "| # overlaps | Gene ID | Gene name | Gene biotype | \n"
            mdtext += "| :-: | :-: | :-: | :-: |\n"
            i = 0
            for gene_id, ol_c in sorted(t2hc_dic.items(), key=lambda item: item[1], reverse=True):
                i += 1
                if i > target_top:
                    break
                gene_name = t2i_dic[gene_id][0]
                gene_bt = t2i_dic[gene_id][1]
                mdtext += "| %i | %s | %s | %s |\n" %(ol_c, gene_id, gene_name, gene_bt)
            mdtext += "| ... | | | |\n"
            mdtext += "\n \n \n"
    # Additional BED annotations.
    if add_feat_dic_list:
        mdtext += """
## BED feature statistics ### {#bed-stats}
Additional BED annotation feature statistics (from --feat-in table) for the
prediction set.
"""
        # NOTE(review): test_cov_dic / neg_cov_dic are initialized but only
        # used in the commented-out coverage code below.
        test_cov_dic = {}
        neg_cov_dic = {}
        for test_stats_dic in add_feat_dic_list:
            feat_id = test_stats_dic["feat_id"]
            feat_type = test_stats_dic["feat_type"]
            test_total_pos = test_stats_dic["total_pos"]
            test_perc_zero_sites = "%.2f" % ((test_stats_dic['zero_sites'] / test_stats_dic['total_sites'])*100) + " %"
            if feat_type == "C":
                test_c_0 = test_stats_dic["0"]
                test_c_1 = test_stats_dic["1"]
                test_perc_0 = "%.2f" % ((test_c_0 / test_total_pos)*100) + " %"
                test_perc_1 = "%.2f" % ((test_c_1 / test_total_pos)*100) + " %"
            else:
                test_mean = test_stats_dic["mean"]
                test_stdev = test_stats_dic["stdev"]
                test_c_0 = test_stats_dic["zero_pos"]
                test_c_1 = test_total_pos - test_c_0
                test_perc_0 = "%.2f" % ((test_c_0 / test_total_pos)*100) + " %"
                test_perc_1 = "%.2f" % ((test_c_1 / test_total_pos)*100) + " %"
            # Store feature coverage (percentage of positions overlapping).
            #test_feat_cov = (test_c_1 / test_total_pos) * 100
            #test_cov_dic[feat_id] = test_feat_cov
            mdtext += """
### BED annotation file feature \"%s\" statistics
""" %(feat_id)
            if feat_type == "C":
                mdtext += """
**Table:** BED feature region length + score statistics for the
prediction set.
Feature type is one-hot encoding, i.e., every overlapping position
gets a 1 assigned, every not overlapping position a 0.
"""
            else:
                mdtext += """
**Table:** BED feature region length + score statistics for the
prediction set.
Feature type is numerical, i.e., every position gets the score of the
overlapping feature region assigned. In case of no feature region overlap,
the position gets a score of 0.
"""
            mdtext += "| Attribute | Prediction set |\n"
            mdtext += "| :-: | :-: |\n"
            mdtext += "| mean length | %.2f (+-%.2f) |\n" %(test_stats_dic["mean_l"], test_stats_dic["stdev_l"])
            mdtext += "| median length | %i |\n" %(test_stats_dic["median_l"])
            mdtext += "| min length | %i |\n" %(test_stats_dic["min_l"])
            mdtext += "| max length | %i |\n" %(test_stats_dic["max_l"])
            if feat_type == "C":
                mdtext += "| # total positions | %i |\n" %(test_total_pos)
                mdtext += "| # 0 positions | %i (%s) |\n" %(test_c_0, test_perc_0)
                mdtext += "| # 1 positions | %i (%s) |\n" %(test_c_1, test_perc_1)
                mdtext += '| % all-zero sites |' + " %s |\n" %(test_perc_zero_sites)
            else:
                mdtext += "| # total positions | %i |\n" %(test_total_pos)
                mdtext += "| # 0 positions | %i (%s) |\n" %(test_c_0, test_perc_0)
                mdtext += "| # non-0 positions | %i (%s) |\n" %(test_c_1, test_perc_1)
                mdtext += '| % all-zero sites |' + " %s |\n" %(test_perc_zero_sites)
                mdtext += "| mean score | %.3f (+-%.3f) |\n" %(test_mean, test_stdev)
            mdtext += "\n \n \n"
    print("Generate HTML report ... ")
    # Convert mdtext to html.
    md2html = markdown(mdtext, extensions=['attr_list', 'tables'])
    #OUTMD = open(md_out,"w")
    #OUTMD.write("%s\n" %(mdtext))
    #OUTMD.close()
    OUTHTML = open(html_out,"w")
    OUTHTML.write("%s\n" %(md2html))
    OUTHTML.close()
################################################################################
def get_ext_site_parts(id2bedrow_dic, chr_len_dic,
                       str_ext=150,
                       id2ucr_dic=False,
                       refid_dic=None,
                       extlen_dic=None,
                       id2extrow_dic=None,
                       id2newvp_dic=None):
    """
    Get extended site part lengths:
    [upstream structure calculation extension,
     upstream context extension,
     center region,
     downstream context extension,
     downstream structure calculation extension]

    id2bedrow_dic:
        Site ID to site BED row (region on reference), e.g.
        'sid1': 'id1\t950\t990\tsid1\t0\t+'
    chr_len_dic:
        reference (chromosome) ID -> reference length dic
        To check and prune extended sites at borders.
    str_ext:
        Amount of structure extension added on both sides,
        should be set to plfold_w.
    id2ucr_dic:
        viewpoint (uppercase) center region start and end
        coordinates for each site.
        Site ID -> [site_vp_start, site_vp_end]
    refid_dic:
        Store reference IDs from id2bedrow_dic.
    extlen_dic:
        Extended site lengths dic.
    id2extrow_dic:
        Extended reference BED row dictionary.
    id2newvp_dic:
        Stores new site viewpoint coordinates (not on reference but on sequence!).
        Site ID -> [new_vp_start, new_vp_end]

    >>> id2bedrow_dic = {'sid1': 'id1\\t950\\t990\\tsid1\\t0\\t+', 'sid2': 'id1\\t500\\t540\\tsid2\\t0\\t+', 'sid3': 'id1\\t0\\t40\\tsid3\\t0\\t+', 'sid4': 'id1\\t10\\t50\\tsid4\\t0\\t-'}
    >>> id2ucr_dic = {'sid1': [11,30], 'sid2': [11,30], 'sid3': [11,30], 'sid4': [6,30]}
    >>> chr_len_dic = {'id1' : 1000}
    >>> get_ext_site_parts(id2bedrow_dic, chr_len_dic, str_ext=100, id2ucr_dic=id2ucr_dic)
    {'sid1': [100, 10, 20, 10, 10], 'sid2': [100, 10, 20, 10, 100], 'sid3': [0, 10, 20, 10, 100], 'sid4': [100, 5, 25, 10, 10]}
    >>> id2bedrow_dic = {'sid1': 'id1\\t30\\t40\\tsid1\\t0\\t+', 'sid2': 'id1\\t110\\t120\\tsid2\\t0\\t+', 'sid3': 'id1\\t110\\t120\\tsid3\\t0\\t-'}
    >>> id2newvp_dic = {}
    >>> extlen_dic = {}
    >>> id2extrow_dic = {}
    >>> refid_dic = {}
    >>> chr_len_dic = {'id1' : 200}
    >>> get_ext_site_parts(id2bedrow_dic, chr_len_dic, str_ext=100, id2newvp_dic=id2newvp_dic, extlen_dic=extlen_dic, refid_dic=refid_dic, id2extrow_dic=id2extrow_dic)
    {'sid1': [30, 0, 10, 0, 100], 'sid2': [100, 0, 10, 0, 80], 'sid3': [80, 0, 10, 0, 100]}
    >>> id2newvp_dic
    {'sid1': [31, 40], 'sid2': [101, 110], 'sid3': [81, 90]}
    >>> refid_dic
    {'id1': 1}
    >>> id2extrow_dic
    {'sid1': 'id1\\t0\\t140\\tsid1\\t0\\t+', 'sid2': 'id1\\t10\\t200\\tsid2\\t0\\t+', 'sid3': 'id1\\t10\\t200\\tsid3\\t0\\t-'}
    >>> extlen_dic
    {'sid1': 140, 'sid2': 190, 'sid3': 190}

    """
    # Checks.
    assert id2bedrow_dic, "given id2bedrow_dic empty"
    assert 0 < str_ext <= 500, "provide reasonable str_ext"
    # Part lengths dictionary (function result).
    site2newl_dic = {}
    # Get new part lengths for each site.
    for site_id in id2bedrow_dic:
        cols = id2bedrow_dic[site_id].split("\t")
        seq_id = cols[0]
        assert seq_id in chr_len_dic, "sequence ID %s not in chr_len_dic" %(seq_id)
        ref_s = int(cols[1])
        ref_e = int(cols[2])
        # Use the BED name column as site ID from here on (matches dict key).
        site_id = cols[3]
        ref_pol = cols[5]
        site_len = ref_e - ref_s
        # Default viewpoint: the whole site (1-based coordinates on the site).
        site_vp_s = 1
        site_vp_e = site_len
        if id2ucr_dic:
            assert site_id in id2ucr_dic, "site ID %s not in id2ucr_dic" %(site_id)
            site_vp_s = id2ucr_dic[site_id][0]
            site_vp_e = id2ucr_dic[site_id][1]
        site_vp_len = site_vp_e - site_vp_s + 1
        # Context extensions already inside the original site region.
        us_con_ext = site_vp_s - 1
        ds_con_ext = site_len - site_vp_e
        # Structure calculation extensions added beyond the site borders.
        us_site_ext = str_ext
        ds_site_ext = str_ext
        ref_ext_s = ref_s - us_site_ext
        ref_ext_e = ref_e + ds_site_ext
        # Prune extensions overlapping the reference ends.
        ref_len = chr_len_dic[seq_id]
        if ref_ext_s < 0:
            # ref_ext_s is negative here, so this shrinks the upstream extension.
            us_site_ext += ref_ext_s
            ref_ext_s = 0
        if ref_ext_e > ref_len:
            ds_site_ext -= ref_ext_e - ref_len
            ref_ext_e = ref_len
        # Minus strand: upstream/downstream swap relative to the reference.
        if ref_pol == "-":
            us_site_ext, ds_site_ext = ds_site_ext, us_site_ext
        # Sanity check: part lengths must add up to the extended region length.
        ref_ext_l = ref_ext_e - ref_ext_s
        new_site_l = us_site_ext + us_con_ext + site_vp_len + ds_con_ext + ds_site_ext
        assert ref_ext_l == new_site_l, "ref_ext_l != new_site_l (%i != %i) for site ID %s" %(ref_ext_l, new_site_l, site_id)
        # Store extended BED rows.
        if id2extrow_dic is not None:
            id2extrow_dic[site_id] = "%s\t%i\t%i\t%s\t0\t%s" %(seq_id, ref_ext_s, ref_ext_e, site_id, ref_pol)
        # Store new site viewpoint start+end (on the extended sequence).
        if id2newvp_dic is not None:
            new_vp_s = site_vp_s + us_site_ext
            new_vp_e = site_vp_e + us_site_ext
            id2newvp_dic[site_id] = [new_vp_s, new_vp_e]
        # Store reference IDs.
        if refid_dic is not None:
            refid_dic[seq_id] = 1
        if extlen_dic is not None:
            extlen_dic[site_id] = ref_ext_l
        # Store new part lengths.
        site2newl_dic[site_id] = [us_site_ext, us_con_ext, site_vp_len, ds_con_ext, ds_site_ext]
    assert site2newl_dic, "nothing stored inside site2newl_dic"
    return site2newl_dic
################################################################################
def calc_ext_str_features(id2bedrow_dic, chr_len_dic,
                          out_str, args,
                          check_seqs_dic=False,
                          stats_dic=None,
                          tr_regions=False,
                          tr_seqs_dic=False):
    """
    Calculate structure features (structural element probabilities)
    by using extended sequences, and then prune them to match
    remaining feature lists.

    Sites are extended on both sides by args.plfold_w before the
    structure calculation; afterwards the extension rows are cut off
    again so the output lines up with the unextended sites.

    id2bedrow_dic:
        Site ID to BED region (tab separated)
    chr_len_dic:
        Reference sequence lengths dictionary.
    out_str:
        Output .str file.
    args:
        Arguments from rnaprot gt / gp.
    check_seqs_dic:
        Center sequences to compare to extended and truncated ones.
        Should be the same after extension, structure calculation, and
        truncation.
    stats_dic:
        For .html statistics.
    tr_regions:
        Are we dealing with transcript regions?
    tr_seqs_dic:
        If tr_regions supplied, transcript sequences need to be supplied
        as well.
    """
    assert id2bedrow_dic, "id2bedrow_dic empty"
    assert chr_len_dic, "chr_len_dic empty"
    print("Extend sequences by --plfold-w for structure calculations ... ")
    # Get extended parts and infos (all four dicts filled by the helper).
    id2newvp_dic = {} # viewpoint region coords on extended sequence (1-based).
    id2extrow_dic = {} # Extended sites BED on reference.
    extlen_dic = {} # Extended lengths of sites.
    refid_dic = {} # Reference IDs.
    id2newl_dic = get_ext_site_parts(id2bedrow_dic, chr_len_dic,
                                     str_ext=args.plfold_w,
                                     id2ucr_dic=False,
                                     refid_dic=refid_dic,
                                     extlen_dic=extlen_dic,
                                     id2extrow_dic=id2extrow_dic,
                                     id2newvp_dic=id2newvp_dic)
    # Checks.
    assert id2extrow_dic, "id2extrow_dic empty"
    # tmp files (random UUID names avoid collisions between runs).
    random_id = uuid.uuid1()
    tmp_fa = str(random_id) + ".tmp.fa"
    random_id = uuid.uuid1()
    tmp_bed = str(random_id) + ".tmp.bed"
    random_id = uuid.uuid1()
    tmp_str_out = str(random_id) + ".tmp.str"
    # If transcript regions.
    if tr_regions:
        # Checks.
        assert tr_seqs_dic, "tr_seqs_dic empty"
        for ref_id in refid_dic:
            assert ref_id in tr_seqs_dic, "reference ID %s not in tr_seqs_dic" %(ref_id)
        # Get extended sequences directly from the supplied transcript sequences.
        seqs_dic = extract_transcript_sequences(id2extrow_dic, tr_seqs_dic)
        # Write sequences to FASTA.
        fasta_output_dic(seqs_dic, tmp_fa)
    else:
        # Genomic regions: extract extended sequences from the .2bit genome.
        bed_write_row_dic_into_file(id2extrow_dic, tmp_bed)
        # Extract sequences.
        bed_extract_sequences_from_2bit(tmp_bed, tmp_fa, args.in_2bit)
    # Check extracted sequences, replace N's with random nucleotides.
    polish_fasta_seqs(tmp_fa, extlen_dic,
                      vp_check_seqs_dic=check_seqs_dic,
                      vp_dic=id2newvp_dic)
    # Compute structural element probabilities on the extended sequences.
    calc_str_elem_p(tmp_fa, tmp_str_out,
                    stats_dic=stats_dic,
                    plfold_u=args.plfold_u,
                    plfold_l=args.plfold_l,
                    plfold_w=args.plfold_w)
    print("Post-process structure files ... ")
    # Refine elem_p.str.
    str_elem_p_dic = read_str_elem_p_into_dic(tmp_str_out,
                                              p_to_str=True)
    assert str_elem_p_dic, "str_elem_p_dic empty"
    SEPOUT = open(out_str,"w")
    for site_id in str_elem_p_dic:
        # Cut the structure-calculation extensions off again so rows match
        # the unextended site positions.
        us_ext = id2newl_dic[site_id][0]
        ds_ext = id2newl_dic[site_id][4]
        # Checks.
        len_ll = len(str_elem_p_dic[site_id])
        total_ext = us_ext + ds_ext
        assert len_ll > total_ext, "len_ll <= total_ext for site ID %s" %(site_id)
        if ds_ext:
            new_ll = str_elem_p_dic[site_id][us_ext:-ds_ext]
        else:
            # If ds_ext == 0 (slicing with -0 would return an empty list).
            new_ll = str_elem_p_dic[site_id][us_ext:]
        assert new_ll, "new_ll empty for site ID %s (us_ext = %i, ds_ext = %i, len_ll = %i)" %(site_id, us_ext, ds_ext, len_ll)
        SEPOUT.write(">%s\n" %(site_id))
        for l in new_ll:
            s = "\t".join(l)
            SEPOUT.write("%s\n" %(s))
    SEPOUT.close()
    # Remove tmp files.
    if os.path.exists(tmp_fa):
        os.remove(tmp_fa)
    if os.path.exists(tmp_bed):
        os.remove(tmp_bed)
    if os.path.exists(tmp_str_out):
        os.remove(tmp_str_out)
################################################################################
def polish_fasta_seqs(in_fa, len_dic,
                      vp_dic=False,
                      vp_check_seqs_dic=False,
                      report=False,
                      repl_alphabet=["A","C","G","U"]):
    """
    Read in FASTA sequences, check lengths, and replace N's with
    nucleotide characters from repl_alphabet. Overwrite original in_fa
    with polished content.

    in_fa:
        Input FASTA file; overwritten in place with polished sequences.
    len_dic:
        Sequence ID -> expected sequence length (checked per sequence).
    vp_dic:
        Sequence ID -> [vp_start, vp_end]; if set, the viewpoint region
        is re-applied to the polished sequence via
        update_sequence_viewpoint (helper defined elsewhere).
    vp_check_seqs_dic:
        Sequence ID -> reference sequence; if set, the uppercase region of
        the polished sequence must equal the reference's uppercase region.
    report:
        If True, print a warning whenever N's get replaced.
    repl_alphabet:
        Characters randomly substituted for each N.
        NOTE(review): mutable default argument; harmless as long as it is
        never mutated inside the function (it is not).
    """
    import random
    assert len_dic, "len_dic empty"
    # Read in FASTA (do not skip N containing extensions here).
    seqs_dic = read_fasta_into_dic(in_fa,
                                   skip_n_seqs=False)
    assert len(seqs_dic) == len(len_dic), "len(seqs_dic) != len(len_dic)"
    FAOUT = open(in_fa,"w")
    for seq_id in seqs_dic:
        # Uppercase first; viewpoint casing gets re-applied below if vp_dic set.
        seq = seqs_dic[seq_id].upper()
        assert seq_id in len_dic, "sequence ID %s not in seqs_dic" %(seq_id)
        assert len(seq) == len_dic[seq_id], "sequence length != len_dic length (%i != %i)" %(len(seq), len_dic[seq_id])
        new_seq = seq
        if re.search("N", seq):
            if report:
                print("WARNING: N nucleotides encountered for sequence ID %s. Apply polishing ... " %(seq_id))
            # Rebuild the sequence, replacing each N with a random nucleotide.
            new_seq = ""
            for c in seq:
                new_c = c
                if c == "N":
                    new_c = random.choice(repl_alphabet)
                new_seq += new_c
        if vp_dic:
            assert seq_id in vp_dic, "sequence ID %s not in vp_dic" %(seq_id)
            vp_s = vp_dic[seq_id][0]
            vp_e = vp_dic[seq_id][1]
            new_seq = update_sequence_viewpoint(new_seq, vp_s, vp_e)
        if vp_check_seqs_dic:
            # Sanity check: polishing must not have changed the viewpoint part.
            assert seq_id in vp_check_seqs_dic, "sequence ID %s not in vp_check_seqs_dic" %(seq_id)
            vp_seq1 = seq_get_vp_region(new_seq)
            vp_seq2 = seq_get_vp_region(vp_check_seqs_dic[seq_id])
            assert vp_seq1, "uppercase sequence region extraction failed for vp_seq1 (ID: %s, seq: %s)" %(seq_id, new_seq)
            assert vp_seq2, "uppercase sequence region extraction failed for vp_seq2 (ID: %s, seq: %s)" %(seq_id, vp_check_seqs_dic[seq_id])
            assert vp_seq1 == vp_seq2, "vp_seq1 != vp_seq2 for ID %s (\"%s\" != \"%s\")" %(seq_id, vp_seq1, vp_seq2)
        FAOUT.write(">%s\n%s\n" %(seq_id,new_seq))
    FAOUT.close()
################################################################################
def conv_ch_info_dic(ch_info_dic,
                     conv_mode=1):
    """
    Convert ch_info_dic, storing channel infos, between the sequence
    "embed" representation (one channel) and the one-hot representation
    (four channels), re-indexing all channel numbers in order.

    conv_mode:
        If 1, convert from sequence embed to one-hot.
        If 2, convert from one-hot to embed.

    Example ch_info_dic format with embedded "fa":
    ch_info_dic:  {'fa': ['C', [0], ['embed'], 'embed'],
                   'CTFC': ['C', [1, 2], ['0', '1'], 'one_hot'],
                   'pc.con': ['N', [3], ['phastcons_score'], 'prob'],
                   'pp.con': ['N', [4], ['phylop_score'], 'minmax2'],
                   'rra': ['C', [5, 6], ['N', 'R'], '-'],
                   'str': ['N', [7, 8, 9, 10, 11], ['E', 'H', 'I', 'M', 'S'], 'prob'],
                   'tra': ['C', [12, 13, 14, 15, 16, 17, 18, 19, 20], ['A', 'B', 'C', 'E', 'F', 'N', 'S', 'T', 'Z'], '-']}

    >>> ch_info_dic = {'fa': ['C', [0], ['embed'], 'embed'], 'rra': ['C', [1, 2], ['N', 'R'], '-']}
    >>> new_ch_info_dic = conv_ch_info_dic(ch_info_dic, conv_mode=1)
    >>> new_ch_info_dic
    {'fa': ['C', [0, 1, 2, 3], ['A', 'C', 'G', 'U'], 'one_hot'], 'rra': ['C', [4, 5], ['N', 'R'], '-']}
    >>> conv_ch_info_dic(new_ch_info_dic, conv_mode=2)
    {'fa': ['C', [0], ['embed'], 'embed'], 'rra': ['C', [1, 2], ['N', 'R'], '-']}

    """
    assert ch_info_dic, "given ch_info_dic empty"
    next_idx = 0
    for fid, finfo in ch_info_dic.items():
        if fid == "fa":
            if conv_mode == 1:
                # Sequence channel expands to four one-hot channels.
                finfo[1] = [next_idx, next_idx + 1, next_idx + 2, next_idx + 3]
                finfo[2] = ['A', 'C', 'G', 'U']
                finfo[3] = "one_hot"
                next_idx += 4
            else:
                # Sequence channels collapse back to a single embed channel.
                finfo[1] = [next_idx]
                finfo[2] = ["embed"]
                finfo[3] = "embed"
                next_idx += 1
        else:
            # Other features keep their channel count; only re-index.
            n_ch = len(finfo[1])
            finfo[1] = list(range(next_idx, next_idx + n_ch))
            next_idx += n_ch
    return ch_info_dic
################################################################################
def conv_embed_feature_list(feat_list,
                            l1d=False):
    """
    Assuming feature list with first feature being sequence embedding,
    convert to new feature list with one-hot encoding as first 4 features.

    l1d:
        If input is 1D-list, not 2D.

    >>> feat_list = [[3, 0, 1, 0.2], [4, 1, 0, 0.5]]
    >>> conv_embed_feature_list(feat_list)
    [[0, 0, 1, 0, 0, 1, 0.2], [0, 0, 0, 1, 1, 0, 0.5]]
    >>> feat_list = [3, 0, 1, 0.2]
    >>> conv_embed_feature_list(feat_list, l1d=True)
    [0, 0, 1, 0, 0, 1, 0.2]

    """
    assert feat_list, "given feat_list empty"
    # Embedding value (1..4) -> one-hot nucleotide vector.
    embed2onehot = {1: [1,0,0,0], 2: [0,1,0,0], 3: [0,0,1,0], 4: [0,0,0,1]}
    if l1d:
        return embed2onehot[feat_list[0]] + feat_list[1:]
    return [embed2onehot[row[0]] + row[1:] for row in feat_list]
################################################################################
def seq_get_vp_region(seq):
    """
    Get viewpoint (uppercase region) from a sequence.
    Returns False if the sequence contains no uppercase region.

    >>> seq_get_vp_region("acguAACCGGacgu")
    'AACCGG'
    >>> seq_get_vp_region("ACGU")
    'ACGU'

    """
    assert seq, "seq empty"
    # First uppercase stretch after optional lowercase context.
    match = re.search("[acgun]*([ACGUN]+)", seq)
    if match is None:
        return False
    return match.group(1)
################################################################################
def drop_a_line(theme=1):
    """
    Drop a line: return one of four movie quote strings, chosen at random.

    theme:
        Unused parameter, kept for backwards-compatible call signatures.
    """
    import random
    # NOTE(review): a large unused local (ASCII-art banner string `ref`)
    # was removed here; it was assigned but never referenced.
    a = """
    \"Let's Party!\"
"""
    b = """
    \"I eat Green Berets for breakfast.\"
"""
    c = """
    \"There's always barber college.\"
"""
    d = """
    \"Yo, Steve! You're history.\"
"""
    return random.choice([a, b, c, d])
################################################################################
################################################################################
"""
"We could not understand because we were too far and could not remember
because we were travelling in the night of first ages, of those ages
that are gone, leaving hardly a sign - and no memories." J.C.
^OMQQOO6|^OQQM6MOMMMOQQQQMMMMOMIQMQMQOOO6QO6O6QQMQQQMO6QMMOOQMMQQMQQOI66IOOQQMMM|QQO66
OOQQM!I|6OMOQMOQOQQQOMQQOMMMMQMQQMQOQOOQI^QOMOQMQMMQMMOOQOQQOQMQMQMMOOOOQQMOMQMQOQQQOO!
. .MOQQQM6OOQMMMMMMMOMQQQ66IQMMMMMQQOOMOIOO^6|QQMMQMQMOMQOOQOQQOOMQQMM6IOOQQMMQMMQMQQOQ6I
.|. I6Q6MQQMQOMMMMMMMMMMMMOO|6MMQQMMQ!O6MO|IQ6O6MMMMQMMI6I6QMQOQMOO6MMQQQMI6QOQQQMMMMQQOQOO
6|IQQOMMMMMMMMMMMMMQMQMO6O6MM6MQO|IOMOO6IMQQQMMMMQOOOQ6MQQMQMMMMQMMMMQMMQQQQMMQQQMMQOQOO
..Q^OIIOMMMQQMQMMMMMMMMMMMQ6OOQOOQQQQ|MQQOQMMQMMMQMQO6|6OOMMQMMMMMMMMMMMMQMMMQOMMMQMMQQQQMQ|
|! QOMO6MMMQQ6QMQMMMMMQMMMQQMMQQQMMOQO||OQOQOMMMMMMOMQ6OQQMMMMMQMMMMQMMMMMMMQQMOQQQMMMMQMMQQ!^
OQMQQMMMMMMMQOQMMMQMMMMMQMOQOM6OQQMQ!!I66OOO66QMMMOQ|QOOMMMMMMMMMMMMMMMMMMMMMMMMQMMMMMMQMQOQOO ..
. !6 6MMMMMMMMMMMMQMMQQMQMQMMMMMQQQQOMO^I|IQOO^!|6QMMQOOO6OQMMMMMMMMQMMMMMMMMMMMMMMMMMMQMMQMOMQII^ Q^
.| . .Q6MMMMMMIQMMMQQMMMOQMQMMMMQMQMOQQ6I6!6QO|..^|6OQOOI6OMMQMQMMMMMMMMMQQ6I6OMQMMMQMMMMMQMMQQQMQQO6
M.OI666MMMMQOQMMQ6OMQOQQMMQQQMQMMQQQO!.OQQ|O..^!IOIIIOOQMMOQMQMMMMMMMQMMMMMMMMQQMQMMMMMMMMMQQ6OQMQ
QM6MMMMQMQQMMMMOMOQQMQMMMMMM6OQMO6|OI66O|^^!||II!^^!!6O6O6QMMMQMMMMMQQMQMMMMMMMMQI6QMMMMOQMIM.QQQ
|MMMOMQQ6MMMMQQMMMQQMMMMMMQOQ6|^!|6OI6O|!!|6I|!!^^^^!|I6OOOMQMQMMOQMMMMMMMMQMMQMQMQMMMMMQMQ|!6QII
O .M66MOQMMMMMMMMOMQMMMMQMOQO6I6|^I6I||666III|!^^.^^.^!|I6OOQMQMMMQQQMMMMMMOOOMMQMQMMMMMMMQMQQ QM
I.6Q6OOOQQMMMMMMMQMMMMMMMMOQ6II6O66QQOOQOQOOI!^^^^.^^!I6OMQQMQQQMQQMMQMMMMMMQOMMMQMMMMMMMMQMMMQMOI
66MQQQOQQ6MMMMMOMMMMMMQMMQ6OO6OOMMQMMMMQMMOO6|^^.^^^!6QQMMMMMMMQMMMQQQMMMMMMQMQQQMMMMMMMMMMMMMMQQOI
|O^ QMQOQQOQ66QMQOOMQMQMQMMM6QO6666OO6OQMMO6OMQQI^.^.!|6QMMMMMMMMMOQMMQMMMMQQQQMMQQMMMMMMMMMMMMMMQMMOQ
|MQMQOMOOOO6QMQMMMMMMMMMQQM|^^.!6^^..I|^I6OOII....!6MMMMMQI!66II6OQMQOOQOQQM6OMQMMMMMMMMMMMMMMMQMQO^
66QQMMQQOQQMMMOQMMMMMMQMMQQMO!.. ..!!^!|||I!^^!^. .^!6OO66II|I6|66OO6666O6OQMQMMQMMMMMMMMMMQMMMMMMMQQ.
^ OMMMQOQ6QOMOQMMMMMQMMMMQQM|^. . ...........^^.. .!66!|!^^^.^^!|!!|!||6OOQMMMMMMMMMMMMMMMMMMMMMMQMQ.
QQMMMMOQMQQOOMQ6MQOOQOQMMM|^.. .. .. .. .^.... ^!I|!^!^..^^.^^^^^!|I66OQMQQMMMMMMMMQMMMMMMMMMMQMOO
OOOMMMQMMMMQMQQQMMMMMMMQMQ!........ . . ...^.. .!I|I!^^.^^^^^^!^^!!|6OQQMMMMOOMMQMQMMMMMMMMMMMQM QO .
6|I6MMQMMQMOQQQMMMQMMMMMM6!^^^....... . ..^.^.^ ..^II!|!^^^^^^^!^!^!|6OOOQMQMMMMMMMMMMMMMMMMMQQMIMQ M^
I .66QQQMMMMQOOO6QQMMMMMMO|!^^...^.... ...^^^.^..^!I6|I|^^^^^^^!!^!|I6OOOQOQQMMMMMMMMQMMMMMOMMMQM OQ.6
.6 .6OOMMMMMQMMMMQMMMMMQQ|!^^^.^..^......^.^.^. ^!II|I6^^^^^!^!!!|IIOOOQOMMMMMMMMMMMMMMMMMMMMMOM| !M Q
O|Q. .MMMMQMMMMMMMMMMMMQQ|!^.^..^^..^...^.^^..^.^!||||I^.^!^^!||!II6OQQQQMOMMMMMMMMMMMMMMMMMMMMQO |I|6
. .Q| ^^OMMMMMMMMMMMMMMMMMM|!^!^^^..^^...^.6QMI!^^!6OOQOO|^.^^^!!||6O6OQOMMMMMMMQMMMMMMMMMMMQMMMMQO Q.Q
! 6.QOQMQQMMMMMMMMMMMMMMQMQ!!!^^^^^..^.....!6||OIOOQMMMMM6^^^^!^!!|6OOOQQMMMMMMMMMMMMMMQQMMMQMOMQOI Q|!.
IOM. OMMMMMMMMMMMMQMQMM!!!!!^.^^... ...^^!^^IQMMMMQO6|^^^!!||I66OQOQQQMMMMMMMMMMMMMMMMMOQ6I|OMI QQQ
I^|6O.QMMMMMMMMMMMMMMMMM!^!||^^^... ...^^^^..!III|6I|^^^^^^!|I|66QQQOQQMMMMMMMMMMMMMMM|QOOI6Q6| |M Q
!.. I ^QMMMMMMMMMMMMMMMMMMMQ!^^||!!.^.^.^^...^..^^^..^!!^!^.^!!||6OOQOQOOQMMMMMMMMMMQMMQQOQMQQII|.|6Q.O^
.^^|IO6QOQMMMMQMMMMMMMMMMMMMQM|!!!!|!^^^.^^^..^.!^....|||^!!^!!|IIO6OQMQQOQMMMMMMMMMQQMQMQMMMQQMOI6QOM^
^ 66OQQQQMMMMMQOQMMMMMMMMMMMM|!!!||^!!!6QMQQQ66QOMMMMMMMQQQQQO6OOQQQQOOQQMMMMMMMMQQOQQMQQOOQMQQOO!O
! O6Q6QMMQMMOMOMMMMMMMMMMMMMM|!!|I|!I!!^^.^^^. .....^|!|||III6OQOQQQOQQMMMMMMMMMMMQQMQMMQMMMQQMM|
OQQOQQQQO66QMMMMMMMQMMMMMMMM6|!|II!!^!^!^^!.^.^^^^|||6OO66OQOOQMQOQQMMMMMMQMMMQMQMMMQMQMMQMQ.M
QOM..^IQQMMQMMM!QMMMMMMMMMMMQI!||II|||6OOQQOOQOQQQQMQQQQOOOQQQQMQMMMMMMMMMMMQQQMOMQMO^OO QM.
6MQQOQQMMMMMIOMMMMMMMMMMMMMQO||!!||||!|I6OQQMQQMMQO6O6OOQQQMQQMMMMMMMMMMMMMMMQOMMMMM6QM
. QQQM6QMMMMMMMOQQMMMOOQ6||!^!^^....^^!^I|I!||I|OOOQQQMMMMMMMMMMMMMQMMMMMMQIIQOM6 .^^!^!^^^!
|OQMOQMMMMMQMM6QMMMIOOQQ|!!!!^^^^^^!!^^!!!|6I6OOQOMMMMMMMMMMMMMMMMMMMMMMMMMQM6! .|I|!!!^^^^^^^^^.^.^^
MMMMIMQMMMQMQ6QMMM6666OOI||!!!!!^||!|!|I|6IIOQQMMMMMMMMMMMMMMMMMMMMMMMQMOQMM6. QO6|!^^^.^^.^..^...^^...
QQMQQMMQMQIOOOQMMMOI6II6OO6I|IIIIII6II6II6OOQQMMMMMMMMMMMMMMMMMMMMMMMMMMMMM. IQO6!^^^.^..^..^...^...^^.^
MQOQMQMMMQQOMMMMMQ|I||I6OOQQOOQQOOOOQQQQQQMMMMMMMMMMMMMMQMMMMMMMMQMMMMMMMMI 6O66|!^.^^..^...........^...^
.^^^^ |I!MMMMQMMMQ6MOMMMMMIII|||I6OOQMMMQMMMQMMMMMMMMMMMMMMMMQQQQQMMMMMMMMMMMMMMMMQMO6QO6I|..^.........^......^.^..^
^^^.^^^^^^^^ !QMMMMQOMMQQQMQMMO||!!!|IOOOQMQMQMMMMMMQMMMMMMMMMMMQQOOOOQQMQQMMMMMMMMMMMMQQQO6I!^.^...............^.....^^
. .^...^^..^^^^^ .^|Q6MOMMMMQQMQI!!|!^!|II66OQQQQMQMQMQMMMMMMMMMQQOOOOOOOOOQ6OQMQQQMMMMQMMQ6||!^.......... ........!..^^.^
.........^.^.^!^^|I! ^OMMQMMMMOQ!!!!!^!^|!|66OQQQQQMQMMMMMMMMMMQOQO6O6O66OOOQ6O66QQOOQQ6I|!!^....... .... .......^..^.^^!
.. . .... ....^.^^^!|III6| 6QQQOQ6!!!!^^!!^!|I66OQQQMQQMMMMMMMMQQOOOOO6O66666^QI66I66I|!^^^...^^.^.^^^...... . ......^^.^!^!
.. . ... ......^^^^!!||II6|. !QMMMMOQOI|^^^!^!^!!!!I666QOQQOQMQMQMQQOOO66O6666I6III6|IIIII||!^!^^^!!!^.^.......... .....^..^.^!^!
. . . .. .....^.^^^^!^!||||6O6OMQMMQMOQ6I!!!^!^^^^^^!^|I66O6OOOOQOOOO6666I6I6IIII|I 6I|I||I|I||!!!!!!^^^^..^................^.^^^^!
. . . .. . ......^^^!!!!^!!!|IOOQQQ6Q6|||!!^!!^^^^^^!^!||II666O6666666I666II|I!|||IO|!|I|!||!!!^!^^^^^^....... this is the end
beautiful friend ...
"""
|
import numpy as np
from rlkit.torch.core import eval_np
def marollout(
        env,
        agent_n,
        max_path_length=np.inf,
        render=False,
        render_kwargs=None,
        shared_obs=False,
        shared_encoder=None,
        shared_groups=None,
        collect_raw_actions=False,
):
    """
    Roll out a multi-agent episode and collect a path dictionary.

    The following value for the following keys will be a 2D array, with the
    first dimension corresponding to the time dimension.
     - observations
     - actions
     - rewards
     - next_observations
     - terminals
    The next two elements will be lists of dictionaries, with the index into
    the list being the index into the time
     - agent_infos
     - env_infos

    env: multi-agent environment (reset() returns per-agent observations,
        step() takes a list of per-agent actions).
    agent_n: list of agents; each provides reset() and get_action(obs).
    max_path_length: episode is truncated after this many steps.
    render / render_kwargs: render the env each step if requested.
    shared_obs: if True, every agent receives the full joint observation.
    shared_encoder: optional encoder (or list of encoders when
        shared_groups is set) applied to the joint observation before
        per-agent action selection.
    shared_groups: per-agent group indices selecting which encoder in
        shared_encoder serves each agent — assumed indexable by agent
        index (TODO confirm against callers).
    collect_raw_actions: if True, also collect agent_info['raw_action']
        per agent per step under path['raw_actions'].
    """
    if render_kwargs is None:
        render_kwargs = {}
    num_agent = len(agent_n)
    observations = []
    actions = []
    if collect_raw_actions:
        raw_actions = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    # env_infos = dict()
    o_n = env.reset()
    [agent.reset() for agent in agent_n]
    next_o = None
    # NOTE(review): next_o is never used below; next_o_n is used instead.
    path_length = 0
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        a_n, agent_info_n = [],{}
        if collect_raw_actions:
            ra_n = []
        if shared_encoder:
            # Encode the joint observation once; per-agent slices are taken
            # below. o_n[None,:] adds a batch dimension for eval_np.
            if shared_groups:
                o_emb_n = [eval_np(sub_shared_encoder,o_n[None,:])[0] for sub_shared_encoder in shared_encoder]
            else:
                o_emb_n = eval_np(shared_encoder,o_n[None,:])[0]
        for i,agent in enumerate(agent_n):
            # Select this agent's observation view.
            if shared_encoder:
                if shared_groups:
                    o = o_emb_n[shared_groups[i]][i]
                else:
                    o = o_emb_n[i]
            elif shared_obs:
                o = o_n
            else:
                o = o_n[i]
            a, agent_info = agent.get_action(o)
            a_n.append(a)
            # Flatten per-agent info dicts into one dict keyed "<key> <agent_idx>".
            for key in agent_info.keys():
                agent_info_n[key+' '+str(i)] = agent_info[key]
            if collect_raw_actions:
                ra_n.append(agent_info['raw_action'])
        next_o_n, r_n, d_n, env_info = env.step(a_n)
        observations.append(o_n)
        rewards.append(r_n)
        terminals.append(d_n)
        actions.append(a_n)
        if collect_raw_actions:
            raw_actions.append(ra_n)
        agent_infos.append(agent_info_n)
        env_infos.append(env_info)
        # for key in env_info.keys():
        #     if key in env_infos.keys():
        #         env_infos[key].append(env_info[key])
        #     else:
        #         env_infos[key] = [env_info[key]]
        path_length += 1
        # Terminate only when ALL agents are done.
        if d_n.all():
            break
        o_n = next_o_n
        if render:
            env.render(**render_kwargs)
    # Ensure actions have an explicit per-agent feature dimension.
    actions = np.array(actions)
    if len(actions.shape) == 2:
        actions = np.expand_dims(actions, 2)
    if collect_raw_actions:
        raw_actions = np.array(raw_actions)
        if len(raw_actions.shape) == 2:
            raw_actions = np.expand_dims(raw_actions, 2)
    observations = np.array(observations)
    if not shared_obs:
        if len(observations.shape) == 2:
            observations = np.expand_dims(observations, 2)
        next_o_n = np.array(next_o_n)
        if len(next_o_n.shape) == 1:
            next_o_n = np.expand_dims(next_o_n, 1)
        # next_observations = observations shifted by one step, with the
        # final next observation appended.
        next_observations = np.vstack(
            (
                observations[1:, :, :],
                np.expand_dims(next_o_n, 0)
            )
        )
    else:
        next_observations = np.vstack(
            (
                observations[1:, :],
                np.expand_dims(next_o_n, 0)
            )
        )
    path = dict(
        observations=observations,
        actions=actions,
        rewards=np.array(rewards).reshape(-1, num_agent, 1),
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, num_agent, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
    if collect_raw_actions:
        path['raw_actions'] = raw_actions
    return path
# SPDX-FileCopyrightText: 2022 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): intentionally blank here; a deployment-specific settings
# override must set this before the site can run.
SECRET_KEY = ""
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Django core apps.
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "django.contrib.humanize",
    # Third-party apps.
    "django_countries",
    "phonenumber_field",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "django_bootstrap5",
    "django_extensions",
    # Project apps.
    "hackdb",
    "apikeys",
    "datarequest",
    "discorduser",
    "groupadmin",
    "ldapsync",
    "mailman2",
    "membership",
    "motd",
    "nfctokens",
    "posixusers",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    # Project middleware: API key authentication (after auth middleware).
    "apikeys.middleware.APIKeyMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "hackdb.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": ["hackdb/templates"],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                # `allauth` needs this from django
                "django.template.context_processors.request",
                # Project context processors.
                "motd.context_processors.motd_messages",
                "mailman2.context_processors.mailman2_prompts",
            ],
        },
    },
]
WSGI_APPLICATION = "hackdb.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": BASE_DIR / "db.sqlite3",
    }
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
    # This isn't a validator. It captures the changed password to store an alternative hash.
    {
        "NAME": "posixusers.password_validation.UpdatePosixPassword",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-gb"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
STATIC_ROOT = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
AUTHENTICATION_BACKENDS = [
    # Needed to login by username in Django admin, regardless of `allauth`
    "django.contrib.auth.backends.ModelBackend",
    # `allauth` specific authentication methods, such as login by e-mail
    "allauth.account.auth_backends.AuthenticationBackend",
]
# Development default: print e-mails to the console instead of sending.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
SITE_ID = 1
LOGIN_REDIRECT_URL = "home"
# HMAC confirmations don't obey the cooldown period
ACCOUNT_EMAIL_CONFIRMATION_HMAC = False
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = "account_email"
SOCIALACCOUNT_PROVIDERS = {}
# django-countries / phonenumber_field defaults.
COUNTRIES_FIRST = ["GB"]
PHONENUMBER_DEFAULT_REGION = "GB"
# UID/GID allocation ranges for generated POSIX accounts.
POSIXUSER_UID_MIN = 2000000000
POSIXUSER_UID_MAX = 2999999999
POSIXUSER_GID_MIN = 3000000000
POSIXUSER_GID_MAX = 3999999999
POSIXUSER_ID_MODE = "random"  # next, random
# Mailman integration: endpoints/credentials blank by default, all
# interactive/automatic behaviours disabled until configured.
MAILMAN_URL = ""
MAILMAN_API_URL = ""
MAILMAN_API_USERNAME = ""
MAILMAN_API_PASSWORD = ""
MAILMAN_ENABLE_INTERACTIVE_CHANGES = False
MAILMAN_ENABLE_ADDRESS_CHANGES = False
MAILMAN_ENABLE_AUTO_SUBSCRIBE = False
MAILMAN_ENABLE_AUTO_UNSUBSCRIBE = False
# LDAP sync: disabled-by-default connection settings; DRY_RUN guards
# against accidental writes until explicitly turned off.
LDAPSYNC_HOST = ""
LDAPSYNC_PORT = 636
LDAPSYNC_USE_SSL = True
LDAPSYNC_TLS = None
LDAPSYNC_USER = ""
LDAPSYNC_PASSWORD = ""
LDAPSYNC_BASE_DN = None
LDAPSYNC_USERS_BASE_DN = None
LDAPSYNC_GROUPS_BASE_DN = None
LDAPSYNC_POSIX_GROUPS_BASE_DN = None
LDAPSYNC_DOMAIN_SID = None
LDAPSYNC_DRY_RUN = True
# NFC token limits per user and log retention.
NFCTOKENS_USER_ENABLED_LIMIT = 10
NFCTOKENS_USER_TOTAL_LIMIT = 20
NFCTOKENS_LOG_RETENTION_DAYS = 30
|
<filename>utils.py
import os
import numpy as np
import matplotlib.colors as colors
from scipy.io import loadmat
import json
from xml.dom import minidom
import rasterio
import subprocess
from osgeo import ogr, osr
from geojson import Polygon
from datetime import datetime
def clip_tiff_by_shapefile(tiff_file, shapefile):
    """
    Clip a GeoTIFF to a shapefile outline using the gdalwarp CLI.

    tiff_file: input .tif path; output is written next to it with a
        '_clip.tif' suffix.
    shapefile: cutline shapefile path.

    Returns the output file path. The path is returned even when clipping
    fails (a warning is printed in that case), preserving the original
    best-effort behavior.
    """
    out_file = tiff_file[:-4] + '_clip.tif'
    # XXX: hacky, eventually port to native gdal
    command = ["gdalwarp", "-dstnodata", "nan", "-cutline", shapefile, tiff_file, out_file]
    try:
        subprocess.check_output(command)
    except (OSError, subprocess.CalledProcessError):
        # Narrowed from a bare except: still swallows a missing or failing
        # gdalwarp, but no longer hides KeyboardInterrupt/SystemExit.
        print("Clipping " + tiff_file + " failed!")
    return out_file
def convert_mat_to_json(filename, outfilename, source_epsg=32611, target_epsg=4326):
    """
    Convert a MATLAB .mat boundary file into a JSON polygon file.

    Loads boundary vertex arrays 'xb'/'yb' from the .mat file, builds an
    OGR polygon, reprojects it from source_epsg to target_epsg (defaults:
    UTM zone 11N -> WGS84), flattens to 2D, and writes the polygon's JSON
    representation to outfilename.
    """
    mat = loadmat(filename)
    X = mat['xb'][0]
    Y = mat['yb'][0]
    # Build a linear ring from the boundary vertices.
    ring = ogr.Geometry(ogr.wkbLinearRing)
    for x, y in zip(X, Y):
        ring.AddPoint(x, y)
    polygon = ogr.Geometry(ogr.wkbPolygon)
    polygon.AddGeometry(ring)
    # Reproject from the source to the target coordinate system.
    source = osr.SpatialReference()
    source.ImportFromEPSG(source_epsg)
    target = osr.SpatialReference()
    target.ImportFromEPSG(target_epsg)
    transform = osr.CoordinateTransformation(source, target)
    polygon.Transform(transform)
    # AddPoint creates 3D points; drop the z dimension before export.
    for i in range(0, polygon.GetGeometryCount()):
        point = polygon.GetGeometryRef(i)
        point.FlattenTo2D()
    aoi = json.loads(polygon.ExportToJson())
    with open(outfilename, 'w') as f:
        json.dump(aoi, f)
def convert_mat_to_aoi_bbox(filename, buf=1000, source_epsg=32611, target_epsg=4326):
    """
    Build a buffered bounding-box AOI polygon from a MATLAB .mat boundary file.

    Loads boundary vertex arrays 'xb'/'yb', takes their min/max extended by
    `buf` (in source CRS units), builds a closed bounding-box polygon,
    reprojects it from source_epsg to target_epsg (defaults: UTM zone 11N
    -> WGS84), flattens to 2D, and returns the polygon as a JSON string.
    """
    mat = loadmat(filename)
    x = mat['xb']
    y = mat['yb']
    xmax = x.max() + buf
    xmin = x.min() - buf
    ymax = y.max() + buf
    ymin = y.min() - buf
    # XXX: OGR/Planet API expects ring of points defining polygon
    # (first point repeated at the end to close the ring).
    bbox = [(xmax, ymax), (xmax, ymin), (xmin, ymin), (xmin, ymax), (xmax, ymax)]
    ring = ogr.Geometry(ogr.wkbLinearRing)
    for p in bbox:
        ring.AddPoint(p[0], p[1])
    polygon = ogr.Geometry(ogr.wkbPolygon)
    polygon.AddGeometry(ring)
    # Reproject from the source to the target coordinate system.
    source = osr.SpatialReference()
    source.ImportFromEPSG(source_epsg)
    target = osr.SpatialReference()
    target.ImportFromEPSG(target_epsg)
    transform = osr.CoordinateTransformation(source, target)
    polygon.Transform(transform)
    # AddPoint creates 3D points; drop the z dimension before export.
    for i in range(0, polygon.GetGeometryCount()):
        point = polygon.GetGeometryRef(i)
        point.FlattenTo2D()
    aoi = polygon.ExportToJson()
    return aoi
def load_image(filename, metadata_filename):
    """
    Load a PlanetScope GeoTIFF and scale its bands to reflectance.

    filename: path to the image (bands 1-3 read as blue, green, red).
    metadata_filename: Planet XML metadata file providing per-band
        ps:reflectanceCoefficient values.

    Returns a stacked array of the scaled bands.
    NOTE(review): the stacking order is [red, blue, green], as in the
    original code — unusual if RGB is expected; confirm with consumers.
    """
    # Read all three bands from a single open of the file (the original
    # opened the same file three times).
    with rasterio.open(filename) as src:
        band_blue = src.read(1)
        band_green = src.read(2)
        band_red = src.read(3)
    # Parse per-band reflectance coefficients from the Planet metadata XML.
    xmldoc = minidom.parse(metadata_filename)
    nodes = xmldoc.getElementsByTagName("ps:bandSpecificMetadata")
    coeff = {}
    for node in nodes:
        band_num = node.getElementsByTagName("ps:bandNumber")[0].firstChild.data
        if band_num in ['1', '2', '3', '4']:
            i = int(band_num)
            value = node.getElementsByTagName("ps:reflectanceCoefficient")[0].firstChild.data
            coeff[i] = float(value)
    # Scale digital numbers to reflectance.
    band_blue = band_blue*coeff[1]
    band_green = band_green*coeff[2]
    band_red = band_red*coeff[3]
    return np.stack([band_red, band_blue, band_green], axis=-1)
def print_json(data):
    """Pretty-print *data* to stdout as two-space-indented JSON."""
    formatted = json.dumps(data, indent=2)
    print(formatted)
def rfc3339(date_obj):
    """Render a datetime as an RFC 3339 timestamp string (e.g. 2020-01-02T03:04:05Z)."""
    # XXX: Assumes date_obj is UTC +0
    # TODO : TZ conversion
    return date_obj.strftime('%Y-%m-%dT%H:%M:%SZ')
class MidpointNormalize(colors.Normalize):
"""
Taken from Planet tutorial by <NAME> and others
https://github.com/planetlabs/notebooks/blob/master/jupyter-notebooks/ndvi/ndvi_planetscope.ipynb
Original Credit: <NAME>, http://chris35wills.github.io/matplotlib_diverging_colorbar/
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
|
<filename>ptt_crawler/ptt_crawler_utils.py
import requests
from bs4 import BeautifulSoup as bs
from datetime import datetime
from datetime import timedelta
import copy
import time
import re
import sys
import urllib.parse
from hashlib import md5
# PTT shows an age gate on some boards; this cookie bypasses it.
cookies = {'over18': '1'}

# Template dict copied (deepcopy) for every parsed article.
# NOTE(review): the 'date' default is evaluated once at import time, so any
# article whose date fails to parse keeps the module-load timestamp — confirm
# that is acceptable.
ARTICLE_SCHEMA = {
    'url': '',
    "board": "",
    "author": "",
    "title": "",
    "date": datetime.now(),
    "content": "",
    "reply": []
}

# Template dict copied for every push/boo reply under an article.
# Same import-time 'date' caveat as ARTICLE_SCHEMA.
REPLY_SCHEMA = {
    'tag': '',
    "author": "",
    "content": "",
    "date": datetime.now(),
    'hash': ''
}

BASE_URL = 'https://www.ptt.cc'

# Cloudflare email-protection inline script as it appears verbatim in PTT
# pages; stripped out of article text in Article.parse.
JS_CODE = "/* <![CDATA[ */!function(t,e,r,n,c,a,p){try{t=document.currentScript||function(){for(t=document.getElementsByTagName('script'),e=t.length;e--;)if(t[e].getAttribute('data-cfhash'))return t[e]}();if(t&&(c=t.previousSibling)){p=t.parentNode;if(a=c.getAttribute('data-cfemail')){for(e='',r='0x'+a.substr(0,2)|0,n=2;a.length-n;n+=2)e+='%'+('0'+('0x'+a.substr(n,2)^r).toString(16)).slice(-2);p.replaceChild(document.createTextNode(decodeURIComponent(e)),c)}p.removeChild(t)}}catch(u){}}()/* ]]> */"
def remove_email_protection(soup):
    """Decode Cloudflare email-protection anchors in *soup*, in place.

    Cloudflare obfuscates addresses as a hex string ('data-cfemail') whose
    first byte is an XOR key and whose remaining bytes are the key-XORed
    characters of the address.  Each obfuscated <a> is replaced by the
    plain-text address.  Returns the (mutated) soup for convenience.
    """
    email_tags = soup.find_all('a', class_="__cf_email__")
    for tag in email_tags:
        data = tag['data-cfemail']
        key = int(data[0:2], 16)
        # Rebuild the address as a percent-encoded string.  Each byte must be
        # zero-padded to two hex digits: the original hex(...)[2:4] dropped
        # the leading zero for values < 0x10, producing invalid %-escapes
        # that urllib.parse.unquote leaves undecoded.
        encoded = ''.join(
            '%%%02x' % (int(data[i:i + 2], 16) ^ key)
            for i in range(2, len(data), 2)
        )
        new_tag = soup.new_tag("email")
        new_tag.string = urllib.parse.unquote(encoded)
        tag.replace_with(new_tag)
    # Drop the temporary <email> wrappers, leaving bare text behind.
    for tag in soup.find_all("email"):
        tag.unwrap()
    return soup
def clean(s):
    """Collapse every run of whitespace in *s* (spaces, tabs, newlines) into a single space."""
    # Raw string for the pattern: '\s' in a plain string is an invalid escape
    # (DeprecationWarning on modern Python).  re.sub with a string pattern is
    # cached by the re module, so no explicit compile is needed.
    return re.sub(r'\s+', ' ', s)
class Article:
    """One crawled PTT article.

    Fetches the article page on construction and parses metadata, body text
    and push replies into ``self._data`` (an ARTICLE_SCHEMA-shaped dict, or
    None when the page could not be fetched/parsed).

    Raises StopIteration from parse() when the article is older than *days*;
    this deliberately propagates out of ArticleList.__next__ to end iteration.
    """

    def __init__(self, url, days):
        global ARTICLE_SCHEMA
        self._url = url
        # Accept either an int (number of days) or a ready-made timedelta.
        # Callers (ArticleList via Board/__main__) pass a timedelta, and the
        # previous unconditional timedelta(days=days) raised TypeError then.
        self._days = days if isinstance(days, timedelta) else timedelta(days=days)
        self._raw = ''
        self._data = copy.deepcopy(ARTICLE_SCHEMA)
        self.fetch()
        self.parse()

    def fetch(self):
        """Download the article page with retry/backoff.

        Leaves the HTML text in self._raw, or None when the URL 4xx's or
        more than 10 attempts fail.
        """
        global cookies
        fail_cnt = 1
        while True:
            print('try %s %d time'%(self._url, fail_cnt))
            fail_cnt += 1
            try: r = requests.get(self._url, cookies=cookies, timeout=2)
            except:
                # Network/timeout error: back off a little longer each retry.
                time.sleep(fail_cnt)
                continue
            if 400 <= r.status_code < 500 or fail_cnt > 10:
                # Client error (page gone) or retry budget exhausted.
                self._raw = None
                return
            if r.status_code != 200:
                # Server-side trouble: back off harder before retrying.
                time.sleep(fail_cnt * 10)
                continue
            break
        self._raw = r.text

    def parse(self):
        """Fill self._data from self._raw; set it to None on unparsable pages."""
        if not self._raw:
            self._data = None
            return
        global REPLY_SCHEMA
        soup = remove_email_protection(bs(self._raw, 'html.parser'))
        article_metaline = soup.find_all('div', class_='article-metaline')
        # The board name is the last token of the right-hand metaline; if it
        # is missing the page is not a normal article — give up.
        try: self._data['board'] = soup.find('div', class_='article-metaline-right').find('span', class_='article-meta-value').text.split()[-1]
        except:
            self._data = None
            return
        self._data['url'] = self._url
        # Author/title/date are best-effort: deleted or odd articles may lack
        # any of them, in which case the schema defaults are kept.
        try: self._data['author'] = article_metaline[0].find('span', class_='article-meta-value').text
        except: pass
        try: self._data['title'] = article_metaline[1].find('span', class_='article-meta-value').text
        except: pass
        try: self._data['date'] = datetime.strptime(article_metaline[2].find('span', class_='article-meta-value').text, '%a %b %d %H:%M:%S %Y')
        except: pass
        # Too old: signal the enclosing iteration to stop (see class docstring).
        if datetime.now() - self._data['date'] > self._days:
            raise StopIteration
        # Body text: skip the four metaline rows, cut at the '--' signature
        # separator if present, collapse whitespace, strip the Cloudflare JS.
        content = soup.find('div', id='main-content').text.split('\n')[4:]
        content = content[:content.index('--') if '--' in content else None]
        content = ' '.join(clean(line) for line in content)
        content = content.replace(JS_CODE, '')
        self._data['content'] = content
        for idx, reply in enumerate(soup.find_all('div', class_='push')):
            r = copy.deepcopy(REPLY_SCHEMA)
            span = reply.find_all('span')
            try: r['tag'] = span[0].text.strip()
            except: pass
            try: r['author'] = span[1].text.strip()
            except: pass
            # Reply text starts with ': ' — drop the leading colon.
            try: r['content'] = clean(span[2].text)[1:]
            except: pass
            # Reply timestamps lack a year; borrow the article's year.
            try: r['date'] = datetime.strptime(span[3].text.strip()+' %d'%self._data['date'].year, '%m/%d %H:%M %Y')
            except: pass
            # Stable-ish dedup key; idx disambiguates identical replies.
            r['hash'] = md5(('%s@%s@%s@%s@%d'%(r['author'], self._data['url'], r['date'], r['content'], idx)).encode()).hexdigest()
            self._data['reply'].append(r)
class ArticleList:
    """Iterator over the articles of one PTT board, walking from newer index
    pages towards older ones.  Iteration ends when an article older than
    *days* is reached (Article raises StopIteration) or pages run out.
    """

    def __init__(self, board, days):
        global BASE_URL
        self._board = board
        self._days = days
        self._url = '%s/bbs/%s/index.html'%(BASE_URL, board)
        self._next_url = None
        self._prev_url = None
        self._raw = ''
        self._article_urls = []
        self.fetch()
        self.parse()
        # NOTE(review): this second fetch immediately steps one page back, so
        # articles on the newest index page are skipped.  Looks deliberate
        # (possibly to avoid incomplete newest pages) — confirm with author.
        self._url = self._prev_url
        self.fetch()
        self.parse()

    def __iter__(self):
        return self

    def __next__(self):
        return self.get_next_post()

    def get_next_post(self):
        """Return the next parsable Article, loading older pages as needed."""
        if len(self._article_urls) == 0:
            if not self.get_next_page():
                raise StopIteration
        article = Article(self._article_urls.pop(0), self._days)
        # Unfetchable/unparsable articles yield _data == None; skip them.
        if not article._data:
            return self.get_next_post()
        return article

    def get_next_page(self):
        """Advance to the previous (older) index page; False when exhausted."""
        if self._prev_url is None: return False
        self._url = self._prev_url
        self.fetch()
        self.parse()
        return True

    def fetch(self):
        """Download self._url with retry/backoff into self._raw."""
        global cookies
        fail_cnt = 1
        while True:
            print('try %s %d time'%(self._url, fail_cnt))
            fail_cnt += 1
            try: r = requests.get(self._url, cookies=cookies, timeout=2)
            except:
                time.sleep(fail_cnt)
                continue
            if r.status_code != 200:
                time.sleep(fail_cnt)
                continue
            break
        self._raw = r.text

    def parse(self):
        """Extract article URLs and the paging links from the index HTML."""
        global BASE_URL
        # Name the parser explicitly (same as Article.parse): the bare
        # bs(self._raw) call warned and picked whichever parser was installed.
        soup = bs(self._raw, 'html.parser')
        self._article_urls = []
        for div in soup.find_all(class_='r-ent'):
            # Deleted articles keep the row but lose the title link.
            if div.find(class_='title').find('a'):
                self._article_urls.append(BASE_URL+div.find(class_='title').find('a')['href'])
        # Paging buttons: index 1 is the older page, index 2 the newer one;
        # href is absent (None) at either end of the board.
        self._next_url = soup.find(class_='btn-group btn-group-paging').find_all('a')[2].get('href')
        self._prev_url = soup.find(class_='btn-group btn-group-paging').find_all('a')[1].get('href')
        if self._next_url: self._next_url = BASE_URL + self._next_url
        if self._prev_url: self._prev_url = BASE_URL + self._prev_url
class Board:
    """Entry point for crawling one PTT board: Board(name).get_articles()."""

    def __init__(self, board):
        # PTT board name as it appears in the URL path, e.g. 'Gossiping'.
        self._board = board

    def get_articles(self, days=timedelta(days=10**5)):
        """Return an ArticleList iterating articles newer than *days*.

        NOTE(review): the default (and the __main__ caller) pass a timedelta
        here, while Article's constructor wraps its argument in
        timedelta(days=...) — confirm which type the pipeline expects.
        """
        return ArticleList(self._board, days)
if __name__ == '__main__' :
    # Usage: python ptt_crawler_utils.py <board> <days>
    board = sys.argv[1]
    days = timedelta(days=int(sys.argv[2]))
    # Crawl every article on <board> newer than <days> and dump its parsed
    # data dict to stdout, one line per article.
    for i, a in enumerate(Board(board).get_articles(days=days)):
        print(i, a._data)
|
"""Tools for polynomial factorization routines in characteristic zero. """
from sympy.polys.rings import ring, xring
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys import polyconfig as config
from sympy.polys.polyerrors import DomainError
from sympy.polys.polyclasses import ANP
from sympy.polys.specialpolys import f_polys, w_polys
from sympy import nextprime, sin, sqrt, I
from sympy.testing.pytest import raises, XFAIL
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
w_1, w_2 = w_polys()
def test_dup_trial_division():
    """Trial division of a univariate ZZ polynomial by its known factors."""
    R, x = ring("x", ZZ)
    assert R.dup_trial_division(
        x ** 5 + 8 * x ** 4 + 25 * x ** 3 + 38 * x ** 2 + 28 * x + 8, (x + 1, x + 2)
    ) == [(x + 1, 2), (x + 2, 3)]
def test_dmp_trial_division():
    """Same trial-division case as the dup_ variant, on a multivariate ring."""
    R, x, y = ring("x,y", ZZ)
    assert R.dmp_trial_division(
        x ** 5 + 8 * x ** 4 + 25 * x ** 3 + 38 * x ** 2 + 28 * x + 8, (x + 1, x + 2)
    ) == [(x + 1, 2), (x + 2, 3)]
def test_dup_zz_mignotte_bound():
    """Mignotte factor-coefficient bound for a univariate ZZ polynomial."""
    R, x = ring("x", ZZ)
    assert R.dup_zz_mignotte_bound(2 * x ** 2 + 3 * x + 4) == 32
def test_dmp_zz_mignotte_bound():
    """Mignotte bound agrees with the univariate case on a multivariate ring."""
    R, x, y = ring("x,y", ZZ)
    assert R.dmp_zz_mignotte_bound(2 * x ** 2 + 3 * x + 4) == 32
def test_dup_zz_hensel_step():
    """One Hensel lifting step: lift f = g*h (mod 5) to (mod 25).

    s, t are the Bezout cofactors with s*g + t*h == 1 (mod 5); the step
    returns the lifted factors G, H and cofactors S, T modulo 5**2.
    """
    R, x = ring("x", ZZ)
    f = x ** 4 - 1
    g = x ** 3 + 2 * x ** 2 - x - 2
    h = x - 2
    s = -2
    t = 2 * x ** 2 - 2 * x - 1
    G, H, S, T = R.dup_zz_hensel_step(5, f, g, h, s, t)
    assert G == x ** 3 + 7 * x ** 2 - x - 7
    assert H == x - 7
    assert S == 8
    assert T == -8 * x ** 2 - 12 * x - 1
def test_dup_zz_hensel_lift():
    """Full Hensel lift of the mod-5 factorization of x**4 - 1 to mod 5**4."""
    R, x = ring("x", ZZ)
    f = x ** 4 - 1
    F = [x - 1, x - 2, x + 2, x + 1]
    assert R.dup_zz_hensel_lift(ZZ(5), f, F, 4) == [x - 1, x - 182, x + 182, x + 1]
def test_dup_zz_irreducible_p():
    """Quick irreducibility check over ZZ.

    Presumably an Eisenstein-style criterion: None means the test is
    inconclusive, True means proven irreducible — TODO confirm against the
    dup_zz_irreducible_p implementation.
    """
    R, x = ring("x", ZZ)
    assert (
        R.dup_zz_irreducible_p(3 * x ** 4 + 2 * x ** 3 + 6 * x ** 2 + 8 * x + 7) is None
    )
    assert (
        R.dup_zz_irreducible_p(3 * x ** 4 + 2 * x ** 3 + 6 * x ** 2 + 8 * x + 4) is None
    )
    assert (
        R.dup_zz_irreducible_p(3 * x ** 4 + 2 * x ** 3 + 6 * x ** 2 + 8 * x + 10)
        is True
    )
    assert (
        R.dup_zz_irreducible_p(3 * x ** 4 + 2 * x ** 3 + 6 * x ** 2 + 8 * x + 14)
        is True
    )
def test_dup_cyclotomic_p():
R, x = ring("x", ZZ)
assert R.dup_cyclotomic_p(x - 1) is True
assert R.dup_cyclotomic_p(x + 1) is True
assert R.dup_cyclotomic_p(x ** 2 + x + 1) is True
assert R.dup_cyclotomic_p(x ** 2 + 1) is True
assert R.dup_cyclotomic_p(x ** 4 + x ** 3 + x ** 2 + x + 1) is True
assert R.dup_cyclotomic_p(x ** 2 - x + 1) is True
assert (
R.dup_cyclotomic_p(x ** 6 + x ** 5 + x ** 4 + x ** 3 + x ** 2 + x + 1) is True
)
assert R.dup_cyclotomic_p(x ** 4 + 1) is True
assert R.dup_cyclotomic_p(x ** 6 + x ** 3 + 1) is True
assert R.dup_cyclotomic_p(0) is False
assert R.dup_cyclotomic_p(1) is False
assert R.dup_cyclotomic_p(x) is False
assert R.dup_cyclotomic_p(x + 2) is False
assert R.dup_cyclotomic_p(3 * x + 1) is False
assert R.dup_cyclotomic_p(x ** 2 - 1) is False
f = x ** 16 + x ** 14 - x ** 10 + x ** 8 - x ** 6 + x ** 2 + 1
assert R.dup_cyclotomic_p(f) is False
g = x ** 16 + x ** 14 - x ** 10 - x ** 8 - x ** 6 + x ** 2 + 1
assert R.dup_cyclotomic_p(g) is True
R, x = ring("x", QQ)
assert R.dup_cyclotomic_p(x ** 2 + x + 1) is True
assert R.dup_cyclotomic_p(QQ(1, 2) * x ** 2 + x + 1) is False
R, x = ring("x", ZZ["y"])
assert R.dup_cyclotomic_p(x ** 2 + x + 1) is False
def test_dup_zz_cyclotomic_poly():
    """Construction of the n-th cyclotomic polynomial for n = 1..9."""
    R, x = ring("x", ZZ)
    assert R.dup_zz_cyclotomic_poly(1) == x - 1
    assert R.dup_zz_cyclotomic_poly(2) == x + 1
    assert R.dup_zz_cyclotomic_poly(3) == x ** 2 + x + 1
    assert R.dup_zz_cyclotomic_poly(4) == x ** 2 + 1
    assert R.dup_zz_cyclotomic_poly(5) == x ** 4 + x ** 3 + x ** 2 + x + 1
    assert R.dup_zz_cyclotomic_poly(6) == x ** 2 - x + 1
    assert (
        R.dup_zz_cyclotomic_poly(7)
        == x ** 6 + x ** 5 + x ** 4 + x ** 3 + x ** 2 + x + 1
    )
    assert R.dup_zz_cyclotomic_poly(8) == x ** 4 + 1
    assert R.dup_zz_cyclotomic_poly(9) == x ** 6 + x ** 3 + 1
def test_dup_zz_cyclotomic_factor():
R, x = ring("x", ZZ)
assert R.dup_zz_cyclotomic_factor(0) is None
assert R.dup_zz_cyclotomic_factor(1) is None
assert R.dup_zz_cyclotomic_factor(2 * x ** 10 - 1) is None
assert R.dup_zz_cyclotomic_factor(x ** 10 - 3) is None
assert R.dup_zz_cyclotomic_factor(x ** 10 + x ** 5 - 1) is None
assert R.dup_zz_cyclotomic_factor(x + 1) == [x + 1]
assert R.dup_zz_cyclotomic_factor(x - 1) == [x - 1]
assert R.dup_zz_cyclotomic_factor(x ** 2 + 1) == [x ** 2 + 1]
assert R.dup_zz_cyclotomic_factor(x ** 2 - 1) == [x - 1, x + 1]
assert R.dup_zz_cyclotomic_factor(x ** 27 + 1) == [
x + 1,
x ** 2 - x + 1,
x ** 6 - x ** 3 + 1,
x ** 18 - x ** 9 + 1,
]
assert R.dup_zz_cyclotomic_factor(x ** 27 - 1) == [
x - 1,
x ** 2 + x + 1,
x ** 6 + x ** 3 + 1,
x ** 18 + x ** 9 + 1,
]
def test_dup_zz_factor():
R, x = ring("x", ZZ)
assert R.dup_zz_factor(0) == (0, [])
assert R.dup_zz_factor(7) == (7, [])
assert R.dup_zz_factor(-7) == (-7, [])
assert R.dup_zz_factor_sqf(0) == (0, [])
assert R.dup_zz_factor_sqf(7) == (7, [])
assert R.dup_zz_factor_sqf(-7) == (-7, [])
assert R.dup_zz_factor(2 * x + 4) == (2, [(x + 2, 1)])
assert R.dup_zz_factor_sqf(2 * x + 4) == (2, [x + 2])
f = x ** 4 + x + 1
for i in range(0, 20):
assert R.dup_zz_factor(f) == (1, [(f, 1)])
assert R.dup_zz_factor(x ** 2 + 2 * x + 2) == (1, [(x ** 2 + 2 * x + 2, 1)])
assert R.dup_zz_factor(18 * x ** 2 + 12 * x + 2) == (2, [(3 * x + 1, 2)])
assert R.dup_zz_factor(-9 * x ** 2 + 1) == (-1, [(3 * x - 1, 1), (3 * x + 1, 1)])
assert R.dup_zz_factor_sqf(-9 * x ** 2 + 1) == (-1, [3 * x - 1, 3 * x + 1])
assert R.dup_zz_factor(x ** 3 - 6 * x ** 2 + 11 * x - 6) == (
1,
[(x - 3, 1), (x - 2, 1), (x - 1, 1)],
)
assert R.dup_zz_factor_sqf(x ** 3 - 6 * x ** 2 + 11 * x - 6) == (
1,
[x - 3, x - 2, x - 1],
)
assert R.dup_zz_factor(3 * x ** 3 + 10 * x ** 2 + 13 * x + 10) == (
1,
[(x + 2, 1), (3 * x ** 2 + 4 * x + 5, 1)],
)
assert R.dup_zz_factor_sqf(3 * x ** 3 + 10 * x ** 2 + 13 * x + 10) == (
1,
[x + 2, 3 * x ** 2 + 4 * x + 5],
)
assert R.dup_zz_factor(-(x ** 6) + x ** 2) == (
-1,
[(x - 1, 1), (x + 1, 1), (x, 2), (x ** 2 + 1, 1)],
)
f = (
1080 * x ** 8
+ 5184 * x ** 7
+ 2099 * x ** 6
+ 744 * x ** 5
+ 2736 * x ** 4
- 648 * x ** 3
+ 129 * x ** 2
- 324
)
assert R.dup_zz_factor(f) == (
1,
[
(5 * x ** 4 + 24 * x ** 3 + 9 * x ** 2 + 12, 1),
(216 * x ** 4 + 31 * x ** 2 - 27, 1),
],
)
f = (
-29802322387695312500000000000000000000 * x ** 25
+ 2980232238769531250000000000000000 * x ** 20
+ 1743435859680175781250000000000 * x ** 15
+ 114142894744873046875000000 * x ** 10
- 210106372833251953125 * x ** 5
+ 95367431640625
)
assert R.dup_zz_factor(f) == (
-95367431640625,
[
(5 * x - 1, 1),
(100 * x ** 2 + 10 * x - 1, 2),
(625 * x ** 4 + 125 * x ** 3 + 25 * x ** 2 + 5 * x + 1, 1),
(10000 * x ** 4 - 3000 * x ** 3 + 400 * x ** 2 - 20 * x + 1, 2),
(10000 * x ** 4 + 2000 * x ** 3 + 400 * x ** 2 + 30 * x + 1, 2),
],
)
f = x ** 10 - 1
config.setup("USE_CYCLOTOMIC_FACTOR", True)
F_0 = R.dup_zz_factor(f)
config.setup("USE_CYCLOTOMIC_FACTOR", False)
F_1 = R.dup_zz_factor(f)
assert (
F_0
== F_1
== (
1,
[
(x - 1, 1),
(x + 1, 1),
(x ** 4 - x ** 3 + x ** 2 - x + 1, 1),
(x ** 4 + x ** 3 + x ** 2 + x + 1, 1),
],
)
)
config.setup("USE_CYCLOTOMIC_FACTOR")
f = x ** 10 + 1
config.setup("USE_CYCLOTOMIC_FACTOR", True)
F_0 = R.dup_zz_factor(f)
config.setup("USE_CYCLOTOMIC_FACTOR", False)
F_1 = R.dup_zz_factor(f)
assert (
F_0 == F_1 == (1, [(x ** 2 + 1, 1), (x ** 8 - x ** 6 + x ** 4 - x ** 2 + 1, 1)])
)
config.setup("USE_CYCLOTOMIC_FACTOR")
def test_dmp_zz_wang():
R, x, y, z = ring("x,y,z", ZZ)
UV, _x = ring("x", ZZ)
p = ZZ(nextprime(R.dmp_zz_mignotte_bound(w_1)))
assert p == 6291469
t_1, k_1, e_1 = y, 1, ZZ(-14)
t_2, k_2, e_2 = z, 2, ZZ(3)
t_3, k_3, e_3 = y + z, 2, ZZ(-11)
t_4, k_4, e_4 = y - z, 1, ZZ(-17)
T = [t_1, t_2, t_3, t_4]
K = [k_1, k_2, k_3, k_4]
E = [e_1, e_2, e_3, e_4]
T = zip([t.drop(x) for t in T], K)
A = [ZZ(-14), ZZ(3)]
S = R.dmp_eval_tail(w_1, A)
cs, s = UV.dup_primitive(S)
assert (
cs == 1
and s
== S
== 1036728 * _x ** 6
+ 915552 * _x ** 5
+ 55748 * _x ** 4
+ 105621 * _x ** 3
- 17304 * _x ** 2
- 26841 * _x
- 644
)
assert R.dmp_zz_wang_non_divisors(E, cs, ZZ(4)) == [7, 3, 11, 17]
assert UV.dup_sqf_p(s) and UV.dup_degree(s) == R.dmp_degree(w_1)
_, H = UV.dup_zz_factor_sqf(s)
h_1 = 44 * _x ** 2 + 42 * _x + 1
h_2 = 126 * _x ** 2 - 9 * _x + 28
h_3 = 187 * _x ** 2 - 23
assert H == [h_1, h_2, h_3]
LC = [lc.drop(x) for lc in [-4 * y - 4 * z, -y * z ** 2, y ** 2 - z ** 2]]
assert R.dmp_zz_wang_lead_coeffs(w_1, T, cs, E, H, A) == (w_1, H, LC)
factors = R.dmp_zz_wang_hensel_lifting(w_1, H, LC, A, p)
assert R.dmp_expand(factors) == w_1
@XFAIL
def test_dmp_zz_wang_fail():
R, x, y, z = ring("x,y,z", ZZ)
UV, _x = ring("x", ZZ)
p = ZZ(nextprime(R.dmp_zz_mignotte_bound(w_1)))
assert p == 6291469
H_1 = [44 * x ** 2 + 42 * x + 1, 126 * x ** 2 - 9 * x + 28, 187 * x ** 2 - 23]
H_2 = [
-4 * x ** 2 * y - 12 * x ** 2 - 3 * x * y + 1,
-9 * x ** 2 * y - 9 * x - 2 * y,
x ** 2 * y ** 2 - 9 * x ** 2 + y - 9,
]
H_3 = [
-4 * x ** 2 * y - 12 * x ** 2 - 3 * x * y + 1,
-9 * x ** 2 * y - 9 * x - 2 * y,
x ** 2 * y ** 2 - 9 * x ** 2 + y - 9,
]
c_1 = (
-70686 * x ** 5 - 5863 * x ** 4 - 17826 * x ** 3 + 2009 * x ** 2 + 5031 * x + 74
)
c_2 = (
9 * x ** 5 * y ** 4
+ 12 * x ** 5 * y ** 3
- 45 * x ** 5 * y ** 2
- 108 * x ** 5 * y
- 324 * x ** 5
+ 18 * x ** 4 * y ** 3
- 216 * x ** 4 * y ** 2
- 810 * x ** 4 * y
+ 2 * x ** 3 * y ** 4
+ 9 * x ** 3 * y ** 3
- 252 * x ** 3 * y ** 2
- 288 * x ** 3 * y
- 945 * x ** 3
- 30 * x ** 2 * y ** 2
- 414 * x ** 2 * y
+ 2 * x * y ** 3
- 54 * x * y ** 2
- 3 * x * y
+ 81 * x
+ 12 * y
)
c_3 = (
-36 * x ** 4 * y ** 2
- 108 * x ** 4 * y
- 27 * x ** 3 * y ** 2
- 36 * x ** 3 * y
- 108 * x ** 3
- 8 * x ** 2 * y ** 2
- 42 * x ** 2 * y
- 6 * x * y ** 2
+ 9 * x
+ 2 * y
)
assert R.dmp_zz_diophantine(H_1, c_1, [], 5, p) == [-3 * x, -2, 1]
assert R.dmp_zz_diophantine(H_2, c_2, [ZZ(-14)], 5, p) == [-x * y, -3 * x, -6]
assert R.dmp_zz_diophantine(H_3, c_3, [ZZ(-14)], 5, p) == [0, 0, -1]
def test_issue_6355():
    """Regression test for sympy issue 6355."""
    # This tests a bug in the Wang algorithm that occurred only with a very
    # specific set of random numbers.
    random_sequence = [-1, -1, 0, 0, 0, 0, -1, -1, 0, -1, 3, -1, 3, 3, 3, 3, -1, 3]
    R, x, y, z = ring("x,y,z", ZZ)
    f = 2 * x ** 2 + y * z - y - z ** 2 + z
    # f is irreducible: the factorization must return f itself.
    assert R.dmp_zz_wang(f, seed=random_sequence) == [f]
def test_dmp_zz_factor():
R, x = ring("x", ZZ)
assert R.dmp_zz_factor(0) == (0, [])
assert R.dmp_zz_factor(7) == (7, [])
assert R.dmp_zz_factor(-7) == (-7, [])
assert R.dmp_zz_factor(x ** 2 - 9) == (1, [(x - 3, 1), (x + 3, 1)])
R, x, y = ring("x,y", ZZ)
assert R.dmp_zz_factor(0) == (0, [])
assert R.dmp_zz_factor(7) == (7, [])
assert R.dmp_zz_factor(-7) == (-7, [])
assert R.dmp_zz_factor(x) == (1, [(x, 1)])
assert R.dmp_zz_factor(4 * x) == (4, [(x, 1)])
assert R.dmp_zz_factor(4 * x + 2) == (2, [(2 * x + 1, 1)])
assert R.dmp_zz_factor(x * y + 1) == (1, [(x * y + 1, 1)])
assert R.dmp_zz_factor(y ** 2 + 1) == (1, [(y ** 2 + 1, 1)])
assert R.dmp_zz_factor(y ** 2 - 1) == (1, [(y - 1, 1), (y + 1, 1)])
assert R.dmp_zz_factor(x ** 2 * y ** 2 + 6 * x ** 2 * y + 9 * x ** 2 - 1) == (
1,
[(x * y + 3 * x - 1, 1), (x * y + 3 * x + 1, 1)],
)
assert R.dmp_zz_factor(x ** 2 * y ** 2 - 9) == (1, [(x * y - 3, 1), (x * y + 3, 1)])
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_zz_factor(x ** 2 * y ** 2 * z ** 2 - 9) == (
1,
[(x * y * z - 3, 1), (x * y * z + 3, 1)],
)
R, x, y, z, u = ring("x,y,z,u", ZZ)
assert R.dmp_zz_factor(x ** 2 * y ** 2 * z ** 2 * u ** 2 - 9) == (
1,
[(x * y * z * u - 3, 1), (x * y * z * u + 3, 1)],
)
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_zz_factor(f_1) == (
1,
[(x + y * z + 20, 1), (x * y + z + 10, 1), (x * z + y + 30, 1)],
)
assert R.dmp_zz_factor(f_2) == (
1,
[
(x ** 2 * y ** 2 + x ** 2 * z ** 2 + y + 90, 1),
(x ** 3 * y + x ** 3 * z + z - 11, 1),
],
)
assert R.dmp_zz_factor(f_3) == (
1,
[
(x ** 2 * y ** 2 + x * z ** 4 + x + z, 1),
(x ** 3 + x * y * z + y ** 2 + y * z ** 3, 1),
],
)
assert R.dmp_zz_factor(f_4) == (
-1,
[
(x * y ** 3 + z ** 2, 1),
(x ** 2 * z + y ** 4 * z ** 2 + 5, 1),
(x ** 3 * y - z ** 2 - 3, 1),
(x ** 3 * y ** 4 + z ** 2, 1),
],
)
assert R.dmp_zz_factor(f_5) == (-1, [(x + y - z, 3)])
R, x, y, z, t = ring("x,y,z,t", ZZ)
assert R.dmp_zz_factor(f_6) == (
1,
[
(47 * x * y + z ** 3 * t ** 2 - t ** 2, 1),
(45 * x ** 3 - 9 * y ** 3 - y ** 2 + 3 * z ** 3 + 2 * z * t, 1),
],
)
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_zz_factor(w_1) == (
1,
[
(x ** 2 * y ** 2 - x ** 2 * z ** 2 + y - z ** 2, 1),
(x ** 2 * y * z ** 2 + 3 * x * z + 2 * y, 1),
(4 * x ** 2 * y + 4 * x ** 2 * z + x * y * z - 1, 1),
],
)
R, x, y = ring("x,y", ZZ)
f = (
-12 * x ** 16 * y
+ 240 * x ** 12 * y ** 3
- 768 * x ** 10 * y ** 4
+ 1080 * x ** 8 * y ** 5
- 768 * x ** 6 * y ** 6
+ 240 * x ** 4 * y ** 7
- 12 * y ** 9
)
assert R.dmp_zz_factor(f) == (
-12,
[(y, 1), (x ** 2 - y, 6), (x ** 4 + 6 * x ** 2 * y + y ** 2, 1)],
)
def test_dup_ext_factor():
R, x = ring("x", QQ.algebraic_field(I))
def anp(element):
return ANP(element, [QQ(1), QQ(0), QQ(1)], QQ)
assert R.dup_ext_factor(0) == (anp([]), [])
f = anp([QQ(1)]) * x + anp([QQ(1)])
assert R.dup_ext_factor(f) == (anp([QQ(1)]), [(f, 1)])
g = anp([QQ(2)]) * x + anp([QQ(2)])
assert R.dup_ext_factor(g) == (anp([QQ(2)]), [(f, 1)])
f = anp([QQ(7)]) * x ** 4 + anp([QQ(1, 1)])
g = anp([QQ(1)]) * x ** 4 + anp([QQ(1, 7)])
assert R.dup_ext_factor(f) == (anp([QQ(7)]), [(g, 1)])
f = anp([QQ(1)]) * x ** 4 + anp([QQ(1)])
assert R.dup_ext_factor(f) == (
anp([QQ(1, 1)]),
[
(anp([QQ(1)]) * x ** 2 + anp([QQ(-1), QQ(0)]), 1),
(anp([QQ(1)]) * x ** 2 + anp([QQ(1), QQ(0)]), 1),
],
)
f = anp([QQ(4, 1)]) * x ** 2 + anp([QQ(9, 1)])
assert R.dup_ext_factor(f) == (
anp([QQ(4, 1)]),
[
(anp([QQ(1, 1)]) * x + anp([-QQ(3, 2), QQ(0, 1)]), 1),
(anp([QQ(1, 1)]) * x + anp([QQ(3, 2), QQ(0, 1)]), 1),
],
)
f = (
anp([QQ(4, 1)]) * x ** 4
+ anp([QQ(8, 1)]) * x ** 3
+ anp([QQ(77, 1)]) * x ** 2
+ anp([QQ(18, 1)]) * x
+ anp([QQ(153, 1)])
)
assert R.dup_ext_factor(f) == (
anp([QQ(4, 1)]),
[
(anp([QQ(1, 1)]) * x + anp([-QQ(4, 1), QQ(1, 1)]), 1),
(anp([QQ(1, 1)]) * x + anp([-QQ(3, 2), QQ(0, 1)]), 1),
(anp([QQ(1, 1)]) * x + anp([QQ(3, 2), QQ(0, 1)]), 1),
(anp([QQ(1, 1)]) * x + anp([QQ(4, 1), QQ(1, 1)]), 1),
],
)
R, x = ring("x", QQ.algebraic_field(sqrt(2)))
def anp(element):
return ANP(element, [QQ(1), QQ(0), QQ(-2)], QQ)
f = anp([QQ(1)]) * x ** 4 + anp([QQ(1, 1)])
assert R.dup_ext_factor(f) == (
anp([QQ(1)]),
[
(anp([QQ(1)]) * x ** 2 + anp([QQ(-1), QQ(0)]) * x + anp([QQ(1)]), 1),
(anp([QQ(1)]) * x ** 2 + anp([QQ(1), QQ(0)]) * x + anp([QQ(1)]), 1),
],
)
f = anp([QQ(1, 1)]) * x ** 2 + anp([QQ(2), QQ(0)]) * x + anp([QQ(2, 1)])
assert R.dup_ext_factor(f) == (anp([QQ(1, 1)]), [(anp([1]) * x + anp([1, 0]), 2)])
assert R.dup_ext_factor(f ** 3) == (
anp([QQ(1, 1)]),
[(anp([1]) * x + anp([1, 0]), 6)],
)
f *= anp([QQ(2, 1)])
assert R.dup_ext_factor(f) == (anp([QQ(2, 1)]), [(anp([1]) * x + anp([1, 0]), 2)])
assert R.dup_ext_factor(f ** 3) == (
anp([QQ(8, 1)]),
[(anp([1]) * x + anp([1, 0]), 6)],
)
def test_dmp_ext_factor():
R, x, y = ring("x,y", QQ.algebraic_field(sqrt(2)))
def anp(x):
return ANP(x, [QQ(1), QQ(0), QQ(-2)], QQ)
assert R.dmp_ext_factor(0) == (anp([]), [])
f = anp([QQ(1)]) * x + anp([QQ(1)])
assert R.dmp_ext_factor(f) == (anp([QQ(1)]), [(f, 1)])
g = anp([QQ(2)]) * x + anp([QQ(2)])
assert R.dmp_ext_factor(g) == (anp([QQ(2)]), [(f, 1)])
f = anp([QQ(1)]) * x ** 2 + anp([QQ(-2)]) * y ** 2
assert R.dmp_ext_factor(f) == (
anp([QQ(1)]),
[
(anp([QQ(1)]) * x + anp([QQ(-1), QQ(0)]) * y, 1),
(anp([QQ(1)]) * x + anp([QQ(1), QQ(0)]) * y, 1),
],
)
f = anp([QQ(2)]) * x ** 2 + anp([QQ(-4)]) * y ** 2
assert R.dmp_ext_factor(f) == (
anp([QQ(2)]),
[
(anp([QQ(1)]) * x + anp([QQ(-1), QQ(0)]) * y, 1),
(anp([QQ(1)]) * x + anp([QQ(1), QQ(0)]) * y, 1),
],
)
def test_dup_factor_list():
R, x = ring("x", ZZ)
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(7) == (7, [])
R, x = ring("x", QQ)
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
R, x = ring("x", ZZ["t"])
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(7) == (7, [])
R, x = ring("x", QQ["t"])
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
R, x = ring("x", ZZ)
assert R.dup_factor_list_include(0) == [(0, 1)]
assert R.dup_factor_list_include(7) == [(7, 1)]
assert R.dup_factor_list(x ** 2 + 2 * x + 1) == (1, [(x + 1, 2)])
assert R.dup_factor_list_include(x ** 2 + 2 * x + 1) == [(x + 1, 2)]
# issue 8037
assert R.dup_factor_list(6 * x ** 2 - 5 * x - 6) == (
1,
[(2 * x - 3, 1), (3 * x + 2, 1)],
)
R, x = ring("x", QQ)
assert R.dup_factor_list(QQ(1, 2) * x ** 2 + x + QQ(1, 2)) == (
QQ(1, 2),
[(x + 1, 2)],
)
R, x = ring("x", FF(2))
assert R.dup_factor_list(x ** 2 + 1) == (1, [(x + 1, 2)])
R, x = ring("x", RR)
assert R.dup_factor_list(1.0 * x ** 2 + 2.0 * x + 1.0) == (
1.0,
[(1.0 * x + 1.0, 2)],
)
assert R.dup_factor_list(2.0 * x ** 2 + 4.0 * x + 2.0) == (
2.0,
[(1.0 * x + 1.0, 2)],
)
f = 6.7225336055071 * x ** 2 - 10.6463972754741 * x - 0.33469524022264
coeff, factors = R.dup_factor_list(f)
assert coeff == RR(10.6463972754741)
assert len(factors) == 1
assert factors[0][0].max_norm() == RR(1.0)
assert factors[0][1] == 1
Rt, t = ring("t", ZZ)
R, x = ring("x", Rt)
f = 4 * t * x ** 2 + 4 * t ** 2 * x
assert R.dup_factor_list(f) == (4 * t, [(x, 1), (x + t, 1)])
Rt, t = ring("t", QQ)
R, x = ring("x", Rt)
f = QQ(1, 2) * t * x ** 2 + QQ(1, 2) * t ** 2 * x
assert R.dup_factor_list(f) == (QQ(1, 2) * t, [(x, 1), (x + t, 1)])
R, x = ring("x", QQ.algebraic_field(I))
def anp(element):
return ANP(element, [QQ(1), QQ(0), QQ(1)], QQ)
f = anp([QQ(1, 1)]) * x ** 4 + anp([QQ(2, 1)]) * x ** 2
assert R.dup_factor_list(f) == (
anp([QQ(1, 1)]),
[
(anp([QQ(1, 1)]) * x, 2),
(anp([QQ(1, 1)]) * x ** 2 + anp([]) * x + anp([QQ(2, 1)]), 1),
],
)
R, x = ring("x", EX)
raises(DomainError, lambda: R.dup_factor_list(EX(sin(1))))
def test_dmp_factor_list():
R, x, y = ring("x,y", ZZ)
assert R.dmp_factor_list(0) == (ZZ(0), [])
assert R.dmp_factor_list(7) == (7, [])
R, x, y = ring("x,y", QQ)
assert R.dmp_factor_list(0) == (QQ(0), [])
assert R.dmp_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
Rt, t = ring("t", ZZ)
R, x, y = ring("x,y", Rt)
assert R.dmp_factor_list(0) == (0, [])
assert R.dmp_factor_list(7) == (ZZ(7), [])
Rt, t = ring("t", QQ)
R, x, y = ring("x,y", Rt)
assert R.dmp_factor_list(0) == (0, [])
assert R.dmp_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
R, x, y = ring("x,y", ZZ)
assert R.dmp_factor_list_include(0) == [(0, 1)]
assert R.dmp_factor_list_include(7) == [(7, 1)]
R, X = xring("x:200", ZZ)
f, g = X[0] ** 2 + 2 * X[0] + 1, X[0] + 1
assert R.dmp_factor_list(f) == (1, [(g, 2)])
f, g = X[-1] ** 2 + 2 * X[-1] + 1, X[-1] + 1
assert R.dmp_factor_list(f) == (1, [(g, 2)])
R, x = ring("x", ZZ)
assert R.dmp_factor_list(x ** 2 + 2 * x + 1) == (1, [(x + 1, 2)])
R, x = ring("x", QQ)
assert R.dmp_factor_list(QQ(1, 2) * x ** 2 + x + QQ(1, 2)) == (
QQ(1, 2),
[(x + 1, 2)],
)
R, x, y = ring("x,y", ZZ)
assert R.dmp_factor_list(x ** 2 + 2 * x + 1) == (1, [(x + 1, 2)])
R, x, y = ring("x,y", QQ)
assert R.dmp_factor_list(QQ(1, 2) * x ** 2 + x + QQ(1, 2)) == (
QQ(1, 2),
[(x + 1, 2)],
)
R, x, y = ring("x,y", ZZ)
f = 4 * x ** 2 * y + 4 * x * y ** 2
assert R.dmp_factor_list(f) == (4, [(y, 1), (x, 1), (x + y, 1)])
assert R.dmp_factor_list_include(f) == [(4 * y, 1), (x, 1), (x + y, 1)]
R, x, y = ring("x,y", QQ)
f = QQ(1, 2) * x ** 2 * y + QQ(1, 2) * x * y ** 2
assert R.dmp_factor_list(f) == (QQ(1, 2), [(y, 1), (x, 1), (x + y, 1)])
R, x, y = ring("x,y", RR)
f = 2.0 * x ** 2 - 8.0 * y ** 2
assert R.dmp_factor_list(f) == (RR(8.0), [(0.5 * x - y, 1), (0.5 * x + y, 1)])
f = 6.7225336055071 * x ** 2 * y ** 2 - 10.6463972754741 * x * y - 0.33469524022264
coeff, factors = R.dmp_factor_list(f)
assert coeff == RR(10.6463972754741)
assert len(factors) == 1
assert factors[0][0].max_norm() == RR(1.0)
assert factors[0][1] == 1
Rt, t = ring("t", ZZ)
R, x, y = ring("x,y", Rt)
f = 4 * t * x ** 2 + 4 * t ** 2 * x
assert R.dmp_factor_list(f) == (4 * t, [(x, 1), (x + t, 1)])
Rt, t = ring("t", QQ)
R, x, y = ring("x,y", Rt)
f = QQ(1, 2) * t * x ** 2 + QQ(1, 2) * t ** 2 * x
assert R.dmp_factor_list(f) == (QQ(1, 2) * t, [(x, 1), (x + t, 1)])
R, x, y = ring("x,y", FF(2))
raises(NotImplementedError, lambda: R.dmp_factor_list(x ** 2 + y ** 2))
R, x, y = ring("x,y", EX)
raises(DomainError, lambda: R.dmp_factor_list(EX(sin(1))))
def test_dup_irreducible_p():
    """Definitive irreducibility test on univariate ZZ polynomials."""
    R, x = ring("x", ZZ)
    assert R.dup_irreducible_p(x ** 2 + x + 1) is True
    # (x + 1)**2 is reducible.
    assert R.dup_irreducible_p(x ** 2 + 2 * x + 1) is False
def test_dmp_irreducible_p():
    """Same irreducibility cases as dup_irreducible_p, on a multivariate ring."""
    R, x, y = ring("x,y", ZZ)
    assert R.dmp_irreducible_p(x ** 2 + x + 1) is True
    assert R.dmp_irreducible_p(x ** 2 + 2 * x + 1) is False
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetTableResult',
'AwaitableGetTableResult',
'get_table',
'get_table_output',
]
@pulumi.output_type
class GetTableResult:
def __init__(__self__, arn=None, attribute_definitions=None, billing_mode=None, contributor_insights_specification=None, global_secondary_indexes=None, id=None, kinesis_stream_specification=None, point_in_time_recovery_specification=None, provisioned_throughput=None, s_se_specification=None, stream_arn=None, stream_specification=None, table_class=None, tags=None, time_to_live_specification=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if attribute_definitions and not isinstance(attribute_definitions, list):
raise TypeError("Expected argument 'attribute_definitions' to be a list")
pulumi.set(__self__, "attribute_definitions", attribute_definitions)
if billing_mode and not isinstance(billing_mode, str):
raise TypeError("Expected argument 'billing_mode' to be a str")
pulumi.set(__self__, "billing_mode", billing_mode)
if contributor_insights_specification and not isinstance(contributor_insights_specification, dict):
raise TypeError("Expected argument 'contributor_insights_specification' to be a dict")
pulumi.set(__self__, "contributor_insights_specification", contributor_insights_specification)
if global_secondary_indexes and not isinstance(global_secondary_indexes, list):
raise TypeError("Expected argument 'global_secondary_indexes' to be a list")
pulumi.set(__self__, "global_secondary_indexes", global_secondary_indexes)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kinesis_stream_specification and not isinstance(kinesis_stream_specification, dict):
raise TypeError("Expected argument 'kinesis_stream_specification' to be a dict")
pulumi.set(__self__, "kinesis_stream_specification", kinesis_stream_specification)
if point_in_time_recovery_specification and not isinstance(point_in_time_recovery_specification, dict):
raise TypeError("Expected argument 'point_in_time_recovery_specification' to be a dict")
pulumi.set(__self__, "point_in_time_recovery_specification", point_in_time_recovery_specification)
if provisioned_throughput and not isinstance(provisioned_throughput, dict):
raise TypeError("Expected argument 'provisioned_throughput' to be a dict")
pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
if s_se_specification and not isinstance(s_se_specification, dict):
raise TypeError("Expected argument 's_se_specification' to be a dict")
pulumi.set(__self__, "s_se_specification", s_se_specification)
if stream_arn and not isinstance(stream_arn, str):
raise TypeError("Expected argument 'stream_arn' to be a str")
pulumi.set(__self__, "stream_arn", stream_arn)
if stream_specification and not isinstance(stream_specification, dict):
raise TypeError("Expected argument 'stream_specification' to be a dict")
pulumi.set(__self__, "stream_specification", stream_specification)
if table_class and not isinstance(table_class, str):
raise TypeError("Expected argument 'table_class' to be a str")
pulumi.set(__self__, "table_class", table_class)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if time_to_live_specification and not isinstance(time_to_live_specification, dict):
raise TypeError("Expected argument 'time_to_live_specification' to be a dict")
pulumi.set(__self__, "time_to_live_specification", time_to_live_specification)
# Read-only output property accessors for GetTableResult. Each one simply
# fetches the value stored by __init__ via pulumi.set(); the @pulumi.getter
# name= argument maps the Python snake_case name to the CloudFormation
# camelCase property name.
@property
@pulumi.getter
def arn(self) -> Optional[str]:
    """Value of the table's `arn` output property."""
    return pulumi.get(self, "arn")

@property
@pulumi.getter(name="attributeDefinitions")
def attribute_definitions(self) -> Optional[Sequence['outputs.TableAttributeDefinition']]:
    """Value of the `attributeDefinitions` output property."""
    return pulumi.get(self, "attribute_definitions")

@property
@pulumi.getter(name="billingMode")
def billing_mode(self) -> Optional[str]:
    """Value of the `billingMode` output property."""
    return pulumi.get(self, "billing_mode")

@property
@pulumi.getter(name="contributorInsightsSpecification")
def contributor_insights_specification(self) -> Optional['outputs.TableContributorInsightsSpecification']:
    """Value of the `contributorInsightsSpecification` output property."""
    return pulumi.get(self, "contributor_insights_specification")

@property
@pulumi.getter(name="globalSecondaryIndexes")
def global_secondary_indexes(self) -> Optional[Sequence['outputs.TableGlobalSecondaryIndex']]:
    """Value of the `globalSecondaryIndexes` output property."""
    return pulumi.get(self, "global_secondary_indexes")

@property
@pulumi.getter
def id(self) -> Optional[str]:
    """Value of the table's `id` output property."""
    return pulumi.get(self, "id")

@property
@pulumi.getter(name="kinesisStreamSpecification")
def kinesis_stream_specification(self) -> Optional['outputs.TableKinesisStreamSpecification']:
    """Value of the `kinesisStreamSpecification` output property."""
    return pulumi.get(self, "kinesis_stream_specification")

@property
@pulumi.getter(name="pointInTimeRecoverySpecification")
def point_in_time_recovery_specification(self) -> Optional['outputs.TablePointInTimeRecoverySpecification']:
    """Value of the `pointInTimeRecoverySpecification` output property."""
    return pulumi.get(self, "point_in_time_recovery_specification")

@property
@pulumi.getter(name="provisionedThroughput")
def provisioned_throughput(self) -> Optional['outputs.TableProvisionedThroughput']:
    """Value of the `provisionedThroughput` output property."""
    return pulumi.get(self, "provisioned_throughput")

@property
@pulumi.getter(name="sSESpecification")
def s_se_specification(self) -> Optional['outputs.TableSSESpecification']:
    """Value of the `sSESpecification` (server-side encryption) output property."""
    return pulumi.get(self, "s_se_specification")

@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> Optional[str]:
    """Value of the `streamArn` output property."""
    return pulumi.get(self, "stream_arn")

@property
@pulumi.getter(name="streamSpecification")
def stream_specification(self) -> Optional['outputs.TableStreamSpecification']:
    """Value of the `streamSpecification` output property."""
    return pulumi.get(self, "stream_specification")

@property
@pulumi.getter(name="tableClass")
def table_class(self) -> Optional[str]:
    """Value of the `tableClass` output property."""
    return pulumi.get(self, "table_class")

@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.TableTag']]:
    """Value of the `tags` output property."""
    return pulumi.get(self, "tags")

@property
@pulumi.getter(name="timeToLiveSpecification")
def time_to_live_specification(self) -> Optional['outputs.TableTimeToLiveSpecification']:
    """Value of the `timeToLiveSpecification` output property."""
    return pulumi.get(self, "time_to_live_specification")
class AwaitableGetTableResult(GetTableResult):
    """GetTableResult that can also be `await`ed (resolves immediately)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks this method as a generator, which the
        # await protocol requires; no value is ever actually yielded, and the
        # return value is a plain GetTableResult built from the resolved
        # properties of this object.
        if False:
            yield self
        return GetTableResult(
            arn=self.arn,
            attribute_definitions=self.attribute_definitions,
            billing_mode=self.billing_mode,
            contributor_insights_specification=self.contributor_insights_specification,
            global_secondary_indexes=self.global_secondary_indexes,
            id=self.id,
            kinesis_stream_specification=self.kinesis_stream_specification,
            point_in_time_recovery_specification=self.point_in_time_recovery_specification,
            provisioned_throughput=self.provisioned_throughput,
            s_se_specification=self.s_se_specification,
            stream_arn=self.stream_arn,
            stream_specification=self.stream_specification,
            table_class=self.table_class,
            tags=self.tags,
            time_to_live_specification=self.time_to_live_specification)
def get_table(id: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTableResult:
    """
    Resource Type definition for AWS::DynamoDB::Table
    """
    # Invoke the provider function and re-wrap the plain result in the
    # awaitable subclass, copying every output field across.
    invoke_args = {'id': id}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('aws-native:dynamodb:getTable', invoke_args,
                                   opts=opts, typ=GetTableResult).value
    field_names = (
        'arn', 'attribute_definitions', 'billing_mode',
        'contributor_insights_specification', 'global_secondary_indexes',
        'id', 'kinesis_stream_specification',
        'point_in_time_recovery_specification', 'provisioned_throughput',
        's_se_specification', 'stream_arn', 'stream_specification',
        'table_class', 'tags', 'time_to_live_specification')
    return AwaitableGetTableResult(
        **{name: getattr(result, name) for name in field_names})
@_utilities.lift_output_func(get_table)
def get_table_output(id: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTableResult]:
    """
    Resource Type definition for AWS::DynamoDB::Table
    """
    # Body intentionally empty: lift_output_func wraps get_table so it accepts
    # Inputs/Outputs; this stub only supplies the typed signature.
    ...
|
<filename>report_crawler/report_crawler/pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re
import os
import time
import logging
import pymongo as pm
from html.parser import HTMLParser
from parser.parser import get_information
from spiders.__Global_function import get_localtime, startTime
from spiders.__Global_variable import REPORT_SAVEDIR, LOGGING_SAVEDIR
# Log config: route this pipeline's messages to a dedicated log file.
logger = logging.getLogger('Scrapy')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(LOGGING_SAVEDIR, 'logging.log'))
formatter = logging.Formatter('[%(asctime)s] - %(name)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
# Today's date, converted by the project helper; used below as the per-run
# directory name for saved report files.
now_time = str(get_localtime(time.strftime("%Y-%m-%d", time.localtime())))
htmlParser = HTMLParser()
# NOTE(review): htmlParser and htmlPattern appear unused in this file —
# confirm before removing.
htmlPattern = ' '
class ReportCrawlerPipeline(object):
    """Extract report metadata from crawled items, write one text file per
    report, and optionally store the parsed record in MongoDB.

    NOTE(review): this module mixes Python 2 idioms (`unicode`, `has_key`,
    `reload(sys)`) with the Python 3 `html.parser` import — confirm which
    interpreter it actually targets.
    """
    def process_item(self, item, spider):
        """Parse one crawled item; drop it unless title/time/address/speaker
        were all extracted successfully, otherwise save it to disk and log."""
        # self.deal_with(item)
        # return
        text = ''
        for message in item['text']:
            for each in message.xpath(".//text()").extract():
                # NOTE(review): type(each).__name__ is passed as the *encoding*
                # argument of unicode(); 'str' is not a codec name, so this
                # branch presumably never fires because extract() yields
                # unicode already — confirm.
                text += unicode(each, type(each).__name__) if type(each).__name__ != 'unicode' else each
            text += '\n'
        messages = get_information(text, item['faculty'])
        # Fall back to the item's own title when the parser found none, unless
        # the item title itself already looks like a report/lecture headline.
        if item.has_key('title') and messages['title'] == '':
            messages['title'] = item['title'] if re.search(u"(.*?)(教授|专家|院士|博士|学者|研究员|副教授)(.*?)(学术)*(报告|讲座)", item['title']) is None else ''
        # Require all four key fields to be non-blank; otherwise discard.
        if re.sub(u"\\s+", '', messages['title']) == '' or re.sub(u"\\s+", '', messages['time']) == '' or \
                re.sub(u"\\s+", '', messages['address']) == '' or re.sub(u"\\s+", '', messages['speaker']) == '':
            return
        # Save path: <REPORT_SAVEDIR>/<date>/<last 3 chars of faculty>/<rest>
        dirname = os.path.join(REPORT_SAVEDIR, now_time, item['faculty'][-3:], item['faculty'][:-3])
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        filename = os.path.join(dirname, '{}.txt'.format(item['number']))
        # send the information from item to messages
        messages['faculty'] = item['faculty']
        messages['organizer'] = item['organizer']
        messages['link'] = item['link']
        messages['publication'] = item['publication']
        messages['location'] = item['location']
        # get report start time (derived from the publication date)
        reportTime = startTime(messages['publication'])
        messages['startTime'] = reportTime.get_time(messages['time'])
        if messages['startTime'] == None:
            messages['startTime'] = ''
        with open(filename, 'w') as f:
            f.write('Report time:\n' + str(messages['startTime']) + '\n' * 2)
            f.write('Title:\n' + messages['title'] + '\n' * 2)
            f.write('Time:\n' + messages['time'] + '\n' * 2)
            f.write('Address:\n' + messages['address'] + '\n' * 2)
            f.write('Speaker:\n' + messages['speaker'] + '\n' * 2)
            f.write('Organizer:\n' + messages['organizer'] + '\n' * 2)
            if re.sub(u"\\s+", '', messages['biography']) != '':
                f.write('Biography:\n' + messages['biography'] + '\n' * 2)
            if re.sub(u"\\s+", '', messages['abstract']) != '':
                f.write('Abstract:\n' + messages['abstract'] + '\n' * 2)
        # save to db (currently disabled)
        #self.db_save(messages)
        # write to log
        logger.info(messages['faculty'] + ' - ' + messages['title'])
        return
    def deal_with(self, item):
        """Debug helper: dump an item's raw extracted text to tests/<n>.txt."""
        text = ''
        for message in item['text']:
            for each in message.xpath(".//text()").extract():
                text += each
            text += '\n'
        with open('tests/{}.txt'.format(item['number']), 'w') as f:
            f.write(str(text))
    def db_save(self, messages):
        """Insert the parsed report into the local MongoDB collection."""
        conn = pm.MongoClient('localhost', 27017)
        db = conn.get_database('report_db')
        col = db.get_collection('reports_without_label')
        col.insert(messages)
|
import utils
import os
import unittest
import sys
from test_format_bcif import MockMsgPack, MockFh
if sys.version_info[0] >= 3:
from io import StringIO, BytesIO
else:
from io import BytesIO
StringIO = BytesIO
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
utils.set_search_paths(TOPDIR)
import ihm.dictionary
def add_keyword(name, mandatory, category):
    """Create a Keyword, register it under *category*, and return it."""
    keyword = ihm.dictionary.Keyword()
    keyword.name = name
    keyword.mandatory = mandatory
    category.keywords[name] = keyword
    return keyword
def make_test_dictionary():
    """Build the two-category dictionary shared by most tests below."""
    d = ihm.dictionary.Dictionary()

    # A mandatory category with an int-typed mandatory keyword.
    mandatory_cat = ihm.dictionary.Category()
    mandatory_cat.name = 'test_mandatory_category'
    mandatory_cat.mandatory = True
    add_keyword("foo", False, mandatory_cat)
    bar = add_keyword("bar", True, mandatory_cat)
    bar.item_type = ihm.dictionary.ItemType('int', 'numb', '[+-]?[0-9]+')
    d.categories[mandatory_cat.name] = mandatory_cat

    # An optional category with a restricted text keyword and an enumeration.
    optional_cat = ihm.dictionary.Category()
    optional_cat.name = 'test_optional_category'
    optional_cat.mandatory = False
    foo = add_keyword("foo", False, optional_cat)
    # For testing we only accept upper case values
    foo.item_type = ihm.dictionary.ItemType('text', 'char', r'[ \n\t_()A-Z]+')
    bar = add_keyword("bar", True, optional_cat)
    bar.enumeration = set(('enum1', 'enum2'))
    add_keyword("baz", False, optional_cat)
    d.categories[optional_cat.name] = optional_cat

    d.linked_items = {'_test_optional_category.baz':
                      '_test_mandatory_category.foo',
                      '_test_optional_category.foo': '_entity.id'}
    return d
def make_other_test_dictionary():
    """Build a one-category dictionary that links into make_test_dictionary's."""
    d = ihm.dictionary.Dictionary()
    ext_cat = ihm.dictionary.Category()
    ext_cat.name = 'ext_category'
    ext_cat.mandatory = False
    add_keyword("foo", False, ext_cat)
    d.categories[ext_cat.name] = ext_cat
    d.linked_items = {'_ext_category.foo': '_test_mandatory_category.foo'}
    return d
class Tests(unittest.TestCase):
    """Unit tests for ihm.dictionary: reading mmCIF dictionaries, merging
    Dictionary objects, and validating files against them."""
    def test_keyword_enum_case_insen(self):
        """Test KeywordEnumeration (case insensitive)"""
        x = ihm.dictionary._KeywordEnumeration()
        x.case_sensitive = False
        self.assertNotIn('foo', x)
        x.add('foo')
        self.assertNotIn('bar', x)
        self.assertIn('foo', x)
        self.assertIn('FOO', x)
        x.add('bar')
        self.assertIn('BAR', x)
    def test_keyword_enum_case_sen(self):
        """Test KeywordEnumeration (case sensitive)"""
        x = ihm.dictionary._KeywordEnumeration()
        self.assertNotIn('foo', x)
        x.add('foo')
        self.assertNotIn('bar', x)
        self.assertIn('foo', x)
        self.assertNotIn('FOO', x)
        x.add('bar')
        self.assertNotIn('BAR', x)
    def test_read(self):
        """Test read() function"""
        # Note that _item.category_id is intentionally missing from
        # save_unknown_code
        cif = r"""
loop_
_item_type_list.code
_item_type_list.primitive_code
_item_type_list.construct
code char '[][_,.;:"&<>()/\{}'`~!@#$%A-Za-z0-9*|+-]*'
ucode uchar '[][_,.;:"&<>()/\{}'`~!@#$%A-Za-z0-9*|+-]*'
save_foo
_category.id test_category1
_category.mandatory_code yes
save_
save_bar
loop_
_item.name
_item.category_id
_item.mandatory_code
'_test_category1.bar' test_category1 no
'_test_category3.bar' test_category3 yes
_item_type.code code
save_
save_unknown_code
_item.name '_test_category1.unknown_code'
_item.mandatory_code no
_item_type.code atcode
save_
save_missing_code
_item.name '_test_category1.missing_code'
_item.category_id test_category1
_item.mandatory_code no
save_
save_insensitive_code
_item.name '_test_category1.insensitive_code'
_item.category_id test_category1
_item.mandatory_code no
_item_type.code ucode
save_
save_baz
_item.name '_test_category2.baz'
_item.category_id test_category2
_item.mandatory_code no
_item_type.code ucode
_item_linked.child_name '_test_category2.baz'
_item_linked.parent_name '_test_category1.bar'
loop_
_item_enumeration.value
"enum 1"
"enum 2"
save_
"""
        d = ihm.dictionary.read(StringIO(cif))
        self.assertEqual(sorted(d.categories.keys()),
                         ['test_category1', 'test_category2',
                          'test_category3'])
        c1 = d.categories['test_category1']
        self.assertTrue(c1.mandatory)
        self.assertEqual(
            sorted(c1.keywords.keys()),
            ['bar', 'insensitive_code', 'missing_code', 'unknown_code'])
        self.assertFalse(c1.keywords['bar'].mandatory)
        self.assertIsNone(c1.keywords['bar'].enumeration)
        self.assertEqual(c1.keywords['bar'].item_type.name, "code")
        self.assertTrue(c1.keywords['bar'].item_type.case_sensitive)
        self.assertIsNone(c1.keywords['missing_code'].item_type)
        self.assertIsNone(c1.keywords['unknown_code'].item_type)
        self.assertFalse(
            c1.keywords['insensitive_code'].item_type.case_sensitive)
        c2 = d.categories['test_category2']
        self.assertIsNone(c2.mandatory)
        self.assertEqual(sorted(c2.keywords.keys()), ["baz"])
        self.assertFalse(c2.keywords['baz'].mandatory)
        self.assertEqual(c2.keywords['baz'].enumeration,
                         set(('enum 1', 'enum 2')))
        self.assertFalse(c2.keywords['baz'].enumeration.case_sensitive)
        self.assertFalse(c2.keywords['baz'].item_type.case_sensitive)
        c3 = d.categories['test_category3']
        self.assertIsNone(c3.mandatory)
        self.assertEqual(sorted(c3.keywords.keys()), ["bar"])
        self.assertTrue(c3.keywords['bar'].mandatory)
        self.assertEqual(d.linked_items,
                         {'_test_category2.baz': '_test_category1.bar'})
        if sys.version_info[0] >= 3:
            # Make sure that files can be read in binary mode
            d = ihm.dictionary.read(BytesIO(cif.encode('latin-1')))
            self.assertEqual(sorted(d.categories.keys()),
                             ['test_category1', 'test_category2',
                              'test_category3'])
    def test_add(self):
        """Test adding two Dictionaries"""
        d1 = make_test_dictionary()
        d2 = make_other_test_dictionary()
        d = d1 + d2
        self._check_test_dictionary(d1)
        self._check_other_test_dictionary(d2)
        self._check_summed_dictionary(d)
    def test_add_update(self):
        """Test add Dictionaries that both contain same Category"""
        d1 = make_test_dictionary()
        d2 = ihm.dictionary.Dictionary()
        c = ihm.dictionary.Category()
        c.name = 'test_mandatory_category'
        c.mandatory = True
        add_keyword("baz", False, c)
        d2.categories[c.name] = c
        d = d1 + d2
        self.assertEqual(sorted(d.categories.keys()),
                         ['test_mandatory_category', 'test_optional_category'])
        ks = sorted(d.categories['test_mandatory_category'].keywords.keys())
        # Category should now contain keywords from from dictionaries
        self.assertEqual(ks, ['bar', 'baz', 'foo'])
    def test_category_update(self):
        """Test Category._update()"""
        cman = ihm.dictionary.Category()
        cman.name = 'test_mandatory_category'
        cman.description = 'my description'
        cman.mandatory = True
        add_keyword("foo", False, cman)
        coth = ihm.dictionary.Category()
        coth.name = 'test_mandatory_category'
        coth.description = 'desc2'
        coth.mandatory = False
        add_keyword("bar", False, coth)
        cman._update(coth)
        self.assertIs(cman.mandatory, True)
        self.assertEqual(cman.description, 'my description')
        self.assertEqual(sorted(cman.keywords.keys()), ['bar', 'foo'])
        cnone = ihm.dictionary.Category()
        cnone.name = 'test_mandatory_category'
        cnone._update(coth)
        self.assertIs(cnone.mandatory, False)
        self.assertEqual(cnone.description, 'desc2')
        self.assertEqual(sorted(cnone.keywords.keys()), ['bar'])
    def test_add_inplace(self):
        """Test adding two Dictionaries in place"""
        d1 = make_test_dictionary()
        d2 = make_other_test_dictionary()
        d1 += d2
        self._check_other_test_dictionary(d2)
        self._check_summed_dictionary(d1)
    def _check_test_dictionary(self, d):
        # Helper: assert d still matches make_test_dictionary()'s contents.
        self.assertEqual(sorted(d.categories.keys()),
                         ['test_mandatory_category', 'test_optional_category'])
        self.assertEqual(d.linked_items,
                         {'_test_optional_category.baz':
                          '_test_mandatory_category.foo',
                          '_test_optional_category.foo': '_entity.id'})
    def _check_other_test_dictionary(self, d):
        # Helper: assert d still matches make_other_test_dictionary().
        self.assertEqual(sorted(d.categories.keys()),
                         ['ext_category'])
        self.assertEqual(d.linked_items,
                         {'_ext_category.foo': '_test_mandatory_category.foo'})
    def _check_summed_dictionary(self, d):
        # Helper: assert d is the union of both test dictionaries.
        self.assertEqual(sorted(d.categories.keys()),
                         ['ext_category', 'test_mandatory_category',
                          'test_optional_category'])
        self.assertEqual(d.linked_items,
                         {'_test_optional_category.baz':
                          '_test_mandatory_category.foo',
                          '_test_optional_category.foo': '_entity.id',
                          '_ext_category.foo': '_test_mandatory_category.foo'})
    def test_validate_ok(self):
        """Test successful validation"""
        d = make_test_dictionary()
        d.validate(StringIO("_test_mandatory_category.bar 1"))
    def test_validate_ok_binary_cif(self):
        """Test successful validation of BinaryCIF input"""
        sys.modules['msgpack'] = MockMsgPack
        d = make_test_dictionary()
        fh = MockFh()
        writer = ihm.format_bcif.BinaryCifWriter(fh)
        writer.start_block('ihm')
        with writer.category('_test_mandatory_category') as loc:
            loc.write(bar=1)
        with writer.category('_test_optional_category') as loc:
            loc.write(bar='enum1')
        writer.flush()
        d.validate(fh.data, format='BCIF')
    def test_validate_multi_data_ok(self):
        """Test successful validation of multiple data blocks"""
        d = make_test_dictionary()
        d.validate(StringIO("""
data_block1
_test_mandatory_category.bar 1
data_block2
_test_mandatory_category.bar 2
"""))
    def test_validate_missing_mandatory_category(self):
        """Test validation failure with missing mandatory category"""
        d = make_test_dictionary()
        self.assertRaises(ihm.dictionary.ValidatorError,
                          d.validate, StringIO("_struct.entry_id id1"))
    def test_validate_missing_mandatory_keyword(self):
        """Test validation failure with missing mandatory keyword"""
        d = make_test_dictionary()
        # mandatory 'bar' is marked unknown
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO("_test_mandatory_category.bar ?"))
        # mandatory 'bar' is missing entirely
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO("_test_mandatory_category.foo xy"))
    def test_validate_enumeration(self):
        """Test validation of enumerated values"""
        prefix = """_test_mandatory_category.bar 1
_test_optional_category.bar """
        d = make_test_dictionary()
        # Value in the enumeration is OK
        d.validate(StringIO(prefix + 'enum1'))
        # Omitted value is OK
        d.validate(StringIO(prefix + '.'))
        # Value not in the enumeration is not OK
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + 'bad'))
    def test_validate_item_type_int(self):
        """Test validation of int item type"""
        prefix = "_test_mandatory_category.bar "
        d = make_test_dictionary()
        # Int value is OK
        d.validate(StringIO(prefix + '+45'))
        d.validate(StringIO(prefix + '-4'))
        d.validate(StringIO(prefix + '5'))
        # Omitted value is OK
        d.validate(StringIO(prefix + '.'))
        # Non-int value is not OK
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + '45A'))
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + 'foo'))
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + '++44'))
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + '44+'))
    def test_validate_item_type_multiline(self):
        """Test validation of multiline item type"""
        # This regex '[ \n\t_()A-Z]+' includes \n and \t special characters,
        # which should match newline and tab, not literal \n and \t
        prefix = """_test_mandatory_category.bar 1
_test_optional_category.bar enum1
_test_optional_category.foo """
        d = make_test_dictionary()
        # OK strings
        d.validate(StringIO(prefix + '"FOO BAR"'))
        d.validate(StringIO(prefix + '"FOO_BAR"'))
        d.validate(StringIO(prefix + '"FOO\tBAR"'))
        d.validate(StringIO(prefix + '\n;FOO\nBAR\n;'))
        # Bad strings
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + '"foo BAR"'))
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + '"FOO\\BAR"'))
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + 'n'))
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix + 't'))
    def test_item_type_bad_regex(self):
        """Make sure that ItemType handles invalid regex"""
        # "+" is not a valid Python regex; it should be skipped and will
        # match any value
        it = ihm.dictionary.ItemType("test", "text", "+")
        self.assertTrue(it.regex.match("something"))
        self.assertTrue(it.regex.match(None))
    def test_validate_linked_items(self):
        """Test validation of linked items"""
        prefix = "_test_mandatory_category.bar 1\n"
        d = make_test_dictionary()
        c = ihm.dictionary.Category()
        c.name = 'chem_comp_atom'
        add_keyword("foo", False, c)
        d.categories[c.name] = c
        d.linked_items['_test_optional_category.bar'] \
            = '_chem_comp_atom.atom_id'
        # OK: same key in child and parent
        d.validate(StringIO(prefix +
                            "_test_optional_category.bar .\n"
                            "_test_optional_category.baz 42\n"
                            "_test_mandatory_category.foo 42"))
        # OK: missing parent key but in category not in the dictionary
        d.validate(StringIO(prefix +
                            "_test_optional_category.bar .\n"
                            "_test_optional_category.foo AB"))
        # OK: missing parent key but chem_comp_* is explicitly excluded
        # from validation
        d.validate(StringIO(prefix +
                            "_test_optional_category.bar enum1"))
        # Not OK: parent is missing or does not include the child key
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix +
                                   "_test_optional_category.bar .\n"
                                   "_test_optional_category.baz 42\n"
                                   "_test_mandatory_category.foo 24"))
        self.assertRaises(ihm.dictionary.ValidatorError, d.validate,
                          StringIO(prefix +
                                   "_test_optional_category.bar .\n"
                                   "_test_optional_category.baz 42"))
    def test_unknown_category(self):
        """Test validator failure for unknown categories"""
        d = make_test_dictionary()
        self.assertRaises(
            ihm.dictionary.ValidatorError, d.validate,
            StringIO("_test_mandatory_category.bar 1\n_foo.bar baz"))
    def test_unknown_keyword(self):
        """Test validator failure for unknown keywords"""
        d = make_test_dictionary()
        self.assertRaises(
            ihm.dictionary.ValidatorError, d.validate,
            StringIO("_test_mandatory_category.bar 1\n"
                     "_test_mandatory_category.unk 42"))
if __name__ == '__main__':
    # Run the full test suite when executed directly.
    unittest.main()
|
<filename>app/blockchain/updateBlockHeightState.py
'''
idea of this script is to each time:
determine bolck height of main chain
save relative height of tracked nodes
save overall statistics about heights of nodes in network
see config.py for more info abou config values
'''
#app imports
from app import app, db
from app.models import Node, BlockHeight, ChainState
#my imports
from app.blockchain.nemConnectAsync import nemConnectAsync
# other imports
from collections import Counter
from time import sleep
import json
# config values pulled from the Flask app config (see config.py for details)
NODE_IMPORTANCE_TRESHOLD = app.config['NODE_IMPORTANCE_TRESHOLD']
BLOCK_HEIGHT_ACCEPT_TRESHOLD = app.config['BLOCK_HEIGHT_ACCEPT_TRESHOLD']
MAX_TRYES_TO_REACH_BLOCK_HEIGHT_ACCEPT_TRESHOLD = app.config['MAX_TRYES_TO_REACH_BLOCK_HEIGHT_ACCEPT_TRESHOLD']
SLEEP_BETWEEN_TRYES_TO_REACH_BLOCK_HEIGHT_ACCEPT_TRESHOLD = app.config['SLEEP_BETWEEN_TRYES_TO_REACH_BLOCK_HEIGHT_ACCEPT_TRESHOLD']
ASYNC_REQUEST_TIMEOUT = app.config['ASYNC_REQUEST_TIMEOUT']
# nemConnect setup: shared async NIS client using the configured timeout
nis = nemConnectAsync(ASYNC_REQUEST_TIMEOUT)
def dbGetActiveNodes(node_importance_treshold):
    """Return (id_list, endpoint_list, track_block_height, trusted_height_count)
    for all active nodes, ordered by importance (descending).

    trusted_height_count is how many of the leading entries belong to nodes
    whose importance exceeds node_importance_treshold (all active nodes when
    the treshold is falsy).
    """
    # get all active nodes from db ordered by importance, decreasing
    q = db.session.query(Node.id, Node.endpoint, Node.track_block_height)\
        .filter(Node.active == True)\
        .order_by(Node.importance.desc())\
        .all()
    # count nodes with importance over node_importance_treshold -> these will
    # be used as the trusted nodes
    if node_importance_treshold:
        trusted_height_count = db.session.query(db.func.count(Node.id))\
            .filter(Node.active == True, Node.importance > node_importance_treshold)\
            .one()[0]
    else:
        trusted_height_count = db.session.query(db.func.count(Node.id))\
            .filter(Node.active == True)\
            .one()[0]
    # unzip data from the query:
    #   id_list            - id of each node in db
    #   endpoint_list      - endpoint of each node
    #   track_block_height - True/False per node: record its block height to
    #                        db at the end of this run
    # NOTE(review): zip(*q) raises TypeError if the query returns no rows —
    # confirm at least one active node always exists.
    id_list, endpoint_list, track_block_height = zip(*q)
    # each of these lists will be used separately and then zipped back together
    return id_list, endpoint_list, track_block_height, trusted_height_count
def dbUpdateChainState(id_list, endpoint_list, height_list, track_block_height, time_stamp):
    """Persist one ChainState snapshot plus per-node relative heights.

    height_list holds one entry per node: the reported block height, or None
    when the node timed out. endpoint_list is accepted for signature symmetry
    with dbGetActiveNodes() but is not used here.
    """
    # Nodes that did not answer in time are recorded as None heights.
    timeout_count = len([h for h in height_list if h is None])
    # get height distribution of all nodes
    height_distribution = Counter(height_list).most_common()
    # Determine the most common block height.
    # height_distribution can be [] - empty list
    if height_distribution:
        most_common_height, most_common_count = height_distribution[0]
        # None can be the most common "height" when many nodes don't respond;
        # fall back to the next entry. BUGFIX: previously this unconditionally
        # read height_distribution[1], raising IndexError when None was the
        # ONLY entry (i.e. every node timed out) — now fall back to (0, 0).
        if most_common_height is None:
            if len(height_distribution) > 1:
                most_common_height, most_common_count = height_distribution[1]
            else:
                most_common_height, most_common_count = 0, 0
    else:
        most_common_height, most_common_count = 0, 0
    # create new ChainState row
    chain_state = ChainState(time=time_stamp, most_common_height=most_common_height,
                             most_common_count=most_common_count,
                             height_distribution=json.dumps(height_distribution),
                             timeout_count=timeout_count,
                             nodes_tested=len(id_list))
    # (node_id, block_height) for all tracked nodes (see Node.track_block_height)
    nodes_to_track = [(row[0], row[1]) for row in zip(id_list, height_list, track_block_height) if row[2]]
    # append the block_height of each tracked node to the current chain_state
    for id, height in nodes_to_track:
        if height is not None:
            relative_height = height - most_common_height
        else:
            relative_height = None
        chain_state.block_height.append(BlockHeight(node_id=id, relative_height=relative_height))
    # add chain_state and all block_heights appended to it to db.session
    db.session.add(chain_state)
    db.session.commit()
# this function reads REQUEST_TIMEOUT, but only for logging
def blockHeightAcceptCondition(ok_nodes_count, trusted_nodes_count, treshold, max_tryes):
    """Return True once enough trusted nodes agree on a block height.

    Attempts are tracked in the function attribute ``counter`` (initialized by
    the caller before the first call); once more than ``max_tryes`` attempts
    have been made, an Exception with tuning hints is raised.
    """
    print(f'try {blockHeightAcceptCondition.counter}, nodes in sync {ok_nodes_count *100 / trusted_nodes_count}%, {treshold*100}% needed')
    blockHeightAcceptCondition.counter += 1
    # Guard clause: give up after max_tryes attempts.
    if blockHeightAcceptCondition.counter > max_tryes:
        raise Exception('Max tryes for block height sync reached, '
                        f'increase REQUEST_TIMEOUT(={ASYNC_REQUEST_TIMEOUT}) or '
                        f'MAX_TRYES_TO_REACH_BLOCK_HEIGHT_ACCEPT_TRESHOLD(={max_tryes}) or '
                        f'decrease BLOCK_HEIGHT_ACCEPT_TRESHOLD(={treshold})')
    return ok_nodes_count / trusted_nodes_count > treshold
def main():
    """One monitoring pass: converge on the network block height, then persist it."""
    # get ordered lists of data corresponding to each node (every active node
    # in the Nodes table); each list is used separately and zipped back later.
    # trusted_height_count is the count of nodes with importance above
    # NODE_IMPORTANCE_TRESHOLD — see dbGetActiveNodes() for details.
    id_list, endpoint_list, track_block_height, trusted_height_count = dbGetActiveNodes(NODE_IMPORTANCE_TRESHOLD)
    # most_common_count is the number of trusted nodes on the most prevalent
    # block height; starts at 0 so the first loop iteration always runs.
    most_common_count, blockHeightAcceptCondition.counter = 0, 0
    # start of network block height convergence procedure
    while not blockHeightAcceptCondition(
            most_common_count, trusted_height_count,
            BLOCK_HEIGHT_ACCEPT_TRESHOLD,
            MAX_TRYES_TO_REACH_BLOCK_HEIGHT_ACCEPT_TRESHOLD):
        if blockHeightAcceptCondition.counter > 1:
            print('sleeping')
            sleep(SLEEP_BETWEEN_TRYES_TO_REACH_BLOCK_HEIGHT_ACCEPT_TRESHOLD)
        # get the height of each node asynchronously; time_stamp is taken
        # right before the requests go out
        height_list, time_stamp = nis.getHeightOfEachNode(endpoint_list)
        print(f'height_distribution: {Counter(height_list).most_common()}')
        # height_list contains the height when everything is OK, None otherwise.
        # trust_height marks the trusted entries: the query is ordered by
        # importance descending, so the first trusted_height_count rows qualify.
        trust_height = [True if i<trusted_height_count else False for i in range(len(id_list))]
        trusted_height_list_no_Nones = [row[0] for row in zip(height_list,trust_height) if row[0] and row[1]]
        # get distribution of heights of trusted nodes
        truested_height_distribution = Counter(trusted_height_list_no_Nones).most_common()
        # most common block height among trusted nodes, and the number of
        # nodes at that height -> decides whether it counts as the network's
        # block height
        if truested_height_distribution:
            most_common_height, most_common_count = truested_height_distribution[0]
        else:
            most_common_height, most_common_count = 0, 0
    dbUpdateChainState(id_list, endpoint_list, height_list, track_block_height, time_stamp)
if __name__ == '__main__':
    # Run one monitoring pass when executed as a script.
    main()
|
<filename>pynn/rnn.py
"""
Recurrent neural networks.
TODO: this is not a complete implementation.
<NAME>, 05/2015
"""
import nn
import layer
import learner
import numpy as np
import gnumpy as gnp
import math
import struct
class RNN(nn.BaseNeuralNet):
def __init__(self, in_dim=None, out_dim=None, nonlin_type=layer.NONLIN_NAME_TANH):
if out_dim is None:
return
self.in_dim = in_dim
self.has_input = in_dim is not None
self.out_dim = out_dim
self.nonlin = layer.get_nonlin_from_type_name(nonlin_type)
self._init_params()
self._update_param_size()
def _init_params(self):
if self.has_input:
self.W_ih = gnp.randn(self.in_dim, self.out_dim) / math.sqrt(self.in_dim)
self.dW_ih = self.W_ih * 0
self.W_hh = gnp.eye(self.out_dim)
self.b = gnp.zeros(self.out_dim)
self.dW_hh = self.W_hh * 0
self.db = self.b * 0
self._update_param_size()
def forward_prop(self, X=None, T=10, h_init=None, **kwargs):
"""
options:
- X can be None, when there's no input, then T must be specified
- if X is not None, T will not be used
- an extra h_init can be given to the forward prop to feed into the
first hidden state activation.
"""
if X is not None and self.has_input:
X = gnp.as_garray(X)
self.X = X
T = X.shape[0]
self.A = X.dot(self.W_ih) + self.b
else:
self.X = None
self.A = self.b.tile((T,1))
self.H = gnp.empty((T, self.out_dim))
if h_init is not None:
self.h_init = gnp.as_garray(h_init)
self.A[0] += self.h_init.reshape(1,-1).dot(self.W_hh)
else:
self.h_init = None
self.H[0] = self.nonlin.forward_prop(self.A[0])
for t in range(1, T):
self.A[t] += self.H[t-1].reshape(1,-1).dot(self.W_hh)
self.H[t] = self.nonlin.forward_prop(self.A[t])
return self.H
def backward_prop(self, grad=None, grad_end=None):
if grad is not None:
T = grad.shape[0]
assert T == self.H.shape[0]
dH = grad.copy()
else:
T = self.H.shape[0]
dH = gnp.zeros((T, self.H.shape[1]))
if grad_end is not None:
dH[-1] += gnp.as_garray(grad_end).ravel()
dA = gnp.empty((dH.shape[0], dH.shape[1]))
for t in range(1,T)[::-1]:
dA[t] = self.nonlin.backward_prop(self.A[t], self.H[t]) * dH[t]
dH[t-1] += self.W_hh.dot(dA[t].reshape(-1,1)).ravel()
dA[0] = self.nonlin.backward_prop(self.A[0], self.H[0]) * dH[0]
self.dW_hh += self.H[:-1].T.dot(dA[1:])
if self.h_init is not None:
self.dW_hh += self.h_init.reshape(-1,1).dot(dA[0].reshape(1,-1))
self.db += dA.sum(axis=0)
if self.X is not None:
dX = dA.dot(self.W_ih.T)
self.dW_ih += self.X.T.dot(dA)
else:
dX = None
if self.h_init is not None:
self.dh_init = self.W_hh.dot(dA[0].reshape(-1,1)).ravel()
return dX
def get_h_init_grad(self):
return self.dh_init if self.h_init is not None else None
def clear_gradient(self):
if self.has_input:
self.dW_ih[:] = 0
self.dW_hh[:] = 0
self.db[:] = 0
def get_param_vec(self):
if self.has_input:
return np.r_[self.W_ih.asarray().ravel(), self.W_hh.asarray().ravel(), self.b.asarray().ravel()]
else:
return np.r_[self.W_hh.asarray().ravel(), self.b.asarray().ravel()]
def get_noiseless_param_vec(self):
return self.get_param_vec()
def _set_param_from_vec(self, v, is_noiseless=False):
if self.has_input:
self.W_ih = gnp.garray(v[:self.W_ih.size].reshape(self.W_ih.shape))
self.W_hh = gnp.garray(v[-self.W_hh.size-self.b.size:-self.b.size].reshape(self.W_hh.shape))
self.b = gnp.garray(v[-self.b.size:])
def get_grad_vec(self):
if self.has_input:
return np.r_[self.dW_ih.asarray().ravel(), self.dW_hh.asarray().ravel(), self.db.asarray().ravel()]
else:
return np.r_[self.dW_hh.asarray().ravel(), self.db.asarray().ravel()]
def save_model_to_binary(self):
return struct.pack('i', self.get_type_code()) + self._save_model_to_binary()
def _save_model_to_binary(self):
s = struct.pack('iiii', (1 if self.has_input else 0), (self.in_dim if self.has_input else 0),
self.out_dim, self.nonlin.get_id())
if self.has_input:
s += self.W_ih.asarray().astype(np.float32).tostring()
s += self.W_hh.asarray().astype(np.float32).tostring()
s += self.b.asarray().astype(np.float32).tostring()
return s
def load_model_from_stream(self, f):
self.check_type_code(struct.unpack('i', f.read(4))[0])
self._load_model_from_stream(f)
def _load_model_from_stream(self, f):
has_input, self.in_dim, self.out_dim, nonlin_id = struct.unpack('iiii', f.read(4*4))
self.has_input = has_input == 1
if not self.has_input:
self.in_dim = None
self.nonlin = layer.get_nonlin_from_type_id(nonlin_id)
if self.has_input:
self.W_ih = gnp.garray(np.fromstring(f.read(self.in_dim*self.out_dim*4),
dtype=np.float32).reshape(self.in_dim, self.out_dim))
self.dW_ih = self.W_ih * 0
self.W_hh = gnp.garray(np.fromstring(f.read(self.out_dim*self.out_dim*4),
dtype=np.float32).reshape(self.out_dim, self.out_dim))
self.b = gnp.garray(np.fromstring(f.read(self.out_dim*4), dtype=np.float32))
self.dW_hh = self.W_hh * 0
self.b = self.b * 0
self._update_param_size()
@staticmethod
def get_type_code():
return 0x0399
def __repr__(self):
if self.has_input:
return 'rnn %d -> %d (%s)' % (self.in_dim, self.out_dim, self.nonlin.get_name())
else:
return 'rnn <no input> %d (%s)' % (self.out_dim, self.nonlin.get_name())
def _update_param_size(self):
    """Recompute the total number of trainable parameters in this layer."""
    size = self.W_hh.size + self.b.size
    if self.has_input:
        size += self.W_ih.size
    self.param_size = size
class RnnHybridNetwork(nn.BaseNeuralNet):
    """
    RNN network plus a feed-forward neural net on top of the RNN outputs.
    The RNN network itself can also contain multiple layers, and interleaved
    with feed-forward neural nets.
    TODO:
    - mixing RNN and feed-forward net is to be implemented
    - right now the implementation assumes there is a single RNN at the bottom
    and a neural net on top.
    """
    def __init__(self, rnn=None, feedforward_net=None):
        # No-arg construction leaves the object empty; attributes are then
        # populated by _load_model_from_stream.
        if rnn is None or feedforward_net is None:
            return
        self.rnn = rnn
        self.feedforward_net = feedforward_net
        self._update_param_size()
        self.in_dim = rnn.in_dim
        self.out_dim = feedforward_net.out_dim

    def load_target(self, *args, **kwargs):
        # Targets and loss live in the top feed-forward net.
        self.feedforward_net.load_target(*args, **kwargs)

    def get_loss(self):
        return self.feedforward_net.get_loss()

    def forward_prop(self, X=None, T=10, h_init=None, **kwargs):
        """
        options:
        - X can be None, when there's no input, then T must be specified
        - if X is not None, T will not be used
        - an extra h_init can be given to the forward prop to feed into the
          first hidden state activation.
        """
        H = self.rnn.forward_prop(X=X, T=T, h_init=h_init)
        return self.feedforward_net.forward_prop(H, **kwargs)

    def backward_prop(self, grad=None):
        # Gradient flows down: feed-forward net first, then the RNN.
        dH = self.feedforward_net.backward_prop(grad=grad)
        return self.rnn.backward_prop(grad=dH)

    def get_h_init_grad(self):
        # Gradient w.r.t. the initial hidden state (used by RnnAutoEncoder).
        return self.rnn.get_h_init_grad()

    def clear_gradient(self):
        self.rnn.clear_gradient()
        self.feedforward_net.clear_gradient()

    def get_param_vec(self):
        # Flat layout: [rnn params, feed-forward params]; mirrored by
        # _set_param_from_vec.
        return np.r_[self.rnn.get_param_vec(), self.feedforward_net.get_param_vec()]

    def get_noiseless_param_vec(self):
        return np.r_[self.rnn.get_noiseless_param_vec(), self.feedforward_net.get_noiseless_param_vec()]

    def _set_param_from_vec(self, v, is_noiseless=False):
        # Split at rnn.param_size, matching get_param_vec's layout.
        self.rnn._set_param_from_vec(v[:self.rnn.param_size], is_noiseless=is_noiseless)
        self.feedforward_net._set_param_from_vec(v[self.rnn.param_size:], is_noiseless=is_noiseless)

    def get_grad_vec(self):
        return np.r_[self.rnn.get_grad_vec(), self.feedforward_net.get_grad_vec()]

    def save_model_to_binary(self):
        # 4-byte type-code header followed by the payload.
        return struct.pack('i', self.get_type_code()) + self._save_model_to_binary()

    def _save_model_to_binary(self):
        # Payload: serialized rnn, then serialized feed-forward net.
        s = self.rnn.save_model_to_binary()
        s += self.feedforward_net.save_model_to_binary()
        return s

    def load_model_from_stream(self, f):
        self.check_type_code(struct.unpack('i', f.read(4))[0])
        self._load_model_from_stream(f)

    def _load_model_from_stream(self, f):
        # Order mirrors _save_model_to_binary.
        self.rnn = load_rnn_from_stream(f)
        self.feedforward_net = nn.NeuralNet()
        self.feedforward_net.load_model_from_stream(f)
        self._update_param_size()
        self.in_dim = self.rnn.in_dim
        self.out_dim = self.feedforward_net.out_dim

    @staticmethod
    def get_type_code():
        # Unique serialization tag (see load_rnn_from_stream).
        return 0x0369

    def __repr__(self):
        return str(self.rnn) + ' >---< ' + str(self.feedforward_net)

    def _update_param_size(self):
        self.param_size = self.rnn.param_size + self.feedforward_net.param_size
class RnnOnNeuralNet(nn.BaseNeuralNet):
    """
    RNN at the very top, with input passed through a feedforward neural net
    before feeding into the RNN.
    """
    def __init__(self, net=None, rnn=None):
        # No-arg construction leaves the object empty; attributes are then
        # populated by _load_model_from_stream.
        if net is None or rnn is None:
            return
        assert net.out_dim == rnn.in_dim
        self.net = net
        self.rnn = rnn
        self._update_param_size()
        self.in_dim = self.net.in_dim
        self.out_dim = self.rnn.out_dim

    def forward_prop(self, X, T=None, h_init=None, **kwargs):
        """
        options:
        - an extra h_init can be given to the forward prop to feed into the
          first hidden state activation.
        - T is ignored here
        """
        X_in = self.net.forward_prop(X, **kwargs)
        return self.rnn.forward_prop(X=X_in, h_init=h_init)

    def backward_prop(self, grad=None, grad_end=None):
        # grad_end is a gradient fed into the RNN's last hidden state.
        dX_in = self.rnn.backward_prop(grad=grad, grad_end=grad_end)
        return self.net.backward_prop(grad=dX_in)

    def get_h_init_grad(self):
        return self.rnn.get_h_init_grad()

    def clear_gradient(self):
        self.net.clear_gradient()
        self.rnn.clear_gradient()

    def get_param_vec(self):
        # Flat layout: [net params, rnn params]; mirrored by
        # _set_param_from_vec.
        return np.r_[self.net.get_param_vec(), self.rnn.get_param_vec()]

    def get_noiseless_param_vec(self):
        return np.r_[self.net.get_noiseless_param_vec(), self.rnn.get_noiseless_param_vec()]

    def _set_param_from_vec(self, v, is_noiseless=False):
        self.net._set_param_from_vec(v[:self.net.param_size], is_noiseless=is_noiseless)
        self.rnn._set_param_from_vec(v[self.net.param_size:], is_noiseless=is_noiseless)

    def get_grad_vec(self):
        return np.r_[self.net.get_grad_vec(), self.rnn.get_grad_vec()]

    def save_model_to_binary(self):
        # 4-byte type-code header followed by the payload.
        return struct.pack('i', self.get_type_code()) + self._save_model_to_binary()

    def _save_model_to_binary(self):
        # Payload: serialized feed-forward net, then serialized rnn.
        s = self.net.save_model_to_binary()
        s += self.rnn.save_model_to_binary()
        return s

    def load_model_from_stream(self, f):
        self.check_type_code(struct.unpack('i', f.read(4))[0])
        self._load_model_from_stream(f)

    def _load_model_from_stream(self, f):
        # Order mirrors _save_model_to_binary.
        self.net = nn.NeuralNet()
        self.net.load_model_from_stream(f)
        self.rnn = load_rnn_from_stream(f)
        self._update_param_size()
        self.in_dim = self.net.in_dim
        self.out_dim = self.rnn.out_dim

    @staticmethod
    def get_type_code():
        # Unique serialization tag (see load_rnn_from_stream).
        return 0x0301

    def __repr__(self):
        return str(self.net) + ' >---< ' + str(self.rnn)

    def _update_param_size(self):
        self.param_size = self.net.param_size + self.rnn.param_size
class RnnAutoEncoder(nn.BaseNeuralNet):
    """
    Combination of an encoder RNN and a decoder RnnHybridNetwork.

    The encoder's final hidden state is used as the decoder's initial
    hidden state; the decoder is trained to reproduce the input sequence.
    """
    def __init__(self, encoder=None, decoder=None):
        # No-arg construction leaves the object empty; attributes are then
        # populated by _load_model_from_stream.
        if encoder is None or decoder is None:
            return
        self.encoder = encoder
        self.decoder = decoder
        self._update_param_size()
        self.in_dim = encoder.in_dim
        self.out_dim = decoder.out_dim

    def load_target(self, *args, **kwargs):
        # Intentionally a no-op: the input itself is loaded as the target
        # inside forward_prop.
        pass

    def get_loss(self):
        return self.decoder.get_loss()

    def encode(self, X, h_init=None):
        # The code for X is the encoder's last hidden state.
        H = self.encoder.forward_prop(X=X, h_init=h_init)
        return H[-1]

    def forward_prop(self, X, h_init=None, **kwargs):
        """
        options:
        - an extra h_init can be given to the forward prop to feed into the
          first hidden state activation.
        """
        # input is the target
        if kwargs.get('compute_loss', False) == True:
            self.decoder.load_target(X)
        H_encoder = self.encoder.forward_prop(X=X, h_init=h_init)
        # Decode for as many steps as the input has, seeded by the code.
        return self.decoder.forward_prop(T=X.shape[0], h_init=H_encoder[-1], **kwargs)

    def backward_prop(self, grad=None):
        # The decoder's gradient w.r.t. its initial hidden state flows into
        # the encoder's last hidden state via grad_end.
        self.decoder.backward_prop(grad=grad)
        return self.encoder.backward_prop(grad_end=self.decoder.get_h_init_grad())

    def clear_gradient(self):
        self.encoder.clear_gradient()
        self.decoder.clear_gradient()

    def get_param_vec(self):
        # Flat layout: [encoder params, decoder params].
        return np.r_[self.encoder.get_param_vec(), self.decoder.get_param_vec()]

    def get_noiseless_param_vec(self):
        return np.r_[self.encoder.get_noiseless_param_vec(),
                     self.decoder.get_noiseless_param_vec()]

    def _set_param_from_vec(self, v, is_noiseless=False):
        self.encoder._set_param_from_vec(v[:self.encoder.param_size], is_noiseless=is_noiseless)
        self.decoder._set_param_from_vec(v[self.encoder.param_size:], is_noiseless=is_noiseless)

    def get_grad_vec(self):
        return np.r_[self.encoder.get_grad_vec(), self.decoder.get_grad_vec()]

    def save_model_to_binary(self):
        # 4-byte type-code header followed by the payload.
        return struct.pack('i', self.get_type_code()) + self._save_model_to_binary()

    def _save_model_to_binary(self):
        # Payload: serialized encoder, then serialized decoder.
        s = self.encoder.save_model_to_binary()
        s += self.decoder.save_model_to_binary()
        return s

    def load_model_from_stream(self, f):
        self.check_type_code(struct.unpack('i', f.read(4))[0])
        self._load_model_from_stream(f)

    def _load_model_from_stream(self, f):
        # Order mirrors _save_model_to_binary.
        self.encoder = load_rnn_from_stream(f)
        self.decoder = load_rnn_from_stream(f)
        self._update_param_size()
        self.in_dim = self.encoder.in_dim
        self.out_dim = self.decoder.out_dim

    @staticmethod
    def get_type_code():
        # Unique serialization tag (see load_rnn_from_stream).
        return 0x0363

    def __repr__(self):
        return 'Encoder { ' + str(self.encoder) + ' } Decoder { ' + str(self.decoder) + ' }'

    def _update_param_size(self):
        self.param_size = self.encoder.param_size + self.decoder.param_size
class SequenceLearner(learner.Learner):
    """
    RNN trainers.

    Each training case is a whole sequence; sequences are stored one garray
    per case inside numpy object arrays (sequences may differ in length).
    Python 2 code: uses xrange and generator.next().
    """
    def load_data(self, x_train, t_train, x_val=None, t_val=None):
        # Convert every sequence to a garray up-front.
        x_train = np.array([gnp.as_garray(x) for x in x_train], dtype=np.object)
        t_train = np.array([gnp.as_garray(t) for t in t_train], dtype=np.object)
        if x_val is not None and t_val is not None:
            x_val = np.array([gnp.as_garray(x) for x in x_val], dtype=np.object)
            t_val = np.array([gnp.as_garray(t) for t in t_val], dtype=np.object)
        super(SequenceLearner, self).load_data(x_train, t_train, x_val=x_val, t_val=t_val)

    def load_target(self):
        # Intentionally a no-op: targets are loaded per-sequence in
        # f_and_fprime / f_and_fprime_minibatch.
        pass

    def f_and_fprime(self, w):
        """Full-batch objective at parameter vector w.

        Returns (loss, grad) averaged over the total number of time steps
        across all training sequences.
        """
        self.net.set_param_from_vec(w)
        self.net.clear_gradient()
        loss = 0
        grad = None
        n_total = 0
        for i in xrange(self.x_train.shape[0]):
            self.net.load_target(self.t_train[i])
            self.net.forward_prop(self.x_train[i], add_noise=True, compute_loss=True, is_test=False)
            loss += self.net.get_loss()
            self.net.backward_prop()
            if grad is None:
                grad = self.net.get_grad_vec()
            else:
                grad += self.net.get_grad_vec()
            # Normalization is by time steps, not by number of sequences.
            n_total += self.x_train[i].shape[0]
        return loss / n_total, grad / n_total

    def f_and_fprime_minibatch(self, w):
        """Same as f_and_fprime, but over the next minibatch drawn from
        self.minibatch_generator."""
        x, t = self.minibatch_generator.next()
        self.net.set_param_from_vec(w)
        self.net.clear_gradient()
        loss = 0
        grad = None
        n_total = 0
        for i in xrange(x.shape[0]):
            self.net.load_target(t[i])
            self.net.forward_prop(x[i], add_noise=True, compute_loss=True, is_test=False)
            loss += self.net.get_loss()
            self.net.backward_prop()
            if grad is None:
                grad = self.net.get_grad_vec()
            else:
                grad += self.net.get_grad_vec()
            n_total += x[i].shape[0]
        return loss / n_total, grad / n_total

    def evaluate_loss_large_set(self, x, t, batch_size=1000):
        """
        A function used to evaluate loss on a large set of data. A direct call
        to forward_prop may blow up the memory, so this function does it in
        smaller batches.
        This function will change the target loaded with the network. Return
        the average loss across examples for this set.
        """
        # NOTE(review): batch_size is currently unused — the loop processes
        # one sequence at a time, which also bounds memory use. Confirm
        # whether real batching was intended.
        n_cases = x.shape[0]
        loss = 0
        for i in xrange(n_cases):
            self.net.load_target(t[i])
            self.net.forward_prop(x[i], add_noise=False, compute_loss=True, is_test=True)
            loss += self.net.get_loss()
        return loss / n_cases
def load_rnn_from_stream(f):
    """Factory: read a 4-byte type code from stream f and load the matching
    network type from the remainder of the stream.

    Raises:
        Exception: if the type code is not recognized.
    """
    type_code = struct.unpack('i', f.read(4))[0]
    if type_code == RNN.get_type_code():
        net = RNN()
    elif type_code == RnnOnNeuralNet.get_type_code():
        net = RnnOnNeuralNet()
    elif type_code == RnnHybridNetwork.get_type_code():
        net = RnnHybridNetwork()
    elif type_code == RnnAutoEncoder.get_type_code():
        # RnnAutoEncoder writes the same kind of type-code header but was
        # missing from this dispatch, so its saved models could not be
        # loaded through this factory.
        net = RnnAutoEncoder()
    else:
        raise Exception('Type code %d not recognized.' % type_code)
    net._load_model_from_stream(f)
    return net
|
#!/usr/bin/env python3
# vimspector - A multi-language debugging system for Vim
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib import request
import contextlib
import functools
import gzip
import hashlib
import io
import os
import shutil
import ssl
import string
import subprocess
import sys
import tarfile
import time
import traceback
import zipfile
import json
from vimspector import install, gadgets
# Module-level handle to the installer output window, if one is open.
OUTPUT_VIEW = None


class Options:
  # Global installer options; mutated via Configure().
  vimspector_base = None        # base directory for vimspector gadget data
  no_check_certificate = False  # skip TLS certificate checks on downloads
  quiet = False                 # suppress informational output (see Print)


# Singleton options instance used throughout this module.
options = Options()
def Configure( **kwargs ):
  """Set global installer options from keyword arguments."""
  for key, value in kwargs.items():
    setattr( options, key, value )
def Print( *args, **kwargs ):
  """print() wrapper that is silenced when options.quiet is set."""
  if options.quiet:
    return
  print( *args, **kwargs )
class MissingExecutable( Exception ):
  """Raised by FindExecutable when no matching executable can be found."""
  pass
def GetPATHAsList():
  """Return the PATH entries as a list; on Windows the cwd is searched first."""
  paths = os.environ[ 'PATH' ].split( os.pathsep )
  if install.GetOS() != 'windows':
    return paths
  return [ os.getcwd() ] + paths
def FindExecutable( executable: str, paths=None ):
  """Locate an executable on the supplied paths (default: PATH).

  On Windows, Windows-style extensions are tried. Returns the absolute
  path of the first candidate that is an executable file; raises
  MissingExecutable when nothing matches.
  """
  search_paths = paths or GetPATHAsList()
  if install.GetOS() == 'windows':
    extensions = [ '.exe', '.bat', '.cmd' ]
  else:
    extensions = [ '' ]
  for extension in extensions:
    candidate = ( executable if executable.endswith( extension )
                  else executable + extension )
    for directory in search_paths:
      filename = os.path.abspath( os.path.join( directory, candidate ) )
      if os.path.isfile( filename ) and os.access( filename,
                                                   os.F_OK | os.X_OK ):
        return filename
  raise MissingExecutable( f"Unable to find executable { executable } in path" )
def CheckCall( cmd, *args, **kwargs ):
  """Run cmd (resolving cmd[0] via FindExecutable) and raise on failure.

  In quiet mode, output is captured and printed only when the command
  fails.
  """
  cmd[ 0 ] = FindExecutable( cmd[ 0 ] )
  if not options.quiet:
    subprocess.check_call( cmd, *args, **kwargs )
    return
  try:
    subprocess.check_output( cmd, *args, stderr=subprocess.STDOUT, **kwargs )
  except subprocess.CalledProcessError as e:
    print( e.output.decode( 'utf-8' ) )
    raise
def PathToAnyWorkingPython3():
  """Find some python3 executable to run the installer with.

  We can't rely on sys.executable because it's usually 'vim' (fixme, not
  with neovim?).
  """
  paths = GetPATHAsList()
  if install.GetOS() == 'windows':
    candidates = [ os.path.join( sys.exec_prefix, 'python.exe' ),
                   'python.exe' ]
  else:
    candidates = [ os.path.join( sys.exec_prefix, 'bin', 'python3' ),
                   'python3',
                   'python' ]
  for candidate in candidates:
    try:
      return FindExecutable( candidate, paths=paths )
    except MissingExecutable:
      continue
  raise RuntimeError( "Unable to find a working python3" )
def RunInstaller( api_prefix, leave_open, *args, **kwargs ):
  """Run install_gadget.py asynchronously, streaming into an output window.

  args are gadget names/flags; when empty, the 'install_gadgets' setting is
  used (and if that is also empty, nothing happens). kwargs may contain
  'then', a callback invoked after a successful installation.
  """
  from vimspector import utils, output, settings
  import vim
  if not args:
    args = settings.List( 'install_gadgets' )
  if not args:
    return
  args = GadgetListToInstallerArgs( *args )
  vimspector_home = utils.GetVimValue( vim.vars, 'vimspector_home' )
  vimspector_base_dir = utils.GetVimspectorBase()
  global OUTPUT_VIEW
  _ResetInstaller()
  # Open the output window at the bottom without stealing focus.
  with utils.RestoreCurrentWindow():
    vim.command( f'botright { settings.Int( "bottombar_height" ) }new' )
    win = vim.current.window
    OUTPUT_VIEW = output.OutputView( win, api_prefix )
  # Run with an externally-located python3 (sys.executable is vim here).
  cmd = [
    PathToAnyWorkingPython3(),
    '-u',
    os.path.join( vimspector_home, 'install_gadget.py' ),
    '--quiet',
    '--update-gadget-config',
  ]
  if not vimspector_base_dir == vimspector_home:
    cmd.extend( [ '--basedir', vimspector_base_dir ] )
  cmd.extend( args )

  def handler( exit_code ):
    # Completion callback invoked by the output view's job runner.
    if exit_code == 0:
      if not leave_open:
        _ResetInstaller()
      utils.UserMessage( "Vimspector gadget installation complete!" )
      vim.command( 'silent doautocmd User VimspectorInstallSuccess' )
      if 'then' in kwargs:
        kwargs[ 'then' ]()
    else:
      utils.UserMessage( 'Vimspector gadget installation reported errors',
                         error = True )
      vim.command( 'silent doautocmd User VimspectorInstallFailed' )

  OUTPUT_VIEW.RunJobWithOutput( 'Installer',
                                cmd,
                                completion_handler = handler,
                                syntax = 'vimspector-installer' )
  OUTPUT_VIEW.ShowOutput( 'Installer' )
def RunUpdate( api_prefix, leave_open, *args ):
  """Upgrade everything currently installed, plus any gadgets in args or
  the 'install_gadgets' setting.

  Fixed: local variable was misspelled 'insatller_args'.
  """
  from vimspector import utils, settings
  Configure( vimspector_base = utils.GetVimspectorBase() )
  installer_args = list( args )
  installer_args.extend( settings.List( 'install_gadgets' ) )
  # Re-install whichever gadgets provide the currently-configured adapters.
  current_adapters = ReadAdapters( read_existing = True )
  for adapter_name in current_adapters.keys():
    installer_args.extend( FindGadgetForAdapter( adapter_name ) )
  if installer_args:
    installer_args.append( '--upgrade' )
    RunInstaller( api_prefix, leave_open, *installer_args )
def _ResetInstaller():
  """Close and clear the installer output window, if one is open."""
  global OUTPUT_VIEW
  if OUTPUT_VIEW:
    OUTPUT_VIEW.Reset()
    OUTPUT_VIEW = None
def Abort():
  """Tear down the installer UI and tell the user installation was aborted."""
  from vimspector import utils
  _ResetInstaller()
  utils.UserMessage( 'Vimspector installation aborted',
                     persist = True,
                     error = True )
def GadgetListToInstallerArgs( *gadget_list ):
  """Translate gadget names into install_gadget.py command-line flags.

  Entries starting with '-' are passed through verbatim; unknown gadget
  names are silently dropped. Disabled gadgets get --force-enable-<lang>.
  """
  installer_args = []
  for name in gadget_list:
    if name.startswith( '-' ):
      installer_args.append( name )
      continue
    gadget = gadgets.GADGETS.get( name )
    if gadget is None:
      continue
    flag = 'enable' if gadget.get( 'enabled', True ) else 'force-enable'
    installer_args.append( f'--{ flag }-{ gadget[ "language" ] }' )
  return installer_args
def FindGadgetForAdapter( adapter_name ):
  """Return the names of all gadgets that provide the given adapter.

  Adapters may be declared in a gadget's 'all' section, its OS-specific
  section, or at the gadget's top level.
  """
  candidates = []
  for name, gadget in gadgets.GADGETS.items():
    spec = {}
    spec.update( gadget.get( 'all', {} ) )
    spec.update( gadget.get( install.GetOS(), {} ) )
    adapters = dict( spec.get( 'adapters', {} ) )
    adapters.update( gadget.get( 'adapters', {} ) )
    if adapter_name in adapters:
      candidates.append( name )
  return candidates
class Manifest:
  """Records the installed spec of each gadget, persisted as a JSON file."""
  # Mapping of gadget name -> installed gadget spec.
  manifest: dict

  def __init__( self ):
    self.manifest = {}
    self.Read()

  def Read( self ):
    """Load the manifest file; a missing/unreadable file means empty."""
    try:
      with open( install.GetManifestFile( options.vimspector_base ), 'r' ) as f:
        self.manifest = json.load( f )
    except OSError:
      pass

  def Write( self ):
    """Persist the manifest to disk."""
    with open( install.GetManifestFile( options.vimspector_base ), 'w' ) as f:
      json.dump( self.manifest, f )

  def Clear( self, name: str ):
    """Forget the named gadget (no-op if absent)."""
    try:
      del self.manifest[ name ]
    except KeyError:
      pass

  def Update( self, name: str, gadget_spec: dict ):
    """Record gadget_spec as the installed spec for name (in memory only)."""
    self.manifest[ name ] = gadget_spec

  def RequiresUpdate( self, name: str, gadget_spec: dict ):
    """True if the gadget is new, its spec changed, or it tracks 'master'."""
    try:
      current_spec = self.manifest[ name ]
    except KeyError:
      # It's new.
      return True
    # If anything changed in the spec, update
    if not current_spec == gadget_spec:
      return True
    # Always update if the version string is 'master'. Probably a git repo
    # that pulls master (which tbh we shouldn't have)
    if current_spec.get( 'version' ) in ( 'master', '' ):
      return True
    if current_spec.get( 'repo', {} ).get( 'ref' ) == 'master':
      return True
    return False
def ReadAdapters( read_existing = True ):
  """Return the adapter config, optionally seeded from the gadget config
  file, always including the built-in 'multi-session' adapter."""
  all_adapters = {}
  if read_existing:
    try:
      with open( install.GetGadgetConfigFile( options.vimspector_base ),
                 'r' ) as f:
        all_adapters = json.load( f ).get( 'adapters', {} )
    except OSError:
      # No existing config; start from scratch.
      pass
  # Include "built-in" adapter for multi-session mode
  all_adapters[ 'multi-session' ] = {
    'port': '${port}',
    'host': '${host}'
  }
  return all_adapters
def WriteAdapters( all_adapters, to_file=None ):
  """Serialize the adapter map as JSON to to_file, or to the gadget config
  file when to_file is not supplied."""
  adapter_config = json.dumps( { 'adapters': all_adapters },
                               indent=2,
                               sort_keys=True )
  if to_file:
    to_file.write( adapter_config )
    return
  with open( install.GetGadgetConfigFile( options.vimspector_base ),
             'w' ) as f:
    f.write( adapter_config )
def InstallGeneric( name, root, gadget ):
  """Default installer: mark listed files executable, link the extension."""
  extension_path = gadget.get( 'extension_path', 'extension' )
  extension = os.path.join( root, extension_path )
  for relative_path in gadget.get( 'make_executable', [] ):
    MakeExecutable( os.path.join( extension, relative_path ) )
  MakeExtensionSymlink( name, root, extension_path )
def InstallCppTools( name, root, gadget ):
  """Install cpptools: restore execute bits and link the extension in.

  It's hilarious, but the execute bits aren't set in the vsix. So they
  actually have javascript code which does this. It's just a horrible
  horrible hack that really is not funny — we just chmod here instead.
  """
  extension = os.path.join( root, 'extension' )
  MakeExecutable( os.path.join( extension, 'debugAdapters', 'OpenDebugAD7' ) )
  with open( os.path.join( extension, 'package.json' ) ) as f:
    package = json.load( f )
    runtime_dependencies = package[ 'runtimeDependencies' ]
    for dependency in runtime_dependencies:
      # Fixed: .get( 'binaries' ) returned None — a TypeError when iterated
      # — for dependencies without a 'binaries' key; default to [].
      for binary in dependency.get( 'binaries', [] ):
        file_path = os.path.abspath( os.path.join( extension, binary ) )
        if os.path.exists( file_path ):
          # Reuse the absolute path already computed above.
          MakeExecutable( file_path )
  MakeExtensionSymlink( name, root )
def InstallBashDebug( name, root, gadget ):
  """Install bash-debug: the bundled bashdb must be executable."""
  MakeExecutable( os.path.join( root, 'extension', 'bashdb_dir', 'bashdb' ) )
  MakeExtensionSymlink( name, root )
def InstallDebugpy( name, root, gadget ):
  """Build debugpy in-tree with the running python, then symlink it.

  Uses the CurrentWorkingDir context manager (as the other installers in
  this file do) instead of a manual chdir/try/finally.
  """
  root = os.path.join( root, 'debugpy-{}'.format( gadget[ 'version' ] ) )
  with CurrentWorkingDir( root ):
    CheckCall( [ sys.executable, 'setup.py', 'build' ] )
  MakeSymlink( name, root )
def InstallTclProDebug( name, root, gadget ):
  """Build TclProDebug's tclparser via configure/make, then symlink it."""
  configure = [ 'sh', './configure' ]
  if install.GetOS() == 'macos':
    # Apple removed the headers from system frameworks because they are
    # determined to make life difficult. And the TCL configure scripts are super
    # old so don't know about this. So we do their job for them and try and find
    # a tclConfig.sh.
    #
    # NOTE however that in Apple's infinite wisdom, installing the "headers" in
    # the other location is actually broken because the paths in the
    # tclConfig.sh are pointing at the _old_ location. You actually do have to
    # run the package installation which puts the headers back in order to work.
    # This is why the below list is does not contain stuff from
    # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform
    # '/Applications/Xcode.app/Contents/Developer/Platforms'
    # '/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System'
    # '/Library/Frameworks/Tcl.framework',
    # '/Applications/Xcode.app/Contents/Developer/Platforms'
    # '/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System'
    # '/Library/Frameworks/Tcl.framework/Versions'
    # '/Current',
    for p in [ '/usr/local/opt/tcl-tk/lib' ]:
      if os.path.exists( os.path.join( p, 'tclConfig.sh' ) ):
        configure.append( '--with-tcl=' + p )
        break
  with CurrentWorkingDir( os.path.join( root, 'lib', 'tclparser' ) ):
    CheckCall( configure )
    CheckCall( [ 'make' ] )
  MakeSymlink( name, root )
def InstallNodeDebug( name, root, gadget ):
  """Install node-debug: npm install + npm run build, then symlink."""
  with CurrentWorkingDir( root ):
    for npm_args in ( [ 'install' ], [ 'run', 'build' ] ):
      CheckCall( [ 'npm' ] + npm_args )
  MakeSymlink( name, root )
def InstallLuaLocal( name, root, gadget ):
  """Install local-lua-debugger: npm install + npm run build, then symlink."""
  with CurrentWorkingDir( root ):
    CheckCall( [ 'npm', 'install' ] )
    CheckCall( [ 'npm', 'run', 'build' ] )
  MakeSymlink( name, root )
def InstallGagdet( name: str,
                   gadget: dict,
                   manifest: Manifest,
                   succeeded: list,
                   failed: list,
                   all_adapters: dict ):
  """Install (or skip, if up to date) a single gadget.

  Appends name to succeeded or failed, merges the gadget's adapters into
  all_adapters, and records the installed spec in manifest. Any exception
  is caught and reported as a failure. (NOTE(review): the function name
  'InstallGagdet' is a typo, but it is the public name — kept.)

  Fixed: the failure message applied str.format to an f-string that had
  already been interpolated (a no-op at best).
  """
  try:
    # Spec is an os-specific definition of the gadget
    spec = {}
    spec.update( gadget.get( 'all', {} ) )
    spec.update( gadget.get( install.GetOS(), {} ) )

    def save_adapters():
      # allow per-os adapter overrides. v already did that for us...
      all_adapters.update( spec.get( 'adapters', {} ) )
      # add any other "all" adapters
      all_adapters.update( gadget.get( 'adapters', {} ) )

    if 'download' in gadget:
      if 'file_name' not in spec:
        raise RuntimeError( "Unsupported OS {} for gadget {}".format(
          install.GetOS(),
          name ) )
      print( f"Installing {name}@{spec[ 'version' ]}..." )
      spec[ 'download' ] = gadget[ 'download' ]
      if not manifest.RequiresUpdate( name, spec ):
        save_adapters()
        print( " - Skip - up to date" )
        return
      destination = os.path.join(
        install.GetGadgetDir( options.vimspector_base ),
        'download',
        name,
        spec[ 'version' ] )
      url = string.Template( gadget[ 'download' ][ 'url' ] ).substitute( spec )
      file_path = DownloadFileTo(
        url,
        destination,
        file_name = gadget[ 'download' ].get( 'target' ),
        checksum = spec.get( 'checksum' ),
        check_certificate = not options.no_check_certificate )
      root = os.path.join( destination, 'root' )
      ExtractZipTo(
        file_path,
        root,
        format = gadget[ 'download' ].get( 'format', 'zip' ) )
    elif 'repo' in gadget:
      url = string.Template( gadget[ 'repo' ][ 'url' ] ).substitute( spec )
      ref = string.Template( gadget[ 'repo' ][ 'ref' ] ).substitute( spec )
      print( f"Installing {name}@{ref}..." )
      spec[ 'repo' ] = gadget[ 'repo' ]
      if not manifest.RequiresUpdate( name, spec ):
        save_adapters()
        print( " - Skip - up to date" )
        return
      destination = os.path.join(
        install.GetGadgetDir( options.vimspector_base ),
        'download',
        name )
      CloneRepoTo( url, ref, destination )
      root = destination
    # Gadget-specific post-extraction hook, else the generic installer.
    if 'do' in gadget:
      gadget[ 'do' ]( name, root, spec )
    else:
      InstallGeneric( name, root, spec )
    save_adapters()
    manifest.Update( name, spec )
    succeeded.append( name )
    print( f" - Done installing {name}" )
  except Exception as e:
    if not options.quiet:
      traceback.print_exc()
    failed.append( name )
    print( f" - FAILED installing {name}: {e}" )
@contextlib.contextmanager
def CurrentWorkingDir( d ):
  """Context manager: chdir into d, restoring the previous cwd on exit."""
  previous = os.getcwd()
  os.chdir( d )
  try:
    yield
  finally:
    os.chdir( previous )
def MakeExecutable( file_path ):
  """chmod the file to 0755.

  TODO: import stat and use them by _just_ adding the X bit.
  """
  Print( f'Making executable: { file_path }' )
  os.chmod( file_path, 0o755 )
def WithRetry( f ):
  """Decorator: retry f up to 5 times, sleeping 1 second between attempts.

  If every attempt raises, the last exception is re-raised.
  """
  max_attempts = 5
  delay_seconds = 1  # seconds

  @functools.wraps( f )
  def wrapper( *args, **kwargs ):
    last_error = None
    for _ in range( max_attempts ):
      try:
        return f( *args, **kwargs )
      except Exception as e:
        last_error = e
        Print( "Failed - {}, will retry in {} seconds".format( e,
                                                               delay_seconds ) )
        time.sleep( delay_seconds )
    raise last_error
  return wrapper
@WithRetry
def UrlOpen( *args, **kwargs ):
  """urllib urlopen wrapped with retries (downloads can be flaky)."""
  return request.urlopen( *args, **kwargs )
def DownloadFileTo( url,
                    destination,
                    file_name = None,
                    checksum = None,
                    check_certificate = True ):
  """Download url into the destination directory and return the file path.

  An existing file with a matching checksum is reused; otherwise it is
  removed and re-downloaded. Raises RuntimeError on a post-download
  checksum mismatch.
  """
  if not file_name:
    # Default to the final path segment of the URL.
    file_name = url.split( '/' )[ -1 ]
  file_path = os.path.abspath( os.path.join( destination, file_name ) )
  if not os.path.isdir( destination ):
    os.makedirs( destination )
  if os.path.exists( file_path ):
    if checksum:
      if ValidateCheckSumSHA256( file_path, checksum ):
        Print( "Checksum matches for {}, using it".format( file_path ) )
        return file_path
      else:
        Print( "Checksum doesn't match for {}, removing it".format(
          file_path ) )
    # Either no checksum to trust, or it mismatched: re-download.
    Print( "Removing existing {}".format( file_path ) )
    os.remove( file_path )
  r = request.Request( url, headers = { 'User-Agent': 'Vimspector' } )
  Print( "Downloading {} to {}/{}".format( url, destination, file_name ) )
  if not check_certificate:
    # Deliberately insecure: --no-check-certificate was requested.
    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    kwargs = { "context": context }
  else:
    kwargs = {}
  with contextlib.closing( UrlOpen( r, **kwargs ) ) as u:
    with open( file_path, 'wb' ) as f:
      f.write( u.read() )
  if checksum:
    if not ValidateCheckSumSHA256( file_path, checksum ):
      raise RuntimeError(
        'Checksum for {} ({}) does not match expected {}'.format(
          file_path,
          GetChecksumSHA254( file_path ),
          checksum ) )
  else:
    # No expected checksum supplied; log the actual one.
    Print( "Checksum for {}: {}".format( file_path,
                                         GetChecksumSHA254( file_path ) ) )
  return file_path
def GetChecksumSHA254( file_path ):
  """Return the hex SHA-256 digest of the file's contents.

  NOTE(review): the name says "SHA254" but this is sha256; the misspelled
  public name is kept for compatibility.
  """
  with open( file_path, 'rb' ) as existing_file:
    digest = hashlib.sha256( existing_file.read() )
  return digest.hexdigest()
def ValidateCheckSumSHA256( file_path, checksum ):
  """True if the file's SHA-256 hex digest equals the expected checksum."""
  return GetChecksumSHA254( file_path ) == checksum
def RemoveIfExists( destination ):
  """Remove destination, be it a file, a symlink or a directory.

  If a directory can't be deleted (e.g. held open on Windows), it is moved
  aside to a destination.N backup name instead.
  """
  try:
    # Handles files and symlinks (including broken ones).
    os.remove( destination )
    Print( "Removed file {}".format( destination ) )
    return
  except OSError:
    pass
  N = 1

  def BackupDir():
    # Reads N from the enclosing scope, so it tracks the bumps below.
    return "{}.{}".format( destination, N )

  # Try to clear out old backup dirs, bumping N past any we fail to remove,
  # so BackupDir() ends up naming a usable slot.
  while os.path.isdir( BackupDir() ):
    Print( "Removing old dir {}".format( BackupDir() ) )
    try:
      shutil.rmtree( BackupDir() )
      Print ( "OK, removed it" )
      break
    except OSError as e:
      Print ( f"FAILED to remove {BackupDir()}: {e}" )
      N = N + 1
  if os.path.exists( destination ):
    Print( "Removing dir {}".format( destination ) )
    try:
      shutil.rmtree( destination )
    except OSError:
      # Can't delete it? Move it out of the way under the backup name.
      Print( "FAILED, moving {} to dir {}".format( destination, BackupDir() ) )
      os.rename( destination, BackupDir() )
# Python's ZipFile module strips execute bits from files, for no good reason
# other than crappy code. Let's do it's job for it.
class ModePreservingZipFile( zipfile.ZipFile ):
  """ZipFile subclass that re-applies Unix permission bits on extraction.

  Python's ZipFile module strips execute bits from files, for no good
  reason other than crappy code; this does its job for it.
  """
  def extract( self, member, path = None, pwd = None ):
    if not isinstance( member, zipfile.ZipInfo ):
      member = self.getinfo( member )
    if path is None:
      path = os.getcwd()
    ret_val = self._extract_member( member, path, pwd )
    # The high 16 bits of external_attr hold the Unix st_mode.
    attr = member.external_attr >> 16
    os.chmod( ret_val, attr )
    return ret_val
def ExtractZipTo( file_path, destination, format ):
  """Extract an archive ('zip', 'zip.gz' or 'tar') into destination,
  replacing anything already there."""
  Print( "Extracting {} to {}".format( file_path, destination ) )
  RemoveIfExists( destination )
  if format == 'zip':
    with ModePreservingZipFile( file_path ) as f:
      f.extractall( path = destination )
  elif format == 'zip.gz':
    # gunzip into memory, then unzip the decompressed bytes.
    with gzip.open( file_path, 'rb' ) as f:
      file_contents = f.read()
    with ModePreservingZipFile( io.BytesIO( file_contents ) ) as f:
      f.extractall( path = destination )
  elif format == 'tar':
    try:
      with tarfile.open( file_path ) as f:
        f.extractall( path = destination )
    except Exception:
      # There seems to a bug in python's tarfile that means it can't read some
      # windows-generated tar files; fall back to the system tar.
      os.makedirs( destination )
      with CurrentWorkingDir( destination ):
        CheckCall( [ 'tar', 'zxvf', file_path ] )
def MakeExtensionSymlink( name, root, extension_path = 'extension' ):
  """Symlink the gadget's extension directory into the gadget dir.

  Fixed: a stray trailing comma turned the call statement into a 1-tuple
  expression (harmless but clearly unintended).
  """
  MakeSymlink( name, os.path.join( root, extension_path ) )
def MakeSymlink( link, pointing_to, in_folder = None ):
  """Create (or replace) in_folder/link pointing at pointing_to.

  in_folder defaults to the gadget directory. Uses a relative symlink on
  POSIX and a directory junction on Windows.
  """
  if not in_folder:
    in_folder = install.GetGadgetDir( options.vimspector_base )
  RemoveIfExists( os.path.join( in_folder, link ) )
  in_folder = os.path.abspath( in_folder )
  pointing_to_relative = os.path.relpath( os.path.abspath( pointing_to ),
                                          in_folder )
  link_path = os.path.join( in_folder, link )
  if install.GetOS() == 'windows':
    # While symlinks do exist on Windows, they require elevated privileges, so
    # let's use a directory junction which is all we need.
    link_path = os.path.abspath( link_path )
    if os.path.isdir( link_path ):
      os.rmdir( link_path )
    CheckCall( [ 'cmd.exe', '/c', 'mklink', '/J', link_path, pointing_to ] )
  else:
    os.symlink( pointing_to_relative, link_path )
def CloneRepoTo( url, ref, destination ):
  """Freshly clone url into destination, check out ref, sync submodules."""
  RemoveIfExists( destination )
  CheckCall( [ 'git', 'clone', url, destination ] )
  in_repo = [ 'git', '-C', destination ]
  for git_args in ( [ 'checkout', ref ],
                    [ 'submodule', 'sync', '--recursive' ],
                    [ 'submodule', 'update', '--init', '--recursive' ] ):
    CheckCall( in_repo + git_args )
def AbortIfSUperUser( force_sudo ):
  """Exit if running under sudo, unless force_sudo is set (then just warn).

  TODO: We should probably check the effective uid too.
  """
  if 'SUDO_COMMAND' not in os.environ:
    return
  if force_sudo:
    print( "*** RUNNING AS SUPER USER DUE TO force_sudo! "
           " All bets are off. ***" )
  else:
    sys.exit( "This script should *not* be run as super user. Aborting." )
|
<reponame>shashanksen/beam-nuggets<filename>beam_nuggets/io/kafkaio.py
from __future__ import division, print_function
from apache_beam import PTransform, ParDo, DoFn, Create
from kafka import KafkaConsumer, KafkaProducer
class KafkaConsume(PTransform):
    """A :class:`~apache_beam.transforms.ptransform.PTransform` for reading from an Apache Kafka topic. This is a streaming
    Transform that never returns. The transform uses `KafkaConsumer` from the
    `kafka` python library.

    It outputs a :class:`~apache_beam.pvalue.PCollection` of
    ``key-values:s``, each object is a Kafka message in the form (msg-key, msg)

    Args:
        consumer_config (dict): the kafka consumer configuration. The
            supported configurations are those of `KafkaConsumer` from
            the `kafka` python library.

    Examples:
        Consuming from a Kafka Topic `notifications` ::

            from __future__ import print_function
            import apache_beam as beam
            from apache_beam.options.pipeline_options import PipelineOptions
            from beam_nuggets.io import kafkaio

            kafka_topic = "notifications"
            kafka_config = {"topic": kafka_topic,
                            "bootstrap_servers": "localhost:9092",
                            "group_id": "notification_consumer_group"}

            with beam.Pipeline(options=PipelineOptions()) as p:
                notifications = p | "Reading messages from Kafka" >> kafkaio.KafkaConsume(kafka_config)
                notifications | 'Writing to stdout' >> beam.Map(print)

        The output will be something like ::

            ("device 1", {"status": "healthy"})
            ("job #2647", {"status": "failed"})

        Where the first element of the tuple is the Kafka message key and the second element is the Kafka message being passed through the topic
    """

    def __init__(self, consumer_config, *args, **kwargs):
        """Initializes ``KafkaConsume``

        Args:
            consumer_config (dict): KafkaConsumer configuration; must
                include a 'topic' key (popped before constructing the
                consumer).
        """
        super(KafkaConsume, self).__init__()
        self._config = consumer_config

    def expand(self, pcoll):
        # Seed with the single config dict, then fan out into the
        # (never-ending) consumer DoFn.
        return (
            pcoll
            | Create([self._config])
            | ParDo(_ConsumeKafkaTopic())
        )
class _ConsumeKafkaTopic(DoFn):
    """Internal ``DoFn`` to read from Kafka topic and return messages"""

    def process(self, config):
        # Copy first: popping 'topic' must not mutate the caller's dict.
        consumer_config = dict(config)
        topic = consumer_config.pop('topic')
        consumer = KafkaConsumer(topic, **consumer_config)
        # This iteration never terminates: KafkaConsumer blocks for more
        # messages (streaming transform).
        for msg in consumer:
            try:
                # NOTE(review): the key is yielded as raw bytes while the
                # value is decoded to str — confirm this asymmetry is
                # intentional.
                yield (msg.key, msg.value.decode())
            except Exception as e:
                # Best-effort: log and skip messages that fail to decode.
                print(e)
                continue
class KafkaProduce(PTransform):
    """A :class:`~apache_beam.transforms.ptransform.PTransform` for pushing messages
    into an Apache Kafka topic. This class expects a tuple with the first element being the message key
    and the second element being the message. The transform uses `KafkaProducer`
    from the `kafka` python library.

    Args:
        topic: Kafka topic to publish to
        servers: list of Kafka servers to listen to

    Examples:
        Pushing message to a Kafka Topic `notifications` ::

            from __future__ import print_function
            import apache_beam as beam
            from apache_beam.options.pipeline_options import PipelineOptions
            from beam_nuggets.io import kafkaio

            with beam.Pipeline(options=PipelineOptions()) as p:
                notifications = (p
                                 | "Creating data" >> beam.Create([('dev_1', '{"device": "0001", status": "healthy"}')])
                                 | "Pushing messages to Kafka" >> kafkaio.KafkaProduce(
                                     topic='notifications',
                                     servers="localhost:9092"
                                 )
                                 )
                notifications | 'Writing to stdout' >> beam.Map(print)

        The output will be something like ::

            ("dev_1", '{"device": "0001", status": "healthy"}')

        Where the key is the Kafka topic published to and the element is the Kafka message produced
    """

    def __init__(self, topic=None, servers='127.0.0.1:9092'):
        """Initializes ``KafkaProduce``

        Args:
            topic: target Kafka topic name.
            servers: bootstrap server(s) passed to KafkaProducer.
        """
        super(KafkaProduce, self).__init__()
        self._attributes = dict(
            topic=topic,
            servers=servers)

    def expand(self, pcoll):
        return (
            pcoll
            | ParDo(_ProduceKafkaMessage(self._attributes))
        )
class _ProduceKafkaMessage(DoFn):
    """Internal ``DoFn`` to publish a ``(key, message)`` element to a Kafka topic."""

    def __init__(self, attributes, *args, **kwargs):
        super(_ProduceKafkaMessage, self).__init__(*args, **kwargs)
        # Expected keys: 'topic' and 'servers' (bootstrap server string).
        self.attributes = attributes

    def start_bundle(self):
        # One producer per bundle; closed (which also flushes) in finish_bundle.
        self._producer = KafkaProducer(bootstrap_servers=self.attributes["servers"])

    def finish_bundle(self):
        self._producer.close()

    def process(self, element):
        # element is a (key, message) tuple of strings; both are encoded
        # before sending.  The original wrapped this in a
        # ``except Exception as e: raise`` no-op handler, which has been
        # removed -- send errors propagate and fail the bundle exactly as
        # before.
        self._producer.send(self.attributes['topic'],
                            element[1].encode(),
                            key=element[0].encode())
        yield element
|
<filename>rmexp/dataset/fileutils.py<gh_stars>1-10
#! /usr/bin/env python
import functools
import glob
import importlib
import multiprocessing
import os
import shutil
import fire
from logzero import logger
def rename_files_in_directory_to_sequence(dir_path, ext='jpg'):
    """Rename files to be the format of 0000000001.ext.

    This is useful for ffmpeg/avconv to merge images into videos.  The
    originals are staged in a temporary ``<dir_path>.bk`` copy, matching
    files in ``dir_path`` are removed, and the staged files are copied
    back (in sorted filename order) under 10-digit sequence names
    starting at 1.  Files with other extensions are left untouched.

    Arguments:
        dir_path {str} -- directory containing the files to rename
    Keyword Arguments:
        ext {str} -- file extension, without the dot (default: {'jpg'})
    """
    # Stage a backup copy so the renames are not done in place.
    bk_dir_path = os.path.abspath(dir_path).rstrip(os.sep) + '.bk'
    shutil.copytree(dir_path, bk_dir_path)
    # Remove the files that are about to be replaced.
    # Bug fix: the pattern was '*' + ext, which also matched names merely
    # ending in the extension letters (e.g. 'foojpg'), inconsistent with
    # the '*.' + ext pattern used below.
    remove_file_paths = sorted(glob.glob(os.path.join(dir_path, '*.' + ext)))
    for file_path in remove_file_paths:
        os.remove(file_path)
    file_paths = sorted(glob.glob(os.path.join(bk_dir_path, '*.' + ext)))
    for idx, file_path in enumerate(file_paths):
        shutil.copy(file_path, os.path.join(
            dir_path, '{:010d}'.format(idx + 1) + '.' + ext))
    shutil.rmtree(bk_dir_path)
def _get_max_trace_num(dir_path):
fs = os.listdir(dir_path)
nums = []
for f in fs:
try:
f_int = int(f)
nums.append(f_int)
except ValueError as e:
pass
assert len(nums) > 0, 'dir_path: {} does not have traces'.format(dir_path)
return min(nums), max(nums)
def get_video_resolution(video_uri):
    """Return video resolution (height, width)

    Opens *video_uri* with OpenCV, reads the first frame and returns its
    (height, width).

    Raises:
        ValueError: when the file is missing or the stream cannot be decoded
            (cv2 signals both by returning a None frame).
    """
    import cv2
    cam = cv2.VideoCapture(video_uri)
    _, img = cam.read()
    if img is None:
        raise ValueError(
            'Error reading file: {}'.format(video_uri))
    cam.release()
    return img.shape[:2]
def rename_default_trace(dir_path):
    """Rename each trace's real ``video.mp4`` to ``video-<maxwh>.mp4``.

    For every numeric trace subdirectory, a non-symlink video.mp4 is
    renamed to encode its resolution (suffix = max(height, width)), e.g.
    video-1920.mp4.  Symlinks are skipped (they already point at a
    renamed file).  Asserts each video is landscape.
    """
    import cv2
    import shutil
    trace_num_min, trace_num_max = _get_max_trace_num(dir_path)
    for i in range(trace_num_min, trace_num_max + 1):
        cur_default_trace = os.path.join(dir_path, str(i), 'video.mp4')
        if os.path.exists(cur_default_trace) and not os.path.islink(cur_default_trace):
            res = get_video_resolution(cur_default_trace)
            # (height, width): height <= width means landscape.
            assert res[0] <= res[1], '{} is not a landscape video!'.format(
                cur_default_trace)
            max_wh = max(res)
            # rename
            new_path = os.path.join(dir_path, str(
                i), 'video-{}.mp4'.format(max_wh))
            logger.debug('mv {} --> {}'.format(cur_default_trace, new_path))
            shutil.move(cur_default_trace, new_path)
def _get_highest_resolution_trace_path(trace_dir_path, trace_pattern='video*mp4'):
    """Return the video path with the highest resolution.
    Highest is defined by the max of width and height.

    Ties keep the first match in glob order (same as the original stable
    reverse sort keyed on resolution only).
    """
    pattern = os.path.join(trace_dir_path, trace_pattern)
    candidates = glob.glob(pattern)
    assert len(
        candidates) > 0, 'dir_path: {} does not have traces'.format(pattern)
    return max(candidates,
               key=lambda trace_path: max(get_video_resolution(trace_path)))
def resize_trace(input_path, output_path, width):
    """Scale a video to *width* pixels wide (height auto, kept even) via ffmpeg.

    Uses -crf 18 (visually lossless range) and -vsync passthrough to
    preserve frame timing.

    Raises:
        ValueError: when ffmpeg exits with a non-zero return code.
    """
    import subprocess
    # Use an argument list with shell=False: immune to shell injection and
    # robust to paths containing spaces or metacharacters.  Same ffmpeg
    # invocation as the previous shell string.
    cmd = ['ffmpeg', '-y', '-i', input_path,
           '-vf', 'scale={}:-2'.format(width),
           '-crf', '18', '-vsync', 'passthrough', output_path]
    logger.debug('issuing: {}...'.format(' '.join(cmd)))
    p = subprocess.Popen(cmd)
    p.wait()
    if p.returncode != 0:
        raise ValueError(
            'Cmd Error: {} has return code {}'.format(' '.join(cmd), p.returncode))
def _get_trace_dir_path(app_dataset_dir, trace_ids):
if trace_ids is None:
paths = [os.path.join(app_dataset_dir, trace_id)
for trace_id in os.listdir(app_dataset_dir)
if os.path.isdir(os.path.join(app_dataset_dir, trace_id))]
else:
paths = [os.path.join(app_dataset_dir, str(trace_id))
for trace_id in trace_ids if os.path.isdir(os.path.join(app_dataset_dir, str(trace_id)))]
return paths
def correct_app_dataset_resolution(app, app_dataset_dir_path, trace_ids=None, force=False):
    """Correct default video trace resolution based on app setting.
    It will take the max resolution video it finds, scale it
    to the desired resolution, and create a symlink.

    app: app name (an importable module; its config.IMAGE_MAX_WH is the
    target max(width, height)).
    app_dataset_dir_path: trace directory of the app. Within this directory each should be directories
    for each trace e.g. 0,1,2.
    trace_ids: a list of trace directory names. The convention is to use the id (e.g. 0, 1, 2)
    as the directory name.
    force: when True, always recreate the video.mp4 symlink even if the
    existing one is already within bounds.
    """
    if trace_ids is not None:
        assert type(
            trace_ids) == list, 'trace_ids should be a list of trace directory names not {}'.format(
            type(trace_ids))
    app = importlib.import_module(app)
    trace_dir_paths = _get_trace_dir_path(app_dataset_dir_path, trace_ids)
    # resize traces in trace_dir_path
    for trace_dir_path in trace_dir_paths:
        logger.debug('working on trace directory {}...'.format(trace_dir_path))
        trace_path = _get_highest_resolution_trace_path(trace_dir_path)
        trace_file_name = os.path.basename(trace_path)
        default_trace_path = os.path.join(trace_dir_path, 'video.mp4')
        # check if existing video.mp4 satisfies the requirements
        if os.path.islink(default_trace_path):
            if force:
                os.unlink(default_trace_path)
            else:
                shape = get_video_resolution(default_trace_path)
                if max(shape) <= app.config.IMAGE_MAX_WH:
                    # Existing link already within bounds; keep it as-is.
                    continue
                os.unlink(default_trace_path)
        # Source too large for the app: downscale into video-<maxwh>.mp4
        # and link to the resized file instead.
        if max(get_video_resolution(trace_path)) > app.config.IMAGE_MAX_WH:
            resized_trace_path = os.path.join(
                trace_dir_path, 'video-{}.mp4'.format(app.config.IMAGE_MAX_WH))
            resize_trace(trace_path, resized_trace_path,
                         app.config.IMAGE_MAX_WH)
            trace_file_name = 'video-{}.mp4'.format(app.config.IMAGE_MAX_WH)
        # create link (relative, so the dataset dir stays relocatable)
        if os.path.exists(default_trace_path):
            # A real file here would be silently shadowed -- refuse.
            if not os.path.islink(default_trace_path):
                raise ValueError('{} is not a link'.format(default_trace_path))
        os.symlink(trace_file_name, default_trace_path)
def decode_to_imgs_trace_dir(dir_path, force=False,
                             video_fname='video.mp4',
                             output_image_dname='video-images'
                             ):
    """Extract a video.mp4 in a trace directory to a dir of images.

    Frames are written as high-quality jpgs named 0000000000.jpg onward.

    Raises:
        ValueError: when the video is missing, the output dir already
            exists (unless force=True), or ffmpeg fails.
    """
    import subprocess
    video_fp = os.path.join(dir_path, video_fname)
    if not os.path.exists(video_fp):
        raise ValueError('{} does not exist!'.format(video_fp))
    img_dir = os.path.join(dir_path, output_image_dname)
    if os.path.exists(img_dir):
        if force:
            logger.debug(
                'WARNING: {} exists! Force removing ...'.format(img_dir))
            shutil.rmtree(img_dir)
        else:
            raise ValueError('{} exists!'.format(img_dir))
    os.mkdir(img_dir)
    # q:v 1 is needed to make extracted jpeg images look reasonable.
    # the default value is 24.8
    # https://superuser.com/questions/318845/improve-quality-of-ffmpeg-created-jpgs
    cmd = 'ffmpeg -i {} -q:v 1 -vsync passthrough -start_number 0 {}/%010d.jpg'.format(
        video_fp, img_dir)
    logger.debug('issuing: {}'.format(cmd))
    p = subprocess.Popen(cmd, shell=True)
    p.wait()
    if p.returncode != 0:
        raise ValueError(
            'Cmd Error: {} has return code {}'.format(cmd, p.returncode))
def decode_video_to_imgs_in_app_dataset(app_dataset_dir_path, trace_ids=None, force=False):
    """Extract all traces in app_dataset_dir_path to images, in parallel.

    Args:
        app_dataset_dir_path: dataset dir containing trace subdirectories.
        trace_ids: optional list of trace directory names to restrict to.
        force: forwarded to decode_to_imgs_trace_dir (overwrite outputs).
    """
    if trace_ids is not None:
        assert isinstance(trace_ids, list), \
            'trace_ids should be a list of trace directory names not {}'.format(
                type(trace_ids))
    trace_dir_paths = _get_trace_dir_path(app_dataset_dir_path, trace_ids)
    job = functools.partial(decode_to_imgs_trace_dir, force=force)
    worker_pool = multiprocessing.Pool(10)
    try:
        # Bug fix: the original used map_async() and never retrieved the
        # result, so any worker exception was silently discarded.  map()
        # blocks until completion and re-raises worker failures.
        worker_pool.map(job, trace_dir_paths)
    finally:
        worker_pool.close()
        worker_pool.join()
def get_image_sequence_resolution(image_sequence_path):
    """Return (height, width) of the jpg sequence in *image_sequence_path*.

    Reads only the first glob'd image -- assumes all frames share one
    resolution.  Asserts at least one .jpg exists.
    """
    import cv2
    img_ps = glob.glob(os.path.join(image_sequence_path, '*.jpg'))
    assert len(img_ps) > 0
    im = cv2.imread(img_ps[0])
    return im.shape[:2]
def get_dataset_stats(app, dir_path, store=False):
    """Get statistics of datasets

    Logs resolution / frame count / approximate length for each trace's
    extracted image sequence and (optionally) persists them to the
    DataStat table.

    Args:
        app: app name used as the DB key.
        dir_path: dataset dir containing numeric trace subdirectories,
            each with a 'video-images' folder.
        store: when True, commit stats to the DB (dry run otherwise).
    """
    import json
    from rmexp import dbutils, schema
    from rmexp.schema import models
    import cv2
    trace_num_min, trace_num_max = _get_max_trace_num(dir_path)
    for i in range(trace_num_min, trace_num_max + 1):
        default_trace = os.path.join(dir_path, str(i), 'video-images')
        resolution = get_image_sequence_resolution(default_trace)
        frames = len(glob.glob(os.path.join(default_trace, '*.jpg')))
        # Length in seconds assumes 30 fps -- TODO confirm against traces.
        length = round(frames / 30.0, 1)
        stat = json.dumps(
            {
                'resolution': resolution,
                'frames': frames,
                'length': length
            }
        )
        logger.info('{} ({}): {}'.format(app, i, stat))
        with dbutils.session_scope(dry_run=not store) as sess:
            dbutils.insert_or_update_one(
                sess,
                models.DataStat,
                {'app': app, 'trace': str(i)},
                {'value': stat}
            )
if __name__ == '__main__':
    # Expose every module-level function as a CLI subcommand via python-fire.
    fire.Fire()
|
import json
import urllib.parse
from unittest.mock import Mock, patch
from urllib.parse import parse_qs, urlparse
from django.contrib.sites.models import Site
from django.test import RequestFactory, override_settings
from microsoft_auth.client import MicrosoftClient
from . import TestCase
# Fixture values shared across the client tests below.
STATE = "test_state"
CLIENT_ID = "test_client_id"
REDIRECT_URI = "https://testserver/microsoft/auth-callback/"
ACCESS_TOKEN = "test_access_token"
@override_settings(SITE_ID=1)
class ClientTests(TestCase):
    """Tests for ``MicrosoftClient`` scope, state and authorization-URL
    construction, including multi-site resolution."""

    @classmethod
    def setUpClass(cls):
        # Bug fix: a @classmethod receives the class object; the parameter
        # was previously (misleadingly) named ``self``.
        super().setUpClass()

    def setUp(self):
        super().setUp()
        self.factory = RequestFactory()

    def _get_auth_url(self, base_url, scopes=MicrosoftClient.SCOPE_MICROSOFT):
        """Build the expected (authorization_url, state) pair."""
        args = {
            "response_type": "code",
            "client_id": CLIENT_ID,
            "redirect_uri": REDIRECT_URI,
            "scope": " ".join(scopes),
            "state": STATE,
            "response_mode": "form_post",
        }
        return (base_url + "?" + urllib.parse.urlencode(args), STATE)

    def _assert_auth_url(self, expected, actual):
        """Compare (url, state) pairs ignoring query-parameter ordering."""
        # parse urls
        e_url = urlparse(expected[0])
        e_qs = parse_qs(e_url.query)
        a_url = urlparse(actual[0])
        a_qs = parse_qs(a_url.query)
        # assert url
        self.assertEqual(e_url.scheme, a_url.scheme)
        self.assertEqual(e_url.path, a_url.path)
        self.assertEqual(e_url.netloc, a_url.netloc)
        self.assertEqual(len(e_qs.items()), len(a_qs.items()))
        for key, value in e_qs.items():
            self.assertEqual(value, a_qs[key])
        # assert state
        self.assertEqual(expected[1], actual[1])

    def test_scope(self):
        """Default scope is the space-joined Microsoft scope list."""
        expected_scopes = " ".join(MicrosoftClient.SCOPE_MICROSOFT)
        auth_client = MicrosoftClient()
        self.assertEqual(expected_scopes, auth_client.scope)

    def test_state(self):
        """State passed to the constructor is exposed unchanged."""
        auth_client = MicrosoftClient(state=STATE)
        self.assertEqual(STATE, auth_client.state)

    def test_redirect_uri(self):
        """Redirect URI is derived from the current site."""
        auth_client = MicrosoftClient()
        self.assertEqual(REDIRECT_URI, auth_client.redirect_uri)

    @override_settings(MICROSOFT_AUTH_CLIENT_ID=CLIENT_ID)
    def test_authorization_url(self):
        """authorization_url() matches the OpenID config endpoint + args."""
        auth_client = MicrosoftClient(state=STATE)
        base_url = auth_client.openid_config["authorization_endpoint"]
        expected_auth_url = self._get_auth_url(base_url)
        self._assert_auth_url(
            expected_auth_url, auth_client.authorization_url()
        )

    def test_valid_scopes(self):
        scopes = MicrosoftClient.SCOPE_MICROSOFT
        auth_client = MicrosoftClient()
        self.assertTrue(auth_client.valid_scopes(scopes))

    def test_valid_scopes_invalid(self):
        scopes = ["fake"]
        auth_client = MicrosoftClient()
        self.assertFalse(auth_client.valid_scopes(scopes))

    @override_settings(
        SITE_ID=None, ALLOWED_HOSTS=["example.com", "testserver"]
    )
    def test_alternative_site(self):
        """Without SITE_ID, the site is resolved from the request host."""
        self.assertEqual(Site.objects.get(pk=1).domain, "testserver")
        Site.objects.create(domain="example.com", name="example.com")
        request = self.factory.get("/", HTTP_HOST="example.com")
        self.assertEqual(
            Site.objects.get_current(request).domain, "example.com"
        )
        client = MicrosoftClient(request=request)
        self.assertIn("example.com", client.authorization_url()[0])
|
<filename>datumaro/tests/test_transforms.py
import logging as log
import numpy as np
from unittest import TestCase
from datumaro.components.project import Dataset
from datumaro.components.extractor import (Extractor, DatasetItem,
Mask, Polygon, PolyLine, Points, Bbox, Label,
LabelCategories, MaskCategories, AnnotationType
)
import datumaro.util.mask_tools as mask_tools
import datumaro.plugins.transforms as transforms
from datumaro.util.test_utils import compare_datasets
class TransformsTest(TestCase):
    def test_reindex(self):
        """Reindex renumbers items sequentially from `start`, preserving
        their subsets."""
        class SrcExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=10),
                    DatasetItem(id=10, subset='train'),
                    DatasetItem(id='a', subset='val'),
                ])

        class DstExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=5),
                    DatasetItem(id=6, subset='train'),
                    DatasetItem(id=7, subset='val'),
                ])

        actual = transforms.Reindex(SrcExtractor(), start=5)
        compare_datasets(self, DstExtractor(), actual)
def test_mask_to_polygons(self):
class SrcExtractor(Extractor):
def __iter__(self):
items = [
DatasetItem(id=1, image=np.zeros((5, 10, 3)),
annotations=[
Mask(np.array([
[0, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]),
),
]
),
]
return iter(items)
class DstExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, image=np.zeros((5, 10, 3)),
annotations=[
Polygon([3.0, 2.5, 1.0, 0.0, 3.5, 0.0, 3.0, 2.5]),
Polygon([5.0, 3.5, 4.5, 0.0, 8.0, 0.0, 5.0, 3.5]),
]
),
])
actual = transforms.MasksToPolygons(SrcExtractor())
compare_datasets(self, DstExtractor(), actual)
def test_mask_to_polygons_small_polygons_message(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)),
annotations=[
Mask(np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0],
]),
),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3))), ])
with self.assertLogs(level=log.DEBUG) as logs:
actual = transforms.MasksToPolygons(source_dataset)
compare_datasets(self, target_dataset, actual)
self.assertRegex('\n'.join(logs.output), 'too small polygons')
def test_polygons_to_masks(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)),
annotations=[
Polygon([0, 0, 4, 0, 4, 4]),
Polygon([5, 0, 9, 0, 5, 5]),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)),
annotations=[
Mask(np.array([
[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]),
),
Mask(np.array([
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]),
),
]
),
])
actual = transforms.PolygonsToMasks(source_dataset)
compare_datasets(self, target_dataset, actual)
def test_crop_covered_segments(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
# The mask is partially covered by the polygon
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]],
),
z_order=0),
Polygon([1, 1, 4, 1, 4, 4, 1, 4],
z_order=1),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],
),
z_order=0),
Polygon([1, 1, 4, 1, 4, 4, 1, 4],
z_order=1),
]
),
])
actual = transforms.CropCoveredSegments(source_dataset)
compare_datasets(self, target_dataset, actual)
def test_merge_instance_segments(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],
),
z_order=0, group=1),
Polygon([1, 1, 4, 1, 4, 4, 1, 4],
z_order=1, group=1),
Polygon([0, 0, 0, 2, 2, 2, 2, 0],
z_order=1),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 0],
[1, 1, 1, 0, 0]],
),
z_order=0, group=1),
Mask(np.array([
[1, 1, 0, 0, 0],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=1),
]
),
])
actual = transforms.MergeInstanceSegments(source_dataset,
include_polygons=True)
compare_datasets(self, target_dataset, actual)
def test_map_subsets(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='a'),
DatasetItem(id=2, subset='b'),
DatasetItem(id=3, subset='c'),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset=''),
DatasetItem(id=2, subset='a'),
DatasetItem(id=3, subset='c'),
])
actual = transforms.MapSubsets(source_dataset,
{ 'a': '', 'b': 'a' })
compare_datasets(self, target_dataset, actual)
def test_shapes_to_boxes(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],
), id=1),
Polygon([1, 1, 4, 1, 4, 4, 1, 4], id=2),
PolyLine([1, 1, 2, 1, 2, 2, 1, 2], id=3),
Points([2, 2, 4, 2, 4, 4, 2, 4], id=4),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Bbox(0, 0, 4, 4, id=1),
Bbox(1, 1, 3, 3, id=2),
Bbox(1, 1, 1, 1, id=3),
Bbox(2, 2, 2, 2, id=4),
]
),
])
actual = transforms.ShapesToBoxes(source_dataset)
compare_datasets(self, target_dataset, actual)
def test_id_from_image(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image='path.jpg'),
DatasetItem(id=2),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id='path', image='path.jpg'),
DatasetItem(id=2),
])
actual = transforms.IdFromImageName(source_dataset)
compare_datasets(self, target_dataset, actual)
def test_boxes_to_masks(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Bbox(0, 0, 3, 3, z_order=1),
Bbox(0, 0, 3, 1, z_order=2),
Bbox(0, 2, 3, 1, z_order=3),
]
),
])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=1),
Mask(np.array([
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=2),
Mask(np.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
),
z_order=3),
]
),
])
actual = transforms.BoxesToMasks(source_dataset)
compare_datasets(self, target_dataset, actual)
    def test_random_split(self):
        """RandomSplit produces subsets sized by the requested ratios,
        regardless of the items' original subsets."""
        source_dataset = Dataset.from_iterable([
            DatasetItem(id=1, subset="a"),
            DatasetItem(id=2, subset="a"),
            DatasetItem(id=3, subset="b"),
            DatasetItem(id=4, subset="b"),
            DatasetItem(id=5, subset="b"),
            DatasetItem(id=6, subset=""),
            DatasetItem(id=7, subset=""),
        ])

        actual = transforms.RandomSplit(source_dataset, splits=[
            ('train', 4.0 / 7.0),
            ('test', 3.0 / 7.0),
        ])

        self.assertEqual(4, len(actual.get_subset('train')))
        self.assertEqual(3, len(actual.get_subset('test')))
    def test_random_split_gives_error_on_wrong_ratios(self):
        """Split ratios must be non-negative, non-empty and sum to 1;
        otherwise constructing the transform raises."""
        source_dataset = Dataset.from_iterable([DatasetItem(id=1)])

        with self.assertRaises(Exception):
            transforms.RandomSplit(source_dataset, splits=[
                ('train', 0.5),
                ('test', 0.7),
            ])

        with self.assertRaises(Exception):
            transforms.RandomSplit(source_dataset, splits=[])

        with self.assertRaises(Exception):
            transforms.RandomSplit(source_dataset, splits=[
                ('train', -0.5),
                ('test', 1.5),
            ])
def test_remap_labels(self):
src_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
# Should be remapped
Label(1),
Bbox(1, 2, 3, 4, label=2),
Mask(image=np.array([1]), label=3),
# Should be kept
Polygon([1, 1, 2, 2, 3, 4], label=4),
PolyLine([1, 3, 4, 2, 5, 6])
])
], categories={
AnnotationType.label: LabelCategories.from_iterable(
'label%s' % i for i in range(5)),
AnnotationType.mask: MaskCategories(
colormap=mask_tools.generate_colormap(5)),
})
dst_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[
Label(1),
Bbox(1, 2, 3, 4, label=0),
Mask(image=np.array([1]), label=1),
Polygon([1, 1, 2, 2, 3, 4], label=2),
PolyLine([1, 3, 4, 2, 5, 6], label=None)
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(
['label0', 'label9', 'label4']),
AnnotationType.mask: MaskCategories(colormap={
k: v for k, v in mask_tools.generate_colormap(5).items()
if k in { 0, 1, 3, 4 }
})
})
actual = transforms.RemapLabels(src_dataset, mapping={
'label1': 'label9',
'label2': 'label0',
'label3': 'label9',
}, default='keep')
compare_datasets(self, dst_dataset, actual)
def test_remap_labels_delete_unspecified(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Label(0) ])
], categories=['label0'])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1),
], categories=[])
actual = transforms.RemapLabels(source_dataset,
mapping={}, default='delete')
compare_datasets(self, target_dataset, actual)
|
"""
Filtering and dataset mapping methods based on training dynamics.
By default, this module reads training dynamics from a given trained model and
computes the metrics---confidence, variability, correctness,
as well as baseline metrics of forgetfulness and threshold closeness
for each instance in the training data.
If specified, data maps can be plotted with respect to confidence and variability.
Moreover, datasets can be filtered with respect to any of the other metrics.
"""
import argparse
import json
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import torch
import tqdm
import imageio
from collections import defaultdict
from typing import List
from cartography.data_utils import read_data, read_jsonl, copy_dev_test
from cartography.selection.selection_utils import read_dynamics
# TODO(SS): Named tuple for tasks and filtering methods.
# Module-level logging configuration (applied once at import time).
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
def compute_forgetfulness(correctness_trend: List[float]) -> int:
    """
    Given an epoch-wise trend of train predictions, compute the number of
    times an example is forgotten, i.e. predicted incorrectly _after_ being
    predicted correctly.
    Based on: https://arxiv.org/abs/1812.05159
    """
    # Never predicted correctly: treated as maximally forgotten (sentinel).
    if not any(correctness_trend):
        return 1000
    times_forgotten = 0
    learnt = False  # was the example correct in the previous epoch?
    for is_correct in correctness_trend:
        if learnt and not is_correct:
            # Forgot after having learnt it at some point.
            times_forgotten += 1
        # In every case, "learnt" simply tracks the latest outcome.
        learnt = bool(is_correct)
    return times_forgotten
def compute_correctness(trend: List[float]) -> float:
    """Count how many training epochs predicted this example correctly
    (the sum of the per-epoch correctness indicators)."""
    return sum(trend)
def compute_train_dy_metrics_per_epoch(training_dynamics, heuristics, original_id, mode="train"):
    """
    Given the training dynamics (logits for each training instance across epochs), compute metrics
    based on it, for data map coordinates.
    Computed metrics are: confidence, variability, correctness, forgetfulness, threshold_closeness---
    the last two being baselines from prior work
    (Example Forgetting: https://arxiv.org/abs/1812.05159 and
    Active Bias: https://arxiv.org/abs/1704.07433 respectively).

    Args:
        training_dynamics: dict guid -> {"logits": per-epoch logits, "gold": label index}.
        heuristics: dict guid -> dict with "lexical", "constituent",
            "subsequence" and "ood" flags.
        original_id: dict guid -> original dataset id.
        mode: "train" omits the "ood" column; any other value includes it.

    Returns:
        - DataFrame with these metrics (per-epoch prefix trajectories per
          guid); also written to ALL_SAMPLES_<mode>.csv as a side effect.
        - DataFrame with more typical training evaluation metrics, such as accuracy / loss.
    """
    confidence_ = {}
    variability_ = {}
    threshold_closeness_ = {}  # NOTE(review): never populated (computation commented out below)
    correctness_ = {}
    forgetfulness_ = {}        # NOTE(review): never populated (computation commented out below)
    lexical = {}
    constituent = {}
    subsequence= {}
    original_ids = {}
    ood={}
    predicted_labels = {}
    golds_labels = {}          # NOTE(review): never filled or read -- dead variable
    # Functions to be applied to the data.
    variability_func = lambda conf: np.std(conf)
    # if include_ci: # Based on prior work on active bias (https://arxiv.org/abs/1704.07433)
    # variability_func = lambda conf: np.sqrt(np.var(conf) + np.var(conf) * np.var(conf) / (len(conf)-1))
    threshold_closeness_func = lambda conf: conf * (1 - conf)
    loss = torch.nn.CrossEntropyLoss()
    # Epoch count is taken from the first recorded instance.
    num_tot_epochs = len(list(training_dynamics.values())[0]["logits"])
    # if burn_out < num_tot_epochs:
    # logger.info(f"Computing training dynamics. Burning out at {burn_out} of {num_tot_epochs}. ")
    # else:
    logger.info(f"Computing training dynamics across {num_tot_epochs} epochs")
    logger.info("Metrics computed: confidence, variability, correctness, forgetfulness, threshold_closeness")
    logits = {i: [] for i in range(num_tot_epochs)}
    targets = {i: [] for i in range(num_tot_epochs)}
    training_accuracy = defaultdict(float)
    for guid in tqdm.tqdm(training_dynamics):
        correctness_trend = []
        true_probs_trend = []
        # Per-epoch (prefix) trajectories for this instance.
        correctness_ep = []
        confidence_ep = []
        variability_ep = []
        prediction_ep = []
        record = training_dynamics[guid]
        for i, epoch_logits in enumerate(record["logits"]):
            # Ignore epochs beyond the count derived from the first instance.
            if i >= len(logits.keys()):
                break
            probs = torch.nn.functional.softmax(torch.Tensor(epoch_logits), dim=-1)
            true_class_prob = float(probs[record["gold"]])
            true_probs_trend.append(true_class_prob)
            prediction = np.argmax(epoch_logits)
            is_correct = (prediction == record["gold"]).item()
            correctness_trend.append(is_correct)
            training_accuracy[i] += is_correct
            logits[i].append(epoch_logits)
            targets[i].append(record["gold"])
            # Metrics are computed over the prefix of epochs seen so far,
            # yielding a per-epoch trajectory rather than a single scalar.
            correctness_ep.append(compute_correctness(correctness_trend))
            confidence_ep.append(np.mean(true_probs_trend))
            variability_ep.append(variability_func(true_probs_trend))
            prediction_ep.append(prediction.item())
        correctness_[guid] = correctness_ep
        confidence_[guid] = confidence_ep
        variability_[guid] = variability_ep
        # if burn_out < num_tot_epochs:
        # correctness_trend = correctness_trend[:burn_out]
        # true_probs_trend = true_probs_trend[:burn_out]
        # correctness_[guid] = compute_correctness(correctness_trend)
        # confidence_[guid] = np.mean(true_probs_trend)
        # variability_[guid] = variability_func(true_probs_trend)
        # forgetfulness_[guid] = compute_forgetfulness(correctness_trend)
        # threshold_closeness_[guid] = threshold_closeness_func(confidence_[guid])
        lexical[guid] = heuristics[guid]["lexical"]
        constituent[guid] = heuristics[guid]["constituent"]
        subsequence[guid] = heuristics[guid]["subsequence"]
        ood[guid] = heuristics[guid]["ood"]
        original_ids[guid] = original_id[guid]
        predicted_labels[guid] = prediction_ep
    # Should not affect ranking, so ignoring.
    epsilon_var = np.mean(list(variability_.values()))  # NOTE(review): computed but unused
    column_names = ['guid',
                    'index',
                    # 'threshold_closeness',
                    'confidence',
                    'variability',
                    'correctness',
                    # 'forgetfulness',
                    'pred_label',
                    'lexical', 'constituent', 'subsequence', 'original_id']
    if mode != "train":
        # Eval modes carry an extra "ood" flag column, inserted before
        # 'original_id' to match the row tuple ordering below.
        column_names.insert(-1, "ood")
        df = pd.DataFrame([[guid,
                            i,
                            # threshold_closeness_[guid],
                            confidence_[guid],
                            variability_[guid],
                            correctness_[guid],
                            predicted_labels[guid],
                            # forgetfulness_[guid],
                            lexical[guid],
                            constituent[guid],
                            subsequence[guid],
                            ood[guid],
                            original_ids[guid]
                            ] for i, guid in enumerate(correctness_)], columns=column_names)
        df_train = pd.DataFrame([[i,
                                  loss(torch.Tensor(logits[i]), torch.LongTensor(targets[i])).item() / len(
                                      training_dynamics),
                                  training_accuracy[i] / len(training_dynamics)
                                  ] for i in range(num_tot_epochs)],
                                columns=['epoch', 'loss', 'train_acc'])
    else:
        df = pd.DataFrame([[guid,
                            i,
                            # threshold_closeness_[guid],
                            confidence_[guid],
                            variability_[guid],
                            correctness_[guid],
                            predicted_labels[guid],
                            # forgetfulness_[guid],
                            lexical[guid],
                            constituent[guid],
                            subsequence[guid],
                            original_ids[guid]
                            ] for i, guid in enumerate(correctness_)], columns=column_names)
        df_train = pd.DataFrame([[i,loss(torch.Tensor(logits[i]), torch.LongTensor(targets[i])).item() / len(training_dynamics),training_accuracy[i] / len(training_dynamics)] for i in range(num_tot_epochs)], columns=['epoch', 'loss', 'train_acc'])
    # Side effect: dump the per-sample table for offline analysis.
    df.to_csv(f"ALL_SAMPLES_{mode}.csv")
    return df, df_train
def consider_ascending_order(filtering_metric: str) -> bool:
    """
    Determine if the metric values' sorting order to get the most `valuable` examples for training.
    """
    # Ascending keeps low-confidence / low-correctness (hard) examples first.
    ascending_by_metric = {
        "variability": False,
        "confidence": True,
        "threshold_closeness": False,
        "forgetfulness": False,
        "correctness": True,
    }
    try:
        return ascending_by_metric[filtering_metric]
    except KeyError:
        raise NotImplementedError(f"Filtering based on {filtering_metric} not implemented!")
def write_filtered_data(args, train_dy_metrics):
    """
    Filter data based on the given metric, and write it in TSV format to train GLUE-style classifier.

    Creates one subdirectory per fraction under args.filtering_output_dir,
    each containing a subsampled train.tsv plus copied dev/test files.
    """
    # First save the args for filtering, to keep track of which model was used for filtering.
    argparse_dict = vars(args)
    with open(os.path.join(args.filtering_output_dir, f"filtering_configs.json"), "w") as outfile:
        outfile.write(json.dumps(argparse_dict, indent=4, sort_keys=True) + "\n")
    # Determine whether to sort data in ascending order or not, based on the metric.
    is_ascending = consider_ascending_order(args.metric)
    if args.worst:
        # "worst" selects from the opposite end of the ranking.
        is_ascending = not is_ascending
    # Sort by selection.
    sorted_scores = train_dy_metrics.sort_values(by=[args.metric],
                                                 ascending=is_ascending)
    original_train_file = os.path.join(os.path.join(args.data_dir, args.task_name), f"train.tsv")
    train_numeric, header = read_data(original_train_file, task_name=args.task_name, guid_as_int=True)
    for fraction in [0.01, 0.05, 0.10, 0.1667, 0.25, 0.3319, 0.50, 0.75]:
        outdir = os.path.join(args.filtering_output_dir,
                              f"cartography_{args.metric}_{fraction:.2f}/{args.task_name}")
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        # Dev and test need not be subsampled.
        copy_dev_test(args.task_name,
                      from_dir=os.path.join(args.data_dir, args.task_name),
                      to_dir=outdir)
        num_samples = int(fraction * len(train_numeric))
        with open(os.path.join(outdir, f"train.tsv"), "w") as outfile:
            outfile.write(header + "\n")
            # NOTE(review): head(n=num_samples + 1) writes one extra row
            # versus the logged count -- confirm the off-by-one is intended.
            selected = sorted_scores.head(n=num_samples+1)
            if args.both_ends:
                # Mix hardest (70%) and easiest (remainder) examples.
                hardest = sorted_scores.head(n=int(num_samples * 0.7))
                easiest = sorted_scores.tail(n=num_samples - hardest.shape[0])
                selected = pd.concat([hardest, easiest])
                fm = args.metric
                logger.info(f"Selecting both ends: {fm} = "
                            f"({hardest.head(1)[fm].values[0]:3f}: {hardest.tail(1)[fm].values[0]:3f}) "
                            f"& ({easiest.head(1)[fm].values[0]:3f}: {easiest.tail(1)[fm].values[0]:3f})")
            selection_iterator = tqdm.tqdm(range(len(selected)))
            for idx in selection_iterator:
                selection_iterator.set_description(
                    f"{args.metric} = {selected.iloc[idx][args.metric]:.4f}")
                selected_id = selected.iloc[idx]["guid"]
                # Task-specific guid normalization before the lookup.
                if args.task_name in ["SNLI", "MNLI"]:
                    selected_id = int(selected_id)
                elif args.task_name == "WINOGRANDE":
                    selected_id = str(int(selected_id))
                record = train_numeric[selected_id]
                outfile.write(record + "\n")
        logger.info(f"Wrote {num_samples} samples to {outdir}.")
def mix_heuristics_label_eval(df):
    """Bucket eval rows by the single active heuristic and label each bucket.

    A row is bucketed when exactly one of the three heuristic columns
    (``lexical``, ``constituent``, ``subsequence``) is non-zero: +1 means the
    heuristic supports entailment, -1 means it contradicts it.  Each bucket's
    label embeds its out-of-distribution / in-distribution row counts (from
    the ``ood`` column) so the label can be used directly as a plot legend.

    Returns a new DataFrame containing only the bucketed rows, with an added
    ``mix_heurstic_label`` column (typo preserved: the plotting code reads
    this exact column name).

    Fix vs. original: buckets are taken with ``.copy()`` so the column
    assignment hits a real frame instead of a slice view (avoids
    SettingWithCopyWarning / silently lost writes).
    """

    def _bucket(lex, const, sub):
        # One exclusive combination of heuristic flags, copied so we can
        # safely add a column below.
        return df.loc[(df["lexical"] == lex)
                      & (df["constituent"] == const)
                      & (df["subsequence"] == sub)].copy()

    def _label(frame, text):
        # Embed the OOD/ID split of this bucket into its legend label.
        n_ood = frame.loc[frame['ood'] == 1].shape[0]
        n_id = frame.loc[frame['ood'] == 0].shape[0]
        frame['mix_heurstic_label'] = f"{text} (ood: {n_ood} id: {n_id})"
        return frame

    buckets = [
        _label(_bucket(1, 0, 0), "lexical support"),
        _label(_bucket(-1, 0, 0), "lexical contradict"),
        _label(_bucket(0, 1, 0), "constituent support"),
        _label(_bucket(0, -1, 0), "constituent contradict"),
        _label(_bucket(0, 0, 1), "subsequence support"),
        _label(_bucket(0, 0, -1), "subsequence contradict"),
    ]
    return pd.concat(buckets)
def mix_heuristics_label_train(df):
    """Bucket training rows by the single active heuristic and label each bucket.

    Same exclusive bucketing as ``mix_heuristics_label_eval`` (exactly one of
    ``lexical``/``constituent``/``subsequence`` non-zero; +1 supports, -1
    contradicts), but train data has no ``ood`` split so the label only
    embeds the bucket size.

    Fixes vs. original: the label string was missing its closing parenthesis
    (e.g. "lexical support (613"), and columns are now assigned on a
    ``.copy()`` of the selection to avoid SettingWithCopyWarning.
    """

    def _bucket(lex, const, sub):
        return df.loc[(df["lexical"] == lex)
                      & (df["constituent"] == const)
                      & (df["subsequence"] == sub)].copy()

    def _label(frame, text):
        frame['mix_heurstic_label'] = f"{text} ({frame.shape[0]})"
        return frame

    buckets = [
        _label(_bucket(1, 0, 0), "lexical support"),
        _label(_bucket(-1, 0, 0), "lexical contradict"),
        _label(_bucket(0, 1, 0), "constituent support"),
        _label(_bucket(0, -1, 0), "constituent contradict"),
        _label(_bucket(0, 0, 1), "subsequence support"),
        _label(_bucket(0, 0, -1), "subsequence contradict"),
    ]
    return pd.concat(buckets)
def get_ambiguous_heuristics_samples(df, model_dir, df_orig=None, heu='lexical'):
    """Dump the "ambiguous" examples exhibiting heuristic *heu* to a CSV.

    Keeps rows where *heu* is active (non-zero) and ``variability >= 0.3``,
    looks up the original text rows by ``original_id`` in *df_orig*, attaches
    the confidence/variability scores, and writes the result to
    ``<model_dir>/unique_samples_csv/ambiguous_samples.csv``.

    Fix vs. original: *df_orig* defaulted to ``pd.read_csv`` of a hard-coded
    path, which executed at *import* time (and crashed module import when the
    file was absent).  The frame is now loaded lazily only when the caller
    does not supply one.
    """
    if df_orig is None:
        # NOTE(review): hard-coded cluster path kept for backward
        # compatibility; the regex separator forces pandas' python engine.
        df_orig = pd.read_csv("/home/jusun/adila001/MNLI/train_heuristic.tsv",
                              sep='\t|\n')
    # Active-heuristic rows in the high-variability ("ambiguous") region.
    df = df.loc[(df[heu] != 0)]
    df = df.loc[df["variability"] >= 0.3]
    df_heuristics_ORIG = df_orig.loc[df['original_id'].tolist()]
    df_heuristics_ORIG = df_heuristics_ORIG.drop(['index', 'promptID', 'pairID'], axis=1)
    df_heuristics_ORIG['confidence'] = df['confidence'].tolist()
    df_heuristics_ORIG['variability'] = df['variability'].tolist()
    csv_dir = os.path.join(model_dir, 'unique_samples_csv')
    os.makedirs(csv_dir, exist_ok=True)
    df_heuristics_ORIG.to_csv(os.path.join(csv_dir, 'ambiguous_samples.csv'))
def get_sorted_samples(df, model_dir, df_orig=None,
                       n_sample=30,
                       decoded_label=None,
                       columns_order=None, mode="train"):
    """Write one confidence-sorted CSV of original examples per epoch.

    For each epoch, pulls that epoch's variability/confidence/prediction out
    of the per-row score lists in *df*, sorts ascending by the epoch's
    confidence (hardest examples first), joins back to the original text rows
    in *df_orig* via ``original_id``, and writes
    ``<MODE>_CONF_ep_<ep>_SORTED.csv`` under
    ``<model_dir>/ANALYSIS_CLEAN/SORTED``.

    Fixes vs. original: the *df_orig* default ran ``pd.read_csv`` on a
    hard-coded path at import time, and *decoded_label*/*columns_order* were
    mutable list defaults shared across calls — all three are now ``None``
    sentinels with the same effective defaults.  *n_sample* is currently
    unused but kept for backward compatibility.
    """
    if df_orig is None:
        df_orig = pd.read_csv('/home/jusun/adila001/MNLI/train_heuristic.tsv',
                              sep='\t|\n')
    if decoded_label is None:
        decoded_label = ["contradiction", "entailment", "neutral"]
    if columns_order is None:
        columns_order = ['index', 'genre', 'sentence1', 'sentence2', 'variability',
                         'confidence', 'var_ep', 'conf_ep', 'gold_label', 'pred_label']
    csv_dir = os.path.join(model_dir, 'ANALYSIS_CLEAN', 'SORTED')
    os.makedirs(csv_dir, exist_ok=True)
    # Each per-row metric is a list with one entry per epoch.
    ep_number = len(df['variability'].tolist()[0])
    prefix = mode.upper()
    for ep in range(ep_number):
        df_copy = df.copy()
        df_copy['var_ep'] = np.asarray(df_copy['variability'].tolist())[:, ep]
        df_copy['conf_ep'] = np.asarray(df_copy['confidence'].tolist())[:, ep]
        df_copy['pred_label'] = np.asarray(df_copy['pred_label'].tolist())[:, ep]
        df_copy['pred_label'] = [decoded_label[pred] for pred in df_copy['pred_label']]
        # Ascending sort: lowest-confidence (hard-to-learn) examples first.
        sorted_by_conf = df_copy.sort_values(by=['conf_ep'])
        sorted_orig = df_orig.loc[sorted_by_conf['original_id'].tolist()].copy()
        sorted_orig['variability'] = sorted_by_conf['variability'].tolist()
        sorted_orig['confidence'] = sorted_by_conf['confidence'].tolist()
        sorted_orig['var_ep'] = sorted_by_conf['var_ep'].tolist()
        sorted_orig['conf_ep'] = sorted_by_conf['conf_ep'].tolist()
        sorted_orig['pred_label'] = sorted_by_conf['pred_label'].tolist()
        sorted_orig = sorted_orig[columns_order]
        sorted_orig.to_csv(os.path.join(csv_dir, "{}_CONF_ep_{}_SORTED.csv".format(prefix, ep)))
def get_top_n_heuristics_samples(df, model_dir, df_orig=None,
                                 top_heuristic_obj=None,
                                 decoded_label=None,
                                 columns_order=None):
    """Write, per epoch and per heuristic, the top-N highest-confidence rows.

    For every heuristic in *top_heuristic_obj* (name -> N), selects the rows
    where that heuristic is active, extracts the epoch's scores, ranks by
    variability and by confidence (largest first), joins back to *df_orig*
    via ``original_id``, and writes ``<heu>_TOP_CONF_ep_<ep>_LARGEST.csv``
    under ``<model_dir>/heuristics_only_csv_EVAL``.  The variability ranking
    is computed but — as in the original — only the confidence CSV is
    written.

    Fixes vs. original: the *df_orig* default ran ``pd.read_csv`` on a
    hard-coded path at import time and the dict/list defaults were mutable
    (now ``None`` sentinels); heuristic selections are copied before column
    assignment (SettingWithCopyWarning); an empty selection no longer crashes
    on the numpy epoch slice.
    """
    if df_orig is None:
        df_orig = pd.read_csv('/home/jusun/adila001/MNLI/train_heuristic.tsv',
                              sep='\t|\n')
    if top_heuristic_obj is None:
        top_heuristic_obj = {'lexical': 20, 'constituent': 20, 'subsequence': 20}
    if decoded_label is None:
        decoded_label = ["contradiction", "entailment", "neutral"]
    if columns_order is None:
        columns_order = ['genre', 'sentence1', 'sentence2', 'variability',
                         'confidence', 'var_ep', 'conf_ep', 'gold_label', 'pred_label']
    csv_dir = os.path.join(model_dir, 'heuristics_only_csv_EVAL')
    os.makedirs(csv_dir, exist_ok=True)
    # Each per-row metric is a list with one entry per epoch.
    ep_number = len(df['variability'].tolist()[0])
    for ep in range(ep_number):
        for heu, top_n in top_heuristic_obj.items():
            # .copy(): we add epoch columns to this selection below.
            df_heuristic = df.loc[df[heu] != 0].copy()
            if df_heuristic.empty:
                # No rows exhibit this heuristic; the 2-D epoch slice below
                # would raise on an empty array.
                continue
            df_heuristic['var_ep'] = np.asarray(df_heuristic['variability'].tolist())[:, ep]
            df_heuristic['conf_ep'] = np.asarray(df_heuristic['confidence'].tolist())[:, ep]
            df_heuristic['pred_label'] = np.asarray(df_heuristic['pred_label'].tolist())[:, ep]
            df_heuristic['pred_label'] = [decoded_label[pred] for pred in df_heuristic['pred_label']]
            top_n_var = df_heuristic.nlargest(top_n, 'var_ep')
            top_n_conf = df_heuristic.nlargest(top_n, 'conf_ep')
            # Join the ranked rows back to the original text examples.
            top_n_var_ORIG = df_orig.loc[top_n_var['original_id'].tolist()].copy()
            top_n_var_ORIG['variability'] = top_n_var['variability'].tolist()
            top_n_var_ORIG['confidence'] = top_n_var['confidence'].tolist()
            top_n_var_ORIG['var_ep'] = top_n_var['var_ep'].tolist()
            top_n_var_ORIG['conf_ep'] = top_n_var['conf_ep'].tolist()
            top_n_var_ORIG['pred_label'] = top_n_var['pred_label'].tolist()
            top_n_conf_ORIG = df_orig.loc[top_n_conf['original_id'].tolist()].copy()
            top_n_conf_ORIG['variability'] = top_n_conf['variability'].tolist()
            top_n_conf_ORIG['confidence'] = top_n_conf['confidence'].tolist()
            top_n_conf_ORIG['var_ep'] = top_n_conf['var_ep'].tolist()
            top_n_conf_ORIG['conf_ep'] = top_n_conf['conf_ep'].tolist()
            top_n_conf_ORIG['pred_label'] = top_n_conf['pred_label'].tolist()
            top_n_var_ORIG = top_n_var_ORIG[columns_order]
            top_n_conf_ORIG = top_n_conf_ORIG[columns_order]
            top_n_conf_ORIG.to_csv(os.path.join(csv_dir, "{}_TOP_CONF_ep_{}_LARGEST.csv".format(heu, ep)))
def find_max_var(var_arr):
    """Return the largest value in *var_arr* (any numpy-compatible array-like)."""
    return np.asarray(var_arr).max()
def plot_train_epochs(args, training_dynamics, heuristics, original_id, gif =True):
    """Plot per-epoch training data maps and optionally bundle them into GIFs.

    Produces one frame series per heuristic (heuristic rows vs. a 2x random
    background sample) and one combined heuristics-only series.

    Args:
        args: namespace with ``model_dir`` and ``plots_dir``.
        training_dynamics: dict guid -> {"logits": [per-epoch logits], ...}.
        heuristics: per-guid lexical/constituent/subsequence annotations.
        original_id: per-guid mapping back to the original dataset row.
        gif: when True, also write an animated GIF of the collected frames.
    """
    # One logits list per epoch -> number of trained epochs.
    total_epochs = len(list(training_dynamics.values())[0]["logits"])
    df, _ = compute_train_dy_metrics_per_epoch(training_dynamics, heuristics, original_id)
    train_dy_filename = os.path.join(args.model_dir, f"td_metrics.jsonl")
    df.to_json(train_dy_filename,
               orient='records',
               lines=True)
    logger.info(f"Metrics based on Training Dynamics written to {train_dy_filename}")
    # Split rows into "some heuristic active" vs "no heuristic at all".
    df_heuristics = df.loc[(df["lexical"] != 0) | (df["constituent"] != 0) | (df["subsequence"] != 0)]
    df_others = df.loc[(df["lexical"] == 0) & (df["constituent"] == 0) & (df["subsequence"] == 0)]
    # Row counts per heuristic (upper bounds for plotting).
    max_instances_heuristic = {
        'lexical': df_heuristics.loc[df_heuristics['lexical'] != 0].shape[0],
        'subsequence': df_heuristics.loc[df_heuristics['subsequence'] != 0].shape[0],
        'constituent': df_heuristics.loc[df_heuristics['constituent'] != 0].shape[0]
    }
    heuristics = ['lexical', 'constituent', 'subsequence']
    # Shared x-axis limit so every epoch/heuristic frame is comparable.
    max_var = find_max_var(df_heuristics['variability'].tolist())
    for heuristic in heuristics:
        figs = []
        max_instances_to_plot = max_instances_heuristic[heuristic]
        df_current_heuristic = df_heuristics.loc[df_heuristics[heuristic] != 0]
        # df_others_sampled = df_others.sample(n=max_instances_to_plot-df_current_heuristic.shape[0])
        # Background: twice as many non-heuristic rows for visual contrast.
        df_others_sampled = df_others.sample(n=df_current_heuristic.shape[0]*2)
        # NOTE(review): DataFrame.append was removed in pandas 2.x; this code
        # presumably targets pandas < 2 — confirm before upgrading.
        df_current_heuristic = df_current_heuristic.append(df_others_sampled, ignore_index=True)
        # ### DEV ###
        # for ep in range(total_epochs):
        # ### DEV ###
        for ep in range(2,total_epochs):  # the first two epochs are skipped
            df_current_heuristic_epoch = df_current_heuristic.copy()
            # print(df_current_heuristic_epoch['confidence'])
            # Per-row metrics are lists over epochs; slice out epoch `ep`.
            confidence_epoch = np.asarray(df_current_heuristic_epoch['confidence'].tolist())[:,ep].flatten()
            var_epoch = np.asarray(df_current_heuristic_epoch['variability'].tolist())[:,ep].flatten()
            correctness_epoch = np.asarray(df_current_heuristic_epoch['correctness'].tolist())[:,ep].flatten()
            # NOTE(review): drop() is not in-place and its result is discarded;
            # the assignments below overwrite the columns anyway.
            df_current_heuristic_epoch.drop(['confidence', 'variability', 'correctness'], axis=1)
            df_current_heuristic_epoch['confidence'] = confidence_epoch
            df_current_heuristic_epoch['variability'] = var_epoch
            df_current_heuristic_epoch['correctness'] = correctness_epoch
            fig = plot_heuristics_mix(df_current_heuristic_epoch, os.path.join(args.plots_dir, 'train_plots'), hue_metric=heuristic,
                                      title='{}_epoch_{}'.format(heuristic, ep), max_var=max_var)
            figs.append(convert_fig_to_arr(fig))
        if gif:
            kwargs_write = {'fps': 1.0, 'quantizer': 'nq'}  # unused; kept from an imageio example
            # `ep` is the last epoch index left over from the loop above.
            gif_path = os.path.join(args.plots_dir, "train_plots", f'TRAIN_{ep}_epochs.gif')
            # gif_path = f'{args.plots_dir}/{heuristic}_{ep}_epochs.gif'
            imageio.mimsave(gif_path, figs, fps=1)
            logger.info(f"Aminated gif saved to {gif_path}")
    # Second pass: one series labelling each exclusive heuristic bucket.
    df_heuristics_mix = mix_heuristics_label_train(df_heuristics)
    figs = []
    # ### DEV ###
    # for ep in range(total_epochs):
    # ### DEV ###
    df_others_sampled = df_others.sample(n=df_heuristics_mix.shape[0] * 2)  # NOTE(review): unused (concat below is commented out)
    for ep in range(2,total_epochs):
        df_heuristic_mix_epoch = df_heuristics_mix.copy()
        confidence_epoch = np.asarray(df_heuristic_mix_epoch['confidence'].tolist())[:, ep].flatten()
        var_epoch = np.asarray(df_heuristic_mix_epoch['variability'].tolist())[:, ep].flatten()
        correctness_epoch = np.asarray(df_heuristic_mix_epoch['correctness'].tolist())[:, ep].flatten()
        df_heuristic_mix_epoch.drop(['confidence', 'variability', 'correctness'], axis=1)
        df_heuristic_mix_epoch['confidence'] = confidence_epoch
        df_heuristic_mix_epoch['variability'] = var_epoch
        df_heuristic_mix_epoch['correctness'] = correctness_epoch
        # df_heuristic_mix_epoch = pd.concat([df_others_sampled, df_heuristic_mix_epoch])
        fig = plot_heuristics_only(df_heuristic_mix_epoch,os.path.join(args.plots_dir, "train_plots"),title=f'HEURISTICS_ONLY_{ep}', max_var=max_var)
        figs.append(convert_fig_to_arr(fig))
    if gif:
        kwargs_write = {'fps': 1.0, 'quantizer': 'nq'}  # unused
        gif_path = os.path.join(args.plots_dir, "train_plots", f'TRAIN_{ep}_epochs_ALL.gif')
        # gif_path = f'{args.plots_dir}/HEURISTICS_ONLY_{ep}_epochs.gif'
        imageio.mimsave(gif_path, figs, fps=1)
        logger.info(f"Aminated gif saved to {gif_path}")
def plot_eval_epochs(args, id_obj, ood_obj, gif =True):
    """Plot per-epoch eval data maps contrasting in-distribution (ID) and
    out-of-distribution (OOD) examples; optionally bundle frames into a GIF.

    Args:
        args: namespace with ``model_dir`` and ``plots_dir``.
        id_obj / ood_obj: 4-tuples of (dynamics dict, heuristic annotations,
            original-id mapping, predicted labels).
        gif: when True, also write an animated GIF of the collected frames.
    """
    id_dynamics, id_heuristics, id_original_idx, id_pred = id_obj[0], id_obj[1], id_obj[2], id_obj[3]
    ood_dynamics, ood_heuristics, ood_original_idx, ood_pred = ood_obj[0], ood_obj[1], ood_obj[2], ood_obj[3]
    # One logits list per epoch -> number of eval checkpoints.
    total_epochs = len(list(id_dynamics.values())[0]["logits"])
    df_id, _ = compute_train_dy_metrics_per_epoch(id_dynamics, id_heuristics, id_original_idx, mode="eval")
    df_ood, _ = compute_train_dy_metrics_per_epoch(ood_dynamics, ood_heuristics, ood_original_idx, mode="eval")
    # Flag the distribution each row came from before concatenating.
    df_ood['ood'] = 1
    df_id['ood'] = 0
    id_dy_filename = os.path.join(args.model_dir, f"iid_metrics.jsonl")
    df_id.to_json(id_dy_filename,
                  orient='records',
                  lines=True)
    ood_dy_filename = os.path.join(args.model_dir, f"ood_metrics.jsonl")
    df_ood.to_json(ood_dy_filename,
                   orient='records',
                   lines=True)
    logger.info(f"Metrics based on Eval Dynamics written to {id_dy_filename} and {ood_dy_filename}")
    df = pd.concat([df_id, df_ood])
    # Shared x-axis limit so per-epoch frames are comparable.
    max_var = find_max_var(df['variability'].tolist())
    df_heuristics = df.loc[(df["lexical"] != 0) | (df["constituent"] != 0) | (df["subsequence"] != 0)]
    df_ood = df.loc[(df["ood"] != 0)]
    # Rows of interest: any heuristic row plus every OOD row.
    # NOTE(review): a row that is both OOD and heuristic appears twice here.
    df_concern = pd.concat([df_heuristics, df_ood])
    df_concern = mix_heuristics_label_eval(df_concern)
    df_others = df.loc[(df["lexical"] == 0) & (df["constituent"] == 0) & (df["subsequence"] == 0) & (df["ood"] == 0)]
    print(df_others.shape)
    print(df_ood.shape)
    # Background sample of plain ID rows, capped by availability.
    df_others_sample = df_others.sample(n= int(np.ceil(df_ood.shape[0])) if df_ood.shape[0] < df_others.shape[0] else df_others.shape[0] )
    df = pd.concat([df_concern, df_others_sample])
    # Rows outside the heuristic buckets get a placeholder legend label.
    df = df.fillna("no heuristic")
    print(df_heuristics.shape[0], df_others_sample.shape[0], df_ood.shape[0])
    figs = []
    palette=iter(sns.husl_palette(len(np.unique(df["mix_heurstic_label"].tolist()))+1))  # NOTE(review): computed but never passed to the plot below
    # ### DEV ###
    # for ep in range(total_epochs):
    # ### DEV ###
    for ep in range(2, total_epochs):  # the first two epochs are skipped
        df_heuristic_mix_epoch = df.copy()
        # Per-row metrics are lists over epochs; slice out epoch `ep`.
        confidence_epoch = np.asarray(df_heuristic_mix_epoch['confidence'].tolist())[:, ep].flatten()
        var_epoch = np.asarray(df_heuristic_mix_epoch['variability'].tolist())[:, ep].flatten()
        correctness_epoch = np.asarray(df_heuristic_mix_epoch['correctness'].tolist())[:, ep].flatten()
        # NOTE(review): drop() is not in-place and its result is discarded;
        # the assignments below overwrite the columns anyway.
        df_heuristic_mix_epoch.drop(['confidence', 'variability', 'correctness'], axis=1)
        df_heuristic_mix_epoch['confidence'] = confidence_epoch
        df_heuristic_mix_epoch['variability'] = var_epoch
        df_heuristic_mix_epoch['correctness'] = correctness_epoch
        fig = plot_heuristics_only(df_heuristic_mix_epoch, os.path.join(args.plots_dir, "eval_plots"), title=f'EVAL_{ep}',
                                   max_var=max_var, style="ood")
        figs.append(convert_fig_to_arr(fig))
    if gif:
        kwargs_write = {'fps': 1.0, 'quantizer': 'nq'}  # unused; kept from an imageio example
        # `ep` is the last epoch index left over from the loop above.
        gif_path = os.path.join(args.plots_dir, "eval_plots", f'EVAL_{ep}_epochs.gif')
        imageio.mimsave(gif_path, figs, fps=1)
        logger.info(f"Aminated gif saved to {gif_path}")
def convert_fig_to_arr(fig):
    """Render a Matplotlib figure and return its RGB pixels as a numpy array
    of shape (height, width, 3), suitable for use as a GIF frame."""
    fig.canvas.draw()  # force a render so the pixel buffer is populated
    width, height = fig.canvas.get_width_height()
    pixels = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
    return pixels.reshape((height, width, 3))
def compute_train_dy_metrics(training_dynamics, heuristics, original_id, burn_out):
    """Aggregate per-epoch training dynamics into per-example cartography metrics.

    For every guid, computes across epochs: confidence (mean gold-class
    probability), variability (std of the same), correctness (# epochs with a
    correct prediction), forgetfulness, and threshold closeness; the heuristic
    flags and the original dataset id are carried through unchanged.

    Args:
        training_dynamics: dict guid -> {"logits": [per-epoch logits], "gold": label index}.
        heuristics: dict guid -> dict with "lexical"/"constituent"/"subsequence" flags.
        original_id: dict guid -> id of the example in the original data file.
        burn_out: use only the first `burn_out` epochs when fewer than the total.

    Returns:
        (df, df_train): per-example metrics, and a per-epoch table with mean
        loss and training accuracy.
    """
    confidence_ = {}
    variability_ = {}
    threshold_closeness_ = {}
    correctness_ = {}
    forgetfulness_ = {}
    lexical = {}
    constituent = {}
    subsequence = {}
    original_ids = {}
    # Functions to be applied to the data.
    variability_func = lambda conf: np.std(conf)
    threshold_closeness_func = lambda conf: conf * (1 - conf)  # peaks at conf == 0.5
    loss = torch.nn.CrossEntropyLoss()
    num_tot_epochs = len(list(training_dynamics.values())[0]["logits"])
    logger.info(f"Computing training dynamics across {num_tot_epochs} epochs")
    logger.info("Metrics computed: confidence, variability, correctness, forgetfulness, threshold_closeness")
    # Per-epoch accumulators for the aggregate loss/accuracy table.
    logits = {i: [] for i in range(num_tot_epochs)}
    targets = {i: [] for i in range(num_tot_epochs)}
    training_accuracy = defaultdict(float)
    for guid in tqdm.tqdm(training_dynamics):
        correctness_trend = []
        true_probs_trend = []
        record = training_dynamics[guid]
        for i, epoch_logits in enumerate(record["logits"]):
            if i >= len(logits.keys()):
                # Guard against records with more epochs than the first one.
                break
            probs = torch.nn.functional.softmax(torch.Tensor(epoch_logits), dim=-1)
            true_class_prob = float(probs[record["gold"]])
            true_probs_trend.append(true_class_prob)
            prediction = np.argmax(epoch_logits)
            is_correct = (prediction == record["gold"]).item()
            correctness_trend.append(is_correct)
            training_accuracy[i] += is_correct
            logits[i].append(epoch_logits)
            targets[i].append(record["gold"])
        if burn_out < num_tot_epochs:
            # Restrict the per-example metrics to the first `burn_out` epochs.
            correctness_trend = correctness_trend[:burn_out]
            true_probs_trend = true_probs_trend[:burn_out]
        correctness_[guid] = compute_correctness(correctness_trend)
        confidence_[guid] = np.mean(true_probs_trend)
        variability_[guid] = variability_func(true_probs_trend)
        forgetfulness_[guid] = compute_forgetfulness(correctness_trend)
        threshold_closeness_[guid] = threshold_closeness_func(confidence_[guid])
        lexical[guid] = heuristics[guid]["lexical"]
        constituent[guid] = heuristics[guid]["constituent"]
        subsequence[guid] = heuristics[guid]["subsequence"]
        original_ids[guid] = original_id[guid]
    # Should not affect ranking, so ignoring.
    epsilon_var = np.mean(list(variability_.values()))
    column_names = ['guid',
                    'index',
                    'threshold_closeness',
                    'confidence',
                    'variability',
                    'correctness',
                    'forgetfulness', 'lexical', 'constituent', 'subsequence', 'original_id']
    df = pd.DataFrame([[guid,
                        i,
                        threshold_closeness_[guid],
                        confidence_[guid],
                        variability_[guid],
                        correctness_[guid],
                        forgetfulness_[guid],
                        lexical[guid],
                        constituent[guid],
                        subsequence[guid],
                        original_ids[guid]
                        ] for i, guid in enumerate(correctness_)], columns=column_names)
    # Epoch-level summary: mean loss and training accuracy per epoch.
    df_train = pd.DataFrame([[i,
                              loss(torch.Tensor(logits[i]), torch.LongTensor(targets[i])).item() / len(
                                  training_dynamics),
                              training_accuracy[i] / len(training_dynamics)
                              ] for i in range(num_tot_epochs)],
                            columns=['epoch', 'loss', 'train_acc'])
    return df, df_train
def plot_heuristics_only(
        df: pd.DataFrame,
        plot_dir: os.path,
        title: str = '', save=True, max_var=0.5, style=None, palette=None):
    """Scatter a variability-vs-confidence data map colored by heuristic bucket.

    *df* must carry ``variability``, ``confidence`` and the
    ``mix_heurstic_label`` column produced by ``mix_heuristics_label_train`` /
    ``mix_heuristics_label_eval`` (the column-name typo is intentional — it is
    the key those functions write).  Annotates the canonical
    easy/ambiguous/hard regions, optionally saves ``<plot_dir>/<title>.png``,
    and returns the figure so callers can collect GIF frames.

    Fixes vs. original: the save message now logs the actual filename instead
    of the literal "(unknown)", and an unused seaborn palette computation was
    removed.
    """
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    main_metric = 'variability'
    other_metric = 'confidence'
    hue = "mix_heurstic_label"
    fig, ax0 = plt.subplots(1, 1, figsize=(12, 10))
    plot = sns.scatterplot(x=main_metric,
                           y=other_metric,
                           ax=ax0,
                           data=df,
                           hue=hue,
                           s=30,
                           style=style,
                           # Fixed marker only when a style column is given,
                           # matching the original behavior.
                           marker='o' if style is not None else None,
                           palette=palette if palette else "tab10")
    # Annotate the three canonical data-map regions.
    bb = lambda c: dict(boxstyle="round,pad=0.3", ec=c, lw=2, fc="white")
    func_annotate = lambda text, xyc, bbc: ax0.annotate(text,
                                                        xy=xyc,
                                                        xycoords="axes fraction",
                                                        fontsize=15,
                                                        color='black',
                                                        va="center",
                                                        ha="center",
                                                        rotation=350,
                                                        bbox=bb(bbc))
    an1 = func_annotate("ambiguous", xyc=(0.9, 0.5), bbc='black')
    an2 = func_annotate("easy-to-learn", xyc=(0.27, 0.85), bbc='r')
    an3 = func_annotate("hard-to-learn", xyc=(0.35, 0.25), bbc='b')
    plot.legend(ncol=1, bbox_to_anchor=[0.175, 0.5], loc='right', fontsize='small')
    plot.set_xlabel('variability')
    plot.set_ylabel('confidence')
    plot.set_title(title)
    # Shared axis limits keep per-epoch frames comparable in a GIF.
    ax0.set_xlim(0, max_var)
    ax0.set_ylim(0, 1)
    fig.tight_layout()
    filename = f'{plot_dir}/{title}.png'
    if save:
        fig.savefig(filename, dpi=300)
        logger.info(f"Plot saved to {filename}")
    return fig
def plot_heuristics_mix(
        df: pd.DataFrame,
        plot_dir: os.path,
        hue_metric: str = 'lexical',
        title: str = '',
        save=True,
        max_var = 0.5):
    """Scatter a variability-vs-confidence data map colored by *hue_metric*.

    Normalizes ``correctness`` into a ``correct.`` bin column (so it can be
    used as a hue), annotates the canonical easy/ambiguous/hard regions,
    optionally saves ``<plot_dir>/<title>.png``, and returns the figure.

    Fixes vs. original: the scatterplot now draws the normalized frame (the
    original computed ``corr_frac``/``correct.`` and then plotted the raw
    *df*, so a ``correct.`` hue would have failed), and the save message logs
    the real filename instead of the literal "(unknown)".
    """
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    # Normalize correctness to a value between 0 and 1 and add a binned
    # string column usable as a categorical hue.
    dataframe = df.assign(corr_frac=lambda d: d.correctness / d.correctness.max())
    dataframe['correct.'] = [f"{x:.1f}" for x in dataframe['corr_frac']]
    main_metric = 'variability'
    other_metric = 'confidence'
    hue = hue_metric
    num_hues = len(dataframe[hue].unique().tolist())
    # Marker styles only stay readable for a handful of categories.
    style = hue_metric if num_hues < 8 else None
    fig, ax0 = plt.subplots(1, 1, figsize=(8, 6))
    # Diverging palette over the hue categories.
    pal = sns.diverging_palette(260, 15, n=num_hues, sep=10, center="dark")
    plot = sns.scatterplot(x=main_metric,
                           y=other_metric,
                           ax=ax0,
                           data=dataframe,
                           hue=hue,
                           palette=pal,
                           style=style,
                           s=30)
    # Annotate the three canonical data-map regions.
    bb = lambda c: dict(boxstyle="round,pad=0.3", ec=c, lw=2, fc="white")
    func_annotate = lambda text, xyc, bbc: ax0.annotate(text,
                                                        xy=xyc,
                                                        xycoords="axes fraction",
                                                        fontsize=15,
                                                        color='black',
                                                        va="center",
                                                        ha="center",
                                                        rotation=350,
                                                        bbox=bb(bbc))
    an1 = func_annotate("ambiguous", xyc=(0.9, 0.5), bbc='black')
    an2 = func_annotate("easy-to-learn", xyc=(0.27, 0.85), bbc='r')
    an3 = func_annotate("hard-to-learn", xyc=(0.35, 0.25), bbc='b')
    plot.legend(ncol=1, bbox_to_anchor=[0.175, 0.5], loc='right')
    plot.set_xlabel('variability')
    plot.set_ylabel('confidence')
    plot.set_title(title)
    # Shared axis limits keep per-epoch frames comparable in a GIF.
    ax0.set_xlim(0, max_var)
    ax0.set_ylim(0, 1)
    fig.tight_layout()
    filename = f'{plot_dir}/{title}.png'
    if save:
        fig.savefig(filename, dpi=300)
        logger.info(f"Plot saved to {filename}")
    return fig
def plot_data_map(dataframe: pd.DataFrame,
                  plot_dir: os.path,
                  hue_metric: str = 'correct.',
                  title: str = '',
                  model: str = 'RoBERTa',
                  show_hist: bool = False,
                  max_instances_to_plot = 55000):
    """Plot the classic dataset-cartography data map, optionally with marginal
    histograms of confidence, variability and correctness.

    Subsamples the frame to at most *max_instances_to_plot* rows, normalizes
    correctness into a ``correct.`` bin column, scatters variability vs.
    confidence hued by *hue_metric*, and saves the figure.

    Fixes vs. original: the save message logs the real filename instead of
    the literal "(unknown)", a leftover debug print was removed, and the
    subsample size uses ``min()``.
    """
    # Set style.
    sns.set(style='whitegrid', font_scale=1.6, font='Georgia', context='paper')
    logger.info(f"Plotting figure for {title} using the {model} model ...")
    # Subsample data to plot, so the plot is not too busy.
    dataframe = dataframe.sample(n=min(max_instances_to_plot, len(dataframe)))
    # Normalize correctness to a value between 0 and 1.
    dataframe = dataframe.assign(corr_frac=lambda d: d.correctness / d.correctness.max())
    dataframe['correct.'] = [f"{x:.1f}" for x in dataframe['corr_frac']]
    main_metric = 'variability'
    other_metric = 'confidence'
    hue = hue_metric
    num_hues = len(dataframe[hue].unique().tolist())
    # Marker styles only stay readable for a handful of categories.
    style = hue_metric if num_hues < 8 else None
    if not show_hist:
        fig, ax0 = plt.subplots(1, 1, figsize=(8, 6))
    else:
        # Wide layout: data map on the left, three stacked histograms right.
        fig = plt.figure(figsize=(14, 10), )
        gs = fig.add_gridspec(3, 2, width_ratios=[5, 1])
        ax0 = fig.add_subplot(gs[:, 0])
    # Make the scatterplot with a diverging palette over the hue categories.
    pal = sns.diverging_palette(260, 15, n=num_hues, sep=10, center="dark")
    plot = sns.scatterplot(x=main_metric,
                           y=other_metric,
                           ax=ax0,
                           data=dataframe,
                           hue=hue,
                           palette=pal,
                           style=style,
                           s=30)
    # Annotate the three canonical data-map regions.
    bb = lambda c: dict(boxstyle="round,pad=0.3", ec=c, lw=2, fc="white")
    func_annotate = lambda text, xyc, bbc: ax0.annotate(text,
                                                        xy=xyc,
                                                        xycoords="axes fraction",
                                                        fontsize=15,
                                                        color='black',
                                                        va="center",
                                                        ha="center",
                                                        rotation=350,
                                                        bbox=bb(bbc))
    an1 = func_annotate("ambiguous", xyc=(0.9, 0.5), bbc='black')
    an2 = func_annotate("easy-to-learn", xyc=(0.27, 0.85), bbc='r')
    an3 = func_annotate("hard-to-learn", xyc=(0.35, 0.25), bbc='b')
    if not show_hist:
        plot.legend(ncol=1, bbox_to_anchor=[0.175, 0.5], loc='right')
    else:
        plot.legend(fancybox=True, shadow=True, ncol=1)
    plot.set_xlabel('variability')
    plot.set_ylabel('confidence')
    if show_hist:
        plot.set_title(f"{title}-{model} Data Map", fontsize=17)
        # Make the histograms.
        ax1 = fig.add_subplot(gs[0, 1])
        ax2 = fig.add_subplot(gs[1, 1])
        ax3 = fig.add_subplot(gs[2, 1])
        plott0 = dataframe.hist(column=['confidence'], ax=ax1, color='#622a87')
        plott0[0].set_title('')
        plott0[0].set_xlabel('confidence')
        plott0[0].set_ylabel('density')
        plott1 = dataframe.hist(column=['variability'], ax=ax2, color='teal')
        plott1[0].set_title('')
        plott1[0].set_xlabel('variability')
        plott1[0].set_ylabel('density')
        plot2 = sns.countplot(x="correct.", data=dataframe, ax=ax3, color='#86bf91')
        ax3.xaxis.grid(True)  # Show the vertical gridlines
        plot2.set_title('')
        plot2.set_xlabel('correctness')
        plot2.set_ylabel('density')
    fig.tight_layout()
    # NOTE(review): the non-hist branch writes under a relative `figures/`
    # directory that is never created here — confirm it exists at call time.
    filename = f'{plot_dir}/{title}_{model}.pdf' if show_hist else f'figures/compact_{title}_{model}.png'
    fig.savefig(filename, dpi=300)
    logger.info(f"Plot saved to {filename}")
if __name__ == "__main__":
    # CLI entry point: read training/eval dynamics from --model_dir and either
    # filter data subsets by a cartography metric or plot per-epoch data maps.
    parser = argparse.ArgumentParser()
    parser.add_argument("--filter",
                        action="store_true",
                        help="Whether to filter data subsets based on specified `metric`.")
    parser.add_argument("--plot_train",
                        action="store_true",
                        help="Whether to plot train data maps and save as `png`.")
    parser.add_argument("--plot_eval",
                        action="store_true",
                        help="Whether to plot eval data maps and save as `png`.")
    parser.add_argument("--model_dir",
                        "-o",
                        required=True,
                        type=os.path.abspath,
                        help="Directory where model training dynamics stats reside.")
    parser.add_argument("--data_dir",
                        "-d",
                        default="/Users/swabhas/data/glue/WINOGRANDE/xl/",
                        type=os.path.abspath,
                        help="Directory where data for task resides.")
    parser.add_argument("--plots_dir",
                        default="./cartography/",
                        type=os.path.abspath,
                        help="Directory where plots are to be saved.")
    parser.add_argument("--task_name",
                        "-t",
                        default="WINOGRANDE",
                        choices=("SNLI", "MNLI", "QNLI", "WINOGRANDE", "RTE", "WNLI"),
                        help="Which task are we plotting or filtering for.")
    parser.add_argument('--metric',
                        choices=('threshold_closeness',
                                 'confidence',
                                 'variability',
                                 'correctness',
                                 'forgetfulness'),
                        help="Metric to filter data by.",)
    parser.add_argument("--include_ci",
                        action="store_true",
                        help="Compute the confidence interval for variability.")
    parser.add_argument("--filtering_output_dir",
                        "-f",
                        default="./filtered/",
                        type=os.path.abspath,
                        help="Output directory where filtered datasets are to be written.")
    parser.add_argument("--worst",
                        action="store_true",
                        help="Select from the opposite end of the spectrum acc. to metric,"
                             "for baselines")
    parser.add_argument("--both_ends",
                        action="store_true",
                        help="Select from both ends of the spectrum acc. to metric,")
    parser.add_argument("--burn_out",
                        type=int,
                        default=100,
                        help="# Epochs for which to compute train dynamics.")
    parser.add_argument("--model",
                        default="RoBERTa",
                        help="Model for which data map is being plotted")
    parser.add_argument("--plot_gif",
                        action="store_true",
                        help="Whether to plot gif or not")
    args = parser.parse_args()
    # (disabled) earlier workflow — standalone GIF assembly and metric-based
    # data filtering — kept for reference:
    # if args.plot_gif:
    #     assert len(os.listdir(args.plots_dir)) > 0
    #     plot_gif(args.plots_dir)
    #     exit()
    # total_epochs = len(list(training_dynamics.values())[0]["logits"])
    # if args.burn_out > total_epochs:
    #     args.burn_out = total_epochs
    #     logger.info(f"Total epochs found: {args.burn_out}")
    # train_dy_metrics, _ = compute_train_dy_metrics(training_dynamics, heuristics, original_id, args.burn_out)
    #
    # burn_out_str = f"_{args.burn_out}" if args.burn_out > total_epochs else ""
    # train_dy_filename = os.path.join(args.model_dir, f"td_metrics{burn_out_str}.jsonl")
    # train_dy_metrics.to_json(train_dy_filename,
    #                          orient='records',
    #                          lines=True)
    # logger.info(f"Metrics based on Training Dynamics written to {train_dy_filename}")
    # if args.filter:
    #     assert args.filtering_output_dir
    #     if not os.path.exists(args.filtering_output_dir):
    #         os.makedirs(args.filtering_output_dir)
    #     assert args.metric
    #     write_filtered_data(args, train_dy_metrics)
    assert args.plots_dir
    # NOTE(review): --plots_dir is immediately overridden; plots always go to
    # <model_dir>/plots regardless of the flag.
    args.plots_dir = os.path.join(args.model_dir, "plots")
    if not os.path.exists(args.plots_dir):
        os.makedirs(args.plots_dir)
    if args.plot_train:
        # (disabled) alternative plotting/export entry points kept for reference:
        # plot_data_map(train_dy_metrics, args.plots_dir, title=args.task_name, show_hist=True, model=args.model)
        # plot_heuristics_mix(args, train_dy_metrics, args.plots_dir, title=args.task_name)
        # plot_heuristics_only(args, train_dy_metrics, args.plots_dir, title=args.task_name)
        # get_ambiguous_heuristics_samples(train_dy_metrics, args.model_dir)
        # get_top_n_heuristics_samples(train_dy_metrics, args.model_dir)
        # QNLI dynamics carry a trailing entry that must be stripped on read.
        training_dynamics, heuristics, original_id, pred_labels = read_dynamics(args.model_dir,
                                                                                strip_last=True if args.task_name in [
                                                                                    "QNLI"] else False,
                                                                                burn_out=args.burn_out if args.burn_out < 100 else None)
        df_train, _ = compute_train_dy_metrics_per_epoch(training_dynamics, heuristics, original_id, mode="eval")
        # plot_train_epochs(args, training_dynamics, heuristics, original_id, gif=True)
        # NOTE(review): this path/label set is RTE-specific — confirm it
        # matches --task_name before running other tasks.
        get_sorted_samples(df_train, args.model_dir,
                           pd.read_csv('/home/jusun/adila001/RTE/train_heuristic.tsv',
                                       sep='\t|\n'),
                           decoded_label=["entailment", "not_entailment"],
                           columns_order=['index', 'sentence1', 'sentence2', 'variability',
                                          'confidence', 'var_ep', 'conf_ep', 'lexical',
                                          'subsequence',
                                          'gold_label', 'pred_label'])
        # get_top_n_heuristics_samples(df_train, args.model_dir,
        #                              pd.read_csv('/home/jusun/adila001/RTE/train_heuristic.tsv',
        #                                          sep='\t|\n'),
        #                              columns_order=['sentence1', 'sentence2', 'variability',
        #                                             'confidence', 'var_ep', 'conf_ep', 'lexical',
        #                                             'subsequence',
        #                                             'gold_label', 'pred_label'],
        #                              decoded_label=["entailment", "not_entailment"],
        #                              top_heuristic_obj={'lexical': 10})
    if args.plot_eval:
        # get_ambiguous_heuristics_samples(train_dy_metrics, args.model_dir)
        # Read both the in-distribution and out-of-distribution eval dynamics.
        eval_ID_dynamics, heuristics_ID, original_id_ID, pred_labels_ID = read_dynamics(args.model_dir,
                                                                                        strip_last=True if args.task_name in [
                                                                                            "QNLI"] else False,
                                                                                        burn_out=args.burn_out if args.burn_out < 100 else None, mode="eval_ID")
        eval_OOD_dynamics, heuristics_OOD, original_id_OOD, pred_labels_OOD = read_dynamics(args.model_dir,
                                                                                            strip_last=True if args.task_name in [
                                                                                                "QNLI"] else False,
                                                                                            burn_out=args.burn_out if args.burn_out < 100 else None, mode="eval_OOD")
        df_id, _ = compute_train_dy_metrics_per_epoch(eval_ID_dynamics, heuristics_ID, original_id_ID, mode="in_dist")
        df_ood, _ = compute_train_dy_metrics_per_epoch(eval_OOD_dynamics, heuristics_OOD, original_id_OOD, mode="ood")
        # get_top_n_heuristics_samples(df_id, args.model_dir,
        #                              pd.read_csv('/home/jusun/adila001/MNLI/dev_matched_heuristic.tsv', sep='\t|\n'),
        #                              # decoded_label=["entailment", "not_entailment"],
        #                              columns_order=['sentence1', 'sentence2', 'variability',
        #                                             'confidence', 'var_ep', 'conf_ep', 'lexical', 'constituent',
        #                                             'subsequence',
        #                                             'gold_label', 'pred_label'],
        #                              top_heuristic_obj={'lexical': 30})
        # # df_ood['ood'] = 1
        # get_top_n_heuristics_samples(df_ood, args.model_dir,
        #                              pd.read_csv('/home/jusun/adila001/WNLI/train_heuristic.tsv', sep='\t|\n'),
        #                              decoded_label=["not_entailment", "entailment"],
        #                              columns_order=['sentence1', 'sentence2', 'variability',
        #                                             'confidence', 'var_ep', 'conf_ep', 'lexical', 'subsequence',
        #                                             'gold_label', 'pred_label'],
        #                              top_heuristic_obj={'lexical':30})
        # Export per-epoch confidence-sorted CSVs for ID (RTE dev) ...
        get_sorted_samples(df_id, args.model_dir,
                           pd.read_csv('/home/jusun/adila001/RTE/dev_heuristic.tsv',
                                       sep='\t|\n'),
                           decoded_label=["entailment", "not_entailment"],
                           columns_order=['index', 'sentence1', 'sentence2', 'variability',
                                          'confidence', 'var_ep', 'conf_ep', 'lexical',
                                          'subsequence',
                                          'gold_label', 'pred_label'], mode='in_dist')
        # ... and OOD (WNLI train) splits.
        df_ood['ood'] = 1
        get_sorted_samples(df_ood, args.model_dir,
                           pd.read_csv('/home/jusun/adila001/WNLI/train_heuristic.tsv',
                                       sep='\t|\n'),
                           decoded_label=["not_entailment", "entailment"],
                           columns_order=['index', 'sentence1', 'sentence2', 'variability',
                                          'confidence', 'var_ep', 'conf_ep', 'lexical',
                                          'subsequence',
                                          'gold_label', 'pred_label'], mode='ood')
        # print(id_conf)
        # print(ood_conf)
        # Finally, render the combined ID/OOD per-epoch data maps.
        plot_eval_epochs(args, [eval_ID_dynamics, heuristics_ID, original_id_ID, pred_labels_ID],
                         [eval_OOD_dynamics, heuristics_OOD, original_id_OOD, pred_labels_OOD], gif=True)
|
<gh_stars>0
import os
import sys
import bpy
import math
from mathutils import Euler, Matrix, Vector
sys.dont_write_bytecode = 1
dir = os.path.dirname(bpy.data.filepath)
if not dir in sys.path:
sys.path.append(dir)
from hyperparameters import f, r, h, p, e, trig_h, \
clip_depth, clip_thickness, clip_height, clip_e
from optics import hex2xy
from meshes import \
basis_arm, \
basis_cap, \
basis_foot, \
basis_leg, \
basis_plate_axis, \
basis_plate_bottom, \
basis_plate_top, \
basis_screw, \
basis_wheel
import importlib
# Re-import the mesh modules so edits made while Blender stays open take effect.
importlib.reload(basis_screw)
importlib.reload(basis_cap)

# Start from an empty scene: if the default 'Camera' object is present,
# assume this is a fresh .blend and wipe everything before regenerating.
if bpy.context.scene.objects.get('Camera'):
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete()
# ---------------------------------------------------------------------------
# Derived dimensions for every printed part.  All values cascade from the
# global hyperparameters (f, r, h, p, e, clip_*) so that changing one of
# those re-sizes the whole assembly consistently.  Units follow the
# hyperparameters module (presumably meters — TODO confirm).
# ---------------------------------------------------------------------------

# --- wheel -----------------------------------------------------------------
basis_wheel_e = e
basis_wheel_f = f
basis_wheel_t = h
basis_wheel_h = 0.17
basis_wheel_r = r
basis_wheel_p = p # wheel precision
basis_wheel_wr = 0.8 * r # wheel radius
basis_wheel_mr = 0.2 * r
basis_wheel_clip_depth = clip_depth
basis_wheel_clip_thickness = clip_thickness
basis_wheel_clip_height = clip_height
basis_wheel_clip_e = clip_e
basis_wheel_arm_t = 0.01
basis_wheel_top_z = 0
basis_wheel_bottom_z = -basis_wheel_wr * math.sin(math.pi / 3)

# --- arm (sized to mate with the wheel above) ------------------------------
basis_arm_e = basis_wheel_e
basis_arm_t = basis_wheel_arm_t
basis_arm_h = basis_wheel_h
basis_arm_w = 1.2 * r
basis_arm_p = 20
basis_arm_z = basis_wheel_bottom_z
basis_arm_wheel_thickness = basis_wheel_t
basis_arm_wheel_radius = basis_wheel_wr
basis_arm_middle_bar_radius = basis_wheel_mr
basis_arm_teeth_width = r
basis_arm_teeth_height = 0.02
basis_arm_teeth_thickness = 0.8 * basis_arm_t

# --- screw through the wheel hub -------------------------------------------
basis_screw_r = basis_wheel_mr - e
basis_screw_length = 0.015
basis_screw_start = 2 * basis_wheel_t + basis_wheel_arm_t
basis_screw_head_r = basis_screw_r + 0.003
basis_screw_head_h = 0.003
basis_screw_p = 100

# --- cap that locks onto the screw -----------------------------------------
basis_cap_r = basis_screw_r
basis_cap_t = 0.003
basis_cap_top_h = 0.005
basis_cap_bottom_h = 0.0001
basis_cap_h = basis_cap_bottom_h + 1.5 * basis_screw_length + basis_cap_top_h + basis_cap_t
basis_cap_p = basis_screw_p
basis_cap_head_r = basis_cap_r + 0.01
basis_cap_head_length = 0.01

# --- leg (hangs below the arm) ---------------------------------------------
basis_leg_e = basis_arm_e
basis_leg_t = 2 * basis_arm_t + 2 * basis_arm_e
basis_leg_h = basis_arm_h
basis_leg_w = basis_arm_w
basis_leg_x = -0.5 * basis_arm_wheel_thickness
basis_leg_z = basis_arm_z - basis_arm_h
basis_leg_teeth_width = basis_arm_teeth_width
basis_leg_teeth_height = basis_arm_teeth_height
basis_leg_teeth_thickness = basis_arm_teeth_thickness
basis_leg_side_teeth_width = 0.01
basis_leg_side_teeth_height = 0.015
basis_leg_side_teeth_thickness = 0.6 * basis_leg_t
basis_leg_side_teeth_z = 0.5 * (2 * 0.004 + basis_leg_side_teeth_width)

# --- foot (clips onto the leg's side teeth) --------------------------------
basis_foot_e = basis_leg_e
basis_foot_t = basis_leg_t
basis_foot_h = 2 * basis_leg_side_teeth_z
basis_foot_w1 = 0.01
basis_foot_w2 = 0.4 * basis_leg_w
basis_foot_x = basis_leg_x
basis_foot_y = -0.5 * basis_leg_w
basis_foot_z = basis_leg_z - basis_leg_e - basis_leg_h + 0.5 * basis_foot_h + basis_leg_teeth_height
basis_foot_horizontal_tooth_width = basis_leg_side_teeth_width
basis_foot_horizontal_tooth_height = basis_leg_side_teeth_height
basis_foot_horizontal_tooth_thickness = basis_leg_side_teeth_thickness
basis_foot_vertical_tooth_width = 0.5 * basis_foot_w2
basis_foot_vertical_tooth_height = 0.6 * basis_leg_teeth_height
basis_foot_vertical_tooth_thickness = 1.5 * basis_leg_teeth_thickness

# --- top plate --------------------------------------------------------------
basis_plate_top_e = basis_foot_e
basis_plate_top_t = basis_leg_teeth_height
basis_plate_top_r = 1.1 * r
basis_plate_top_sr = basis_arm_middle_bar_radius
basis_plate_top_p = 50
basis_plate_top_x = basis_foot_x
basis_plate_top_z = basis_foot_z - 0.5 * basis_foot_h
basis_plate_top_hex_side = r
basis_plate_top_large_tooth_width = basis_leg_teeth_width
basis_plate_top_large_tooth_height = basis_leg_teeth_height
basis_plate_top_large_tooth_thickness = basis_leg_teeth_thickness
basis_plate_top_small_teeth_width = basis_foot_vertical_tooth_width
basis_plate_top_small_teeth_height = basis_foot_vertical_tooth_height
basis_plate_top_small_teeth_thickness = basis_foot_vertical_tooth_thickness
basis_plate_top_leg_width = basis_leg_w
basis_plate_top_foot_w1 = basis_foot_w1
basis_plate_top_foot_w2 = basis_foot_w2
basis_plate_top_foot_thickness = basis_foot_t

# --- axis plate (between top and bottom plates) ----------------------------
basis_plate_axis_e = basis_plate_top_e
basis_plate_axis_t = 0.004
basis_plate_axis_r = basis_plate_top_r * math.cos(math.pi / 6)
basis_plate_axis_p = basis_plate_top_p
basis_plate_axis_x = basis_plate_top_x - (math.sqrt(3) / 2) * r + 0.5 * h
basis_plate_axis_z = basis_plate_top_z - basis_plate_top_t - basis_plate_axis_e
basis_plate_axis_top_t = basis_plate_top_t
basis_plate_axis_top_r = basis_plate_top_sr
basis_plate_axis_bottom_t = basis_plate_top_t
basis_plate_axis_bottom_r = basis_plate_top_sr

# --- bottom plate -----------------------------------------------------------
basis_plate_bottom_e = basis_plate_top_e
basis_plate_bottom_t = basis_plate_axis_bottom_t
basis_plate_bottom_r = basis_plate_top_r
basis_plate_bottom_sr = basis_plate_axis_bottom_r
basis_plate_bottom_p = basis_plate_top_p
basis_plate_bottom_x = basis_plate_top_x
basis_plate_bottom_z = basis_plate_axis_z - basis_plate_axis_t - basis_plate_bottom_e
basis_plate_bottom_hex_side = basis_plate_top_hex_side
basis_plate_bottom_top_plate_thickness = basis_plate_top_t
def move_basis_to(obj, z):
    """Park *obj* on hex edge index ``z``.

    The object is moved to the midpoint of the edge's two corner vertices
    (as returned by ``hex2xy``), lifted to height ``h``, and yawed so it
    faces outward along that edge (pi/3 per edge, offset by half an edge).
    """
    corner_a = hex2xy(r, 0, 0, z, 1)
    corner_b = hex2xy(r, 0, 0, z, 2)
    mid_x = 0.5 * (corner_a[0] + corner_b[0])
    mid_y = 0.5 * (corner_a[1] + corner_b[1])
    obj.location.x = mid_x
    obj.location.y = mid_y
    obj.location.z = h
    obj.rotation_euler = Euler((0, 0, (z + 0.5) * (math.pi / 3)), 'XYZ')
# Collection that will hold every generated basis object.
basis_collection = bpy.data.collections.new('basis_collection')
bpy.context.scene.collection.children.link(basis_collection)

# Axial-direction step vectors on the hex grid.
# NOTE(review): not referenced later in this script — presumably kept for
# other tooling; confirm before removing.
hex_arrows = [(0, 1), (-1, 2), (-1, 1), (0, -1), (1, -2), (1, -1)]

# Right-hand wheel mesh (origin offset Vector((0, 0, 0))).
basis_wheel_mesh_r = basis_wheel.create_mesh(
    basis_wheel_e,
    basis_wheel_f,
    basis_wheel_t,
    basis_wheel_h,
    basis_wheel_r,
    basis_wheel_p,
    basis_wheel_wr,
    basis_wheel_mr,
    basis_wheel_top_z,
    h,
    trig_h,
    Vector((0, 0, 0)),
    basis_wheel_clip_depth,
    basis_wheel_clip_thickness,
    basis_wheel_clip_height,
    basis_wheel_clip_e,
    basis_wheel_arm_t
)

# Left-hand wheel mesh — identical except for the offset vector
# (Vector((0, 0, 3)); semantics of that offset live in basis_wheel).
basis_wheel_mesh_l = basis_wheel.create_mesh(
    basis_wheel_e,
    basis_wheel_f,
    basis_wheel_t,
    basis_wheel_h,
    basis_wheel_r,
    basis_wheel_p,
    basis_wheel_wr,
    basis_wheel_mr,
    basis_wheel_top_z,
    h,
    trig_h,
    Vector((0, 0, 3)),
    basis_wheel_clip_depth,
    basis_wheel_clip_thickness,
    basis_wheel_clip_height,
    basis_wheel_clip_e,
    basis_wheel_arm_t
)

# Hub screw, translated into place against the wheel/arm stack.
basis_wheel_screw_mesh = basis_screw.create_mesh(
    basis_screw_r,
    basis_screw_length,
    basis_screw_start,
    basis_screw_head_r,
    basis_screw_head_h,
    basis_screw_p
)
basis_wheel_screw_mesh.transform(
    Matrix.Translation(
        Vector((
            -0.5 * basis_wheel_t \
            - 0.5 * basis_wheel_arm_t \
            - basis_wheel_t,
            0,
            basis_wheel_bottom_z
        ))
    )
)

# Cap that threads onto the screw, translated to the opposite side.
basis_wheel_screw_cap_mesh = basis_cap.create_mesh(
    basis_cap_r,
    basis_cap_t,
    basis_cap_h,
    basis_cap_p,
    basis_cap_top_h,
    basis_cap_bottom_h,
    bm = None,
    head_r = basis_cap_head_r,
    head_length = basis_cap_head_length
)
basis_wheel_screw_cap_mesh.transform(
    Matrix.Translation(
        Vector((
            basis_wheel_arm_t + e,
            0,
            basis_wheel_bottom_z
        ))
    )
)
# Arm that carries the wheel.
basis_arm_mesh = basis_arm.create_mesh(
    basis_arm_e,
    basis_arm_t,
    basis_arm_h,
    basis_arm_w,
    basis_arm_p,
    basis_arm_z,
    basis_arm_wheel_thickness,
    basis_arm_wheel_radius,
    basis_arm_middle_bar_radius,
    basis_arm_teeth_width,
    basis_arm_teeth_height,
    basis_arm_teeth_thickness
)

# Leg hanging below the arm; side teeth receive the feet.
basis_leg_mesh = basis_leg.create_mesh(
    basis_leg_e,
    basis_leg_t,
    basis_leg_h,
    basis_leg_w,
    basis_leg_x,
    basis_leg_z,
    basis_leg_teeth_width,
    basis_leg_teeth_height,
    basis_leg_teeth_thickness,
    basis_leg_side_teeth_width,
    basis_leg_side_teeth_height,
    basis_leg_side_teeth_thickness,
    basis_leg_side_teeth_z
)

# Feet: mirrored pair, distinguished only by the final yscale argument.
basis_foot_mesh_r = basis_foot.create_mesh(
    basis_foot_e,
    basis_foot_t,
    basis_foot_h,
    basis_foot_w1,
    basis_foot_w2,
    basis_foot_x,
    basis_foot_y,
    basis_foot_z,
    basis_foot_horizontal_tooth_width,
    basis_foot_horizontal_tooth_height,
    basis_foot_horizontal_tooth_thickness,
    basis_foot_vertical_tooth_width,
    basis_foot_vertical_tooth_height,
    basis_foot_vertical_tooth_thickness,
    1 # yscale
)
basis_foot_mesh_l = basis_foot.create_mesh(
    basis_foot_e,
    basis_foot_t,
    basis_foot_h,
    basis_foot_w1,
    basis_foot_w2,
    basis_foot_x,
    basis_foot_y,
    basis_foot_z,
    basis_foot_horizontal_tooth_width,
    basis_foot_horizontal_tooth_height,
    basis_foot_horizontal_tooth_thickness,
    basis_foot_vertical_tooth_width,
    basis_foot_vertical_tooth_height,
    basis_foot_vertical_tooth_thickness,
    -1 # yscale
)

# Plate stack: top plate, axis plate, bottom plate.
basis_plate_top_mesh = basis_plate_top.create_mesh(
    basis_plate_top_e,
    basis_plate_top_t,
    basis_plate_top_r,
    basis_plate_top_sr,
    basis_plate_top_p,
    basis_plate_top_x,
    basis_plate_top_z,
    basis_plate_top_hex_side,
    basis_plate_top_large_tooth_width,
    basis_plate_top_large_tooth_height,
    basis_plate_top_large_tooth_thickness,
    basis_plate_top_small_teeth_width,
    basis_plate_top_small_teeth_height,
    basis_plate_top_small_teeth_thickness,
    basis_plate_top_leg_width,
    basis_plate_top_foot_w1,
    basis_plate_top_foot_w2,
    basis_plate_top_foot_thickness
)
basis_plate_axis_mesh = basis_plate_axis.create_mesh(
    basis_plate_axis_e,
    basis_plate_axis_t,
    basis_plate_axis_r,
    basis_plate_axis_p,
    basis_plate_axis_x,
    basis_plate_axis_z,
    basis_plate_axis_top_t,
    basis_plate_axis_top_r,
    basis_plate_axis_bottom_t,
    basis_plate_axis_bottom_r
)
basis_plate_bottom_mesh = basis_plate_bottom.create_mesh(
    basis_plate_bottom_e,
    basis_plate_bottom_t,
    basis_plate_bottom_r,
    basis_plate_bottom_sr,
    basis_plate_bottom_p,
    basis_plate_bottom_x,
    basis_plate_bottom_z,
    basis_plate_bottom_hex_side,
    basis_plate_bottom_top_plate_thickness
)
# print('basis wheel mesh created: ' + str(basis_wheel_mesh))

# Instantiate one Blender object per mesh and park it on its hex edge.
# FIX: object variables now carry an *_obj* marker wherever the bare name
# would shadow an earlier constant (basis_cap_r, basis_plate_top_r,
# basis_plate_bottom_r) or clobber an imported mesh module (basis_plate_axis).
# The Blender object *names* (the string arguments) are unchanged.

# --- right side, hex edge 0 -------------------------------------------------
basis_wheel_object_r = bpy.data.objects.new('basis_wheel_r', basis_wheel_mesh_r)
basis_collection.objects.link(basis_wheel_object_r)
move_basis_to(basis_wheel_object_r, 0)

basis_arm_r = bpy.data.objects.new('basis_arm_r', basis_arm_mesh)
basis_collection.objects.link(basis_arm_r)
move_basis_to(basis_arm_r, 0)

basis_screw_obj_r = bpy.data.objects.new('basis_screw_r', basis_wheel_screw_mesh)
basis_collection.objects.link(basis_screw_obj_r)
move_basis_to(basis_screw_obj_r, 0)

basis_cap_obj_r = bpy.data.objects.new('basis_cap_r', basis_wheel_screw_cap_mesh)
basis_collection.objects.link(basis_cap_obj_r)
move_basis_to(basis_cap_obj_r, 0)

basis_leg_r = bpy.data.objects.new('basis_leg_r', basis_leg_mesh)
basis_collection.objects.link(basis_leg_r)
move_basis_to(basis_leg_r, 0)

basis_foot_rr = bpy.data.objects.new('basis_foot_rr', basis_foot_mesh_r)
basis_collection.objects.link(basis_foot_rr)
move_basis_to(basis_foot_rr, 0)

basis_foot_rl = bpy.data.objects.new('basis_foot_rl', basis_foot_mesh_l)
basis_collection.objects.link(basis_foot_rl)
move_basis_to(basis_foot_rl, 0)

basis_plate_top_obj_r = bpy.data.objects.new('basis_plate_top_r', basis_plate_top_mesh)
basis_collection.objects.link(basis_plate_top_obj_r)
move_basis_to(basis_plate_top_obj_r, 0)

basis_plate_bottom_obj_r = bpy.data.objects.new('basis_plate_bottom_r', basis_plate_bottom_mesh)
basis_collection.objects.link(basis_plate_bottom_obj_r)
move_basis_to(basis_plate_bottom_obj_r, 0)

# Single axis plate, shared between the two sides.
basis_plate_axis_obj = bpy.data.objects.new('basis_plate_axis', basis_plate_axis_mesh)
basis_collection.objects.link(basis_plate_axis_obj)
move_basis_to(basis_plate_axis_obj, 0)

# --- left side, hex edge 3 (mirrored copies sharing the same meshes) -------
basis_wheel_object_l = bpy.data.objects.new('basis_wheel_l', basis_wheel_mesh_l)
basis_collection.objects.link(basis_wheel_object_l)
move_basis_to(basis_wheel_object_l, 3)

basis_arm_l = bpy.data.objects.new('basis_arm_l', basis_arm_mesh)
basis_collection.objects.link(basis_arm_l)
move_basis_to(basis_arm_l, 3)

basis_screw_obj_l = bpy.data.objects.new('basis_screw_l', basis_wheel_screw_mesh)
basis_collection.objects.link(basis_screw_obj_l)
move_basis_to(basis_screw_obj_l, 3)

basis_cap_obj_l = bpy.data.objects.new('basis_cap_l', basis_wheel_screw_cap_mesh)
basis_collection.objects.link(basis_cap_obj_l)
move_basis_to(basis_cap_obj_l, 3)

basis_leg_l = bpy.data.objects.new('basis_leg_l', basis_leg_mesh)
basis_collection.objects.link(basis_leg_l)
move_basis_to(basis_leg_l, 3)

basis_foot_lr = bpy.data.objects.new('basis_foot_lr', basis_foot_mesh_r)
basis_collection.objects.link(basis_foot_lr)
move_basis_to(basis_foot_lr, 3)

basis_foot_ll = bpy.data.objects.new('basis_foot_ll', basis_foot_mesh_l)
basis_collection.objects.link(basis_foot_ll)
move_basis_to(basis_foot_ll, 3)

basis_plate_top_obj_l = bpy.data.objects.new('basis_plate_top_l', basis_plate_top_mesh)
basis_collection.objects.link(basis_plate_top_obj_l)
move_basis_to(basis_plate_top_obj_l, 3)

basis_plate_bottom_obj_l = bpy.data.objects.new('basis_plate_bottom_l', basis_plate_bottom_mesh)
basis_collection.objects.link(basis_plate_bottom_obj_l)
move_basis_to(basis_plate_bottom_obj_l, 3)

print('done')
# bpy.ops.export_mesh.stl(filepath="C:\\Users\\Count\\Documents\\projects\\hexcope\\stl\\", check_existing=True, filter_glob='*.stl', global_scale=1000.0, use_scene_unit=False, ascii=False, use_mesh_modifiers=True, batch_mode='OBJECT', axis_forward='Y', axis_up='Z', use_selection=False)
import asyncio
import logging
import pickle
from dataclasses import dataclass
from datetime import datetime, timedelta
from operator import itemgetter
from typing import Any, Dict, List, Optional, Union
from uuid import uuid4
import aioredis
from aioredis import MultiExecError, Redis
from .constants import job_key_prefix, queue_name, result_key_prefix
from .jobs import Job
from .utils import timestamp_ms, to_ms, to_unix_ms
logger = logging.getLogger('arq.connections')
@dataclass
class RedisSettings:
    """
    No-Op class used to hold redis connection redis_settings.

    Used by :func:`arq.connections.create_pool` and :class:`arq.worker.Worker`.
    """

    host: str = 'localhost'
    port: int = 6379
    database: int = 0
    # FIX: the source contained an un-substituted ``<PASSWORD>`` placeholder,
    # which is a syntax error.  ``None`` means "no AUTH on this server".
    password: Optional[str] = None
    conn_timeout: int = 1
    conn_retries: int = 5
    conn_retry_delay: int = 1

    def __repr__(self):
        return '<RedisSettings {}>'.format(' '.join(f'{k}={v}' for k, v in self.__dict__.items()))
# extra time after the job is expected to start when the job key should expire, 1 day in ms
expires_extra_ms = 86_400_000
class ArqRedis(Redis):
    """
    Thin subclass of ``aioredis.Redis`` which adds :func:`arq.connections.enqueue_job`.
    """

    async def enqueue_job(
        self,
        function: str,
        *args: Any,
        _job_id: Optional[str] = None,
        _defer_until: Optional[datetime] = None,
        _defer_by: Union[None, int, float, timedelta] = None,
        _expires: Union[None, int, float, timedelta] = None,
        _job_try: Optional[int] = None,
        **kwargs: Any,
    ) -> Optional[Job]:
        """
        Enqueue a job.

        :param function: Name of the function to call
        :param args: args to pass to the function
        :param _job_id: ID of the job, can be used to enforce job uniqueness
        :param _defer_until: datetime at which to run the job
        :param _defer_by: duration to wait before running the job
        :param _expires: if the job still hasn't started after this duration, do not run it
        :param _job_try: useful when re-enqueueing jobs within a job
        :param kwargs: any keyword arguments to pass to the function
        :return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
        """
        job_id = _job_id or uuid4().hex
        job_key = job_key_prefix + job_id
        assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
        defer_by_ms = to_ms(_defer_by)
        expires_ms = to_ms(_expires)

        with await self as conn:
            # WATCH the job key so the MULTI/EXEC below aborts if another
            # client creates the same job between our check and the EXEC.
            pipe = conn.pipeline()
            pipe.unwatch()
            pipe.watch(job_key)
            job_exists = pipe.exists(job_key)
            await pipe.execute()
            if await job_exists:
                # a job with this ID is already queued -> signal "not enqueued"
                return

            enqueue_time_ms = timestamp_ms()
            # the zset score is the unix-ms time at which the job may start
            if _defer_until is not None:
                score = to_unix_ms(_defer_until)
            elif defer_by_ms:
                score = enqueue_time_ms + defer_by_ms
            else:
                score = enqueue_time_ms
            # unless explicitly set, the job key outlives its scheduled start
            # by expires_extra_ms so slow workers can still claim it
            expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms

            job = pickle.dumps((enqueue_time_ms, _job_try, function, args, kwargs))
            # transaction: write the job payload and add it to the queue zset
            tr = conn.multi_exec()
            tr.psetex(job_key, expires_ms, job)
            tr.zadd(queue_name, score, job_id)
            try:
                await tr.execute()
            except MultiExecError:
                # job got enqueued since we checked 'job_exists'
                return
        return Job(job_id, self)

    async def _get_job_result(self, key):
        # *key* is the full redis key; strip the prefix to recover the job id
        job_id = key[len(result_key_prefix) :]
        job = Job(job_id, self)
        r = await job.result_info()
        r['job_id'] = job_id
        return r

    async def all_job_results(self) -> List[Dict]:
        """
        Get results for all jobs in redis.
        """
        keys = await self.keys(result_key_prefix + '*')
        results = await asyncio.gather(*[self._get_job_result(k) for k in keys])
        return sorted(results, key=itemgetter('enqueue_time'))
async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis:
    """
    Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.

    Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,
    thus allowing job enqueuing.

    :param settings: connection settings; defaults to a fresh :class:`RedisSettings`
    :param _retry: internal recursion counter — do not pass this yourself
    """
    settings = settings or RedisSettings()
    addr = settings.host, settings.port
    try:
        pool = await aioredis.create_redis_pool(
            addr,
            db=settings.database,
            # FIX: was an un-substituted <PASSWORD> placeholder (syntax error)
            password=settings.password,
            timeout=settings.conn_timeout,
            encoding='utf8',
            commands_factory=ArqRedis,
        )
    except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:
        if _retry < settings.conn_retries:
            logger.warning(
                'redis connection error %s:%s %s %s, %d retries remaining...',
                settings.host,
                settings.port,
                e.__class__.__name__,
                e,
                settings.conn_retries - _retry,
            )
            await asyncio.sleep(settings.conn_retry_delay)
        else:
            raise
    else:
        if _retry > 0:
            logger.info('redis connection successful')
        return pool

    # recursively attempt to create the pool outside the except block to avoid
    # "During handling of the above exception..." madness
    return await create_pool(settings, _retry=_retry + 1)
async def log_redis_info(redis, log_func):
    """Fetch server stats from *redis* and emit a one-line summary via *log_func*."""
    with await redis as conn:
        info, key_count = await asyncio.gather(conn.info(), conn.dbsize())

    version = info["server"]["redis_version"]
    mem_usage = info["memory"]["used_memory_human"]
    clients = info["clients"]["connected_clients"]
    summary = 'redis_version={} mem_usage={} clients_connected={} db_keys={}'.format(
        version, mem_usage, clients, key_count
    )
    log_func(summary)
|
import os
import sys
import pygame
import random
from code.tools.mixer import BGMixer
from code.controllers.intervalcontroller import IntervalController
from code.constants.sound import *
# load_sound just quickly loads a sound file and returns it.
def load_sound(path):
    """Load the sound file at *path*.

    If the pygame mixer is unavailable, return a silent null-object so
    callers can invoke ``.play()`` unconditionally.  Exits the program
    (SystemExit) when the file exists but cannot be decoded.
    """
    class NoneSound:
        # Null-object stand-in with the same .play() interface as a Sound.
        def play(self): pass

    if ( ( not pygame.mixer ) or ( not pygame.mixer.get_init() ) ):
        log( "Failed to load sound" )
        return NoneSound()

    sound = None

    try:
        sound = pygame.mixer.Sound(path)
    # FIX: 'except pygame.error, message' is Python-2-only syntax;
    # the 'as' form is equivalent and valid on both Python 2.6+ and 3.
    except pygame.error as message:
        log( 'Cannot load sound:', path )
        # FIX: 'raise SystemExit, message' is also Python-2-only syntax.
        raise SystemExit(message)

    return sound
class SoundController:
    """Central audio hub: owns background-music playback (via BGMixer),
    one-shot sound effects, and a fade controller that can duck the music
    volume (e.g. during dialogue)."""

    def __init__(self):
        # Track background music volume
        self.background_volume = 0.75

        # Track background music percentage (music
        # can fade during dialogue, etc.).
        # Defaults to 100%.
        self.background_ratio_controller = IntervalController(
            interval = 1.0,
            target = 1.0,
            speed_in = 0.010,
            speed_out = 0.0075
        )

        # Track sound effect volume
        self.sfx_volume = 0.75

        # Create a background track mixer
        self.mixer = BGMixer(
            self.get_background_volume()
        )

        # Load sound effects.  Each entry maps an SFX_* constant to a LIST of
        # variants; process() picks one at random when the effect fires.
        self.sound_effects = {
            SFX_MENU_CURSOR: [ load_sound( os.path.join("sound", "sfx", "tick%d.wav" % o) ) for o in (1, 2, 3, 4) ],
            SFX_MENU_SELECT: [ load_sound( os.path.join("sound", "sfx", "beep%d.wav" % o) ) for o in (2,) ],
            #SFX_PLAYER_DIG: load_sound("dig3.wav"),
            #SFX_PLAYER_BOMB: load_sound("dig1.wav"),
            #SFX_BOMB_EXPLODE: load_sound("bomb1.wav"),
            #SFX_BOMB_TICK: load_sound("bomb_tick.wav"),
            SFX_PLAYER_GRAB_GOLD: [ load_sound( os.path.join("sound", "sfx", "gold%d.wav" % o) ) for o in (1, 2, 3, 4, 5) ],
            SFX_PLAYER_WALK: [ load_sound( os.path.join("sound", "sfx", "steps%d.wav" % o) ) for o in (1, 2) ],
            SFX_PLAYER_DIG: [ load_sound( os.path.join("sound", "sfx", "dig%d.wav" % o) ) for o in (1, 2, 3, 4) ],
            SFX_NEWS: [ load_sound( os.path.join("sound", "sfx", "news1.wav") ) ],
            SFX_CONFIRM: [ load_sound( os.path.join("sound", "sfx", "query1.wav") ) ],
            SFX_PLACE_BOMB: [ load_sound( os.path.join("sound", "sfx", "placebomb%d.wav" % o) ) for o in (1, 2) ],
            SFX_BOMB_EXPLODE: [ load_sound( os.path.join("sound", "sfx", "explode%d.wav" % o) ) for o in (1, 2) ]
        }

        # Keep a list of queued sound effect types
        self.sfx_queue = []

    # Set background volume
    def set_background_volume(self, volume, permanent = True):
        """Apply *volume* to the mixer; with permanent=False the stored base
        volume is left untouched (used for temporary fades)."""
        # Set permanently?
        if (permanent):
            self.background_volume = volume

        # Update the mixer
        self.mixer.set_volume(volume)

    # Set background maximum percentage
    def set_background_ratio(self, ratio):
        """Begin fading the music toward *ratio* (fraction of base volume)."""
        # Set new interval target
        self.background_ratio_controller.set_target(ratio)

    # Set sound effects volume
    def set_sfx_volume(self, volume):
        # Set
        self.sfx_volume = volume

    # Get background volume
    def get_background_volume(self):
        # Return
        return self.background_volume

    # Get sound effects volume
    def get_sfx_volume(self):
        # Return
        return self.sfx_volume

    # Queue a sound effect
    def queue_sound(self, index):
        """Schedule the SFX_* effect *index* to play on the next process()."""
        # Add to the queue
        self.sfx_queue.append(index)

    # Process sound-related stuff
    def process(self, universe):
        """Per-frame update: loop music, apply fade ratio, drain SFX queue.
        (*universe* is currently unused here.)"""
        """ Debug """
        #self.sfx_queue = []
        #return
        """ End Debug """

        # Check to see if the current background track has ended
        if ( not self.mixer.is_playing() ):
            # Loop to next track
            self.mixer.load_next_track()

        # Check to see if the background music ratio has changed
        if ( self.background_ratio_controller.get_interval() != self.background_ratio_controller.get_target() ):
            # Set raw background volume level; permanent=False keeps the
            # stored base volume intact while the fade is in progress.
            self.set_background_volume(
                self.background_ratio_controller.get_interval() * self.get_background_volume(),
                permanent = False
            )

        # Process background music ratio controller
        self.background_ratio_controller.process()

        # Check for queued sound effects
        while ( len(self.sfx_queue) > 0 ):
            index = self.sfx_queue.pop(0)

            if (index in self.sound_effects):
                sounds = self.sound_effects[index]
                # Pick one of the loaded variants at random
                sound = sounds[ random.randint(0, len(sounds) - 1) ]

                # Set volume level for the sound
                sound.set_volume(
                    self.get_sfx_volume()
                )

                sound.play()

                #each.queued_sound = None
|
#@title Quick_CNN { display-mode: "both" }
# # coding: utf-8
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from functools import reduce
def weight_variable(shape):
    """Return a trainable weight tensor of *shape*, truncated-normal initialized (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a trainable bias tensor of *shape*, initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """SAME-padded 2-D convolution of *x* with kernel *W*, stride 1 in every dimension."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling of *x* with stride 2 (halves spatial dimensions), SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def conv_layer(pre_layer, depth, ksize=3):
    """Append one conv(ksize x ksize, *depth* filters) + ReLU + 2x2 max-pool stage.

    :param pre_layer: input tensor; its last dimension is the input depth
    :param depth: number of output channels
    :param ksize: square kernel size (default 3, the original hard-coded value)

    FIX: reuses weight_variable/bias_variable/conv2d/max_pool_2x2 instead of
    duplicating their logic inline.  The bias is now a trainable tf.Variable
    (via bias_variable) rather than a frozen tf.constant — the constant bias
    was almost certainly an oversight, since the fc layers use bias_variable.
    """
    in_depth = pre_layer.get_shape().as_list()[-1]
    w = weight_variable([ksize, ksize, in_depth, depth])
    b = bias_variable([depth])
    h_conv = tf.nn.relu(conv2d(pre_layer, w) + b)
    return max_pool_2x2(h_conv)
if __name__ == '__main__':
    # Train a small 4-stage CNN on MNIST for 100 mini-batches.
    mnist = input_data.read_data_sets('/content/GoogleDrive/Python27/MNIST_data', one_hot=True)
    batch_size = 32

    with tf.name_scope('Input'):
        x = tf.placeholder("float", shape=[None, 784])
        y_ = tf.placeholder("float", shape=[None, 10])
        x_image = tf.reshape(x, [-1,28,28,1])
        # x_image_sum = tf.summary.image('input_images', x_image)

    # ------------------conv-----------------------------------
    # Four conv+pool stages, 64 filters each; 28x28 input pools down to 2x2.
    layer = x_image
    for layer_i in range(4):
        layer = conv_layer(layer, 64)

    # --------------fc--------------------------------------
    # Flatten: number of features = product of all non-batch dimensions.
    layer_shape = layer.get_shape().as_list()
    num_f = reduce(lambda a,b:a * b, layer_shape[1:])
    # num_f = layer_shape[]
    W_fc1 = weight_variable([num_f, 1024])
    b_fc1 = bias_variable([1024])

    h_pool2_flat = tf.reshape(layer, [-1, num_f])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout between the fc layers; keep_prob fed at run time.
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])

    with tf.name_scope('Loss'):
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    with tf.name_scope('Train'):
        train_step = tf.train.AdamOptimizer(5e-4).minimize(cross_entropy)
    with tf.name_scope('Accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        accuracy_sum = tf.summary.scalar('accuracy', accuracy)

    sess = tf.Session()
    # sess.run(tf.initialize_all_variables())
    # writer = tf.summary.FileWriter("E:\Anaconda2\Programs\Tensorboard", sess.graph)
    sess.run(tf.global_variables_initializer())
    # merged = tf.summary.merge([x_image_sum, w_conv1_sum, loss_sum, accuracy_sum, conv1_output])
    # saver = tf.train.Saver(max_to_keep=1)  # keep only the best checkpoint
    # max_acc = 0

    for i in range(100):
        batch = mnist.train.next_batch(batch_size)
        # sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        _, acc, loss = sess.run([train_step, accuracy, cross_entropy], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        # writer.add_summary(rs, i)
        step = i+1
        print("When the cross_entropy is %.2f, accuracy on training data at step %s is %.2f ." %(loss, step, acc))

        # Every 10 steps, evaluate on the full test set (dropout disabled).
        if i%10 == 0:
            print('\n')
            test_accuracy = sess.run(accuracy, feed_dict={
                x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
            print('Accuracy on testing data is %.2f .' %(test_accuracy))
        # sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        # if (acc > max_acc) & (i > 399):  # save the highest-accuracy models
        #     max_acc = acc
        #     saver.save(sess, r'E:\Anaconda2\Programs\Tensorboard\f_map.ckpt', global_step=i+1)

    # test_image, test_label = mnist.test.images[100,:].reshape((1,-1)), mnist.test.labels[0,:].reshape((1,-1))
    # features1 = sess.run(h_pool1, feed_dict={x: test_image, y_: test_label, keep_prob: 1.0})
    # print("test accuracy %g"%sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
    sess.close()
|
<reponame>souravrhythm/opendp
def test_sized_bounded_float_sum():
    """known-n bounded float sum (assuming n is public)"""
    from opendp.trans import make_split_dataframe, make_select_column, \
        make_cast, make_impute_constant, \
        make_clamp, make_bounded_resize, make_sized_bounded_sum
    from opendp.meas import make_base_laplace, make_base_gaussian
    from opendp.mod import binary_search_chain, enable_features
    enable_features("floating-point", "contrib")

    size = 200
    bounds = (0., 20.)

    preprocess = (
        # Convert csv string into a dataframe of String columns
        make_split_dataframe(",", ['A', 'B']) >>
        # Selects a column of df, Vec<str>
        make_select_column("A", TOA=str) >>
        # Cast the column as Vec<Optional<Float>>
        make_cast(TIA=str, TOA=float) >>
        # Impute missing values to 0, emit Vec<Float>
        make_impute_constant(constant=0.) >>
        # Clamp values
        make_clamp(bounds=bounds) >>
        # Resize dataset length
        make_bounded_resize(size=size, bounds=bounds, constant=0.) >>
        # Aggregate with sum
        make_sized_bounded_sum(size=size, bounds=bounds)
    )

    # Add noise such that when d_in=1, the result is 1 epsilon DP
    # (binary_search_chain finds the smallest noise scale that satisfies it).
    laplace_known_n_sum_from_dataframe = binary_search_chain(
        lambda s: preprocess >> make_base_laplace(s),
        d_in=1, d_out=1.)
    gaussian_known_n_sum_from_dataframe = binary_search_chain(
        lambda s: preprocess >> make_base_gaussian(s),
        d_in=1, d_out=(1., 1e-5))

    assert laplace_known_n_sum_from_dataframe.check(1, 1.)

    data = "\n".join(["1"] * size)

    print(laplace_known_n_sum_from_dataframe(data))
    print(gaussian_known_n_sum_from_dataframe(data))
def test_sized_bounded_int_sum():
    """known-n bounded int sum (assuming n is public)"""
    from opendp.trans import make_split_dataframe, make_select_column, \
        make_cast, make_impute_constant, \
        make_clamp, make_bounded_resize, make_sized_bounded_sum
    from opendp.meas import make_base_geometric
    from opendp.mod import binary_search_chain, enable_features
    enable_features("floating-point", "contrib")

    size = 200
    bounds = (0, 20)

    preprocess = (
        # Convert csv string into a dataframe of String columns
        make_split_dataframe(",", ['A', 'B']) >>
        # Selects a column of df, Vec<str>
        make_select_column("A", TOA=str) >>
        # Cast the column as Vec<Optional<int>>
        make_cast(TIA=str, TOA=int) >>
        # Impute missing values to 0, emit Vec<int>
        make_impute_constant(constant=0) >>
        # Clamp values
        make_clamp(bounds=bounds) >>
        # Resize dataset length
        make_bounded_resize(size=size, bounds=bounds, constant=0) >>
        # Aggregate with sum
        make_sized_bounded_sum(size=size, bounds=bounds)
    )

    # Geometric (discrete) noise for the integer sum; search for the scale
    # that yields 1-epsilon DP at d_in=1.
    noisy_known_n_sum_from_dataframe = binary_search_chain(
        lambda s: preprocess >> make_base_geometric(s),
        d_in=1, d_out=1.)

    assert noisy_known_n_sum_from_dataframe.check(1, 1.)

    data = "\n".join(["1"] * size)

    print(noisy_known_n_sum_from_dataframe(data))
def test_bounded_float_sum():
    """bounded float sum (assuming n is unknown)"""
    from opendp.trans import make_split_dataframe, make_select_column, \
        make_cast, make_impute_constant, \
        make_clamp, make_bounded_sum
    from opendp.meas import make_base_laplace, make_base_gaussian
    from opendp.mod import binary_search_chain, enable_features
    # NOTE(review): the sibling tests enable ("floating-point", "contrib");
    # confirm whether "contrib" is also required here or deliberately omitted.
    enable_features("floating-point")

    bounds = (0., 20.)

    preprocess = (
        # Convert csv string into a dataframe of String columns
        make_split_dataframe(",", ['A', 'B']) >>
        # Selects a column of df, Vec<str>
        make_select_column("A", TOA=str) >>
        # Cast the column as Vec<Optional<float>>
        make_cast(TIA=str, TOA=float) >>
        # Impute missing values to 0, emit Vec<float>
        make_impute_constant(constant=0.) >>
        # Clamp values
        make_clamp(bounds=bounds) >>
        # Aggregate with sum. Resize is not necessary with make_bounded_sum, only make_sized_bounded_sum
        make_bounded_sum(bounds=bounds)
    )

    laplace_sum_from_dataframe = binary_search_chain(
        lambda s: preprocess >> make_base_laplace(s),
        d_in=1, d_out=1.)
    gaussian_sum_from_dataframe = binary_search_chain(
        lambda s: preprocess >> make_base_gaussian(s),
        d_in=1, d_out=(1., 1e-5))

    assert laplace_sum_from_dataframe.check(1, 1.)

    data = "\n".join(["1"] * 100)

    print(laplace_sum_from_dataframe(data))
    print(gaussian_sum_from_dataframe(data))
def test_bounded_int_sum():
    """bounded int sum (assuming n is unknown)"""
    from opendp.trans import make_split_dataframe, make_select_column, \
        make_cast, make_impute_constant, \
        make_clamp, make_bounded_sum
    from opendp.meas import make_base_geometric
    from opendp.mod import binary_search_chain
    # NOTE(review): no enable_features call here — this test appears to rely on
    # a previously-run test having enabled the required features; confirm it
    # still passes when run in isolation.

    bounds = (0, 20)

    # Same pipeline as test_bounded_float_sum, but over ints with geometric noise.
    preprocess = (
        make_split_dataframe(",", ['A', 'B']) >>
        make_select_column("A", TOA=str) >>
        make_cast(TIA=str, TOA=int) >>
        make_impute_constant(constant=0) >>
        make_clamp(bounds=bounds) >>
        make_bounded_sum(bounds=bounds)
    )

    noisy_sum_from_dataframe = binary_search_chain(
        lambda s: preprocess >> make_base_geometric(s),
        d_in=1, d_out=1.)

    assert noisy_sum_from_dataframe.check(1, 1.)

    data = "\n".join(["1"] * 100)

    print(noisy_sum_from_dataframe(data))
|
<gh_stars>0
from SPARQLWrapper import SPARQLWrapper, JSON
from collections import defaultdict
def get_games_based_on_genre(genre,sparql):
    """Return up to 20 games whose genre label matches *genre* (JSON bindings).

    NOTE(review): *genre* is spliced into the query verbatim, so it must
    arrive already quoted (e.g. '"Action"') for ``rdfs:label "..."@en`` to be
    well-formed — confirm with callers.  This string splicing is also a
    SPARQL-injection risk; only pass trusted values.
    """
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT ?game ?game_name
        WHERE{
        ?game a mgns:Game .
        ?game mgns:hasGenre ?genre .
        ?genre a mgns:Genre .
        ?genre rdfs:label ''' +str(genre)+'''@en .
        ?game schema:name ?game_name .
        }
        LIMIT 20
    ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return results
def get_games_having_rating_higher(rating,sparql):
    """Return up to 20 games with ratingValue strictly above *rating* (JSON bindings).

    NOTE(review): *rating* is interpolated into the FILTER clause as text —
    pass only trusted numeric values (SPARQL-injection risk otherwise).
    """
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT ?game ?game_name ?rating
        WHERE{
        ?game a mgns:Game .
        ?game mgns:ratingValue ?rating .
        FILTER(?rating > ''' +str(rating)+''')
        ?game schema:name ?game_name .
        }
        LIMIT 20
    ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return results
def get_game_based_on_price_and_seller_url(lower_price,higher_price,sparql):
    """Return up to 20 games priced strictly between the two bounds, with
    their seller URLs (JSON bindings).

    NOTE(review): both bounds are interpolated into the FILTER as text —
    pass only trusted numeric values (SPARQL-injection risk otherwise).
    """
    sparql.setQuery('''
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX mgns: <http://inf558.org/games#>
        PREFIX schema: <http://schema.org/>
        SELECT ?game ?game_name ?price ?seller_url
        WHERE{
        ?game a mgns:Game .
        ?game mgns:price_USD ?price .
        FILTER(?price > '''+str(lower_price)+ ''' && ?price < ''' + str(higher_price) + ''')
        ?game schema:name ?game_name .
        ?game mgns:sellerURL ?seller_url
        }
        LIMIT 20
    ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return results
def get_info(game_id, sparql=None):
    """Fetch all known attributes of a single game by its local identifier.

    Consistency fix: every sibling query function takes the endpoint as a
    ``sparql`` parameter, but this one silently relied on a module-level
    global defined only under ``__main__`` (so it crashed when imported).
    The parameter is added with a default of None so the old call style
    ``get_info('mig_3')`` keeps working when the global exists.

    :param game_id: local name of the game resource (e.g. 'mig_3'),
        concatenated after the mgns: prefix in the query
    :param sparql: configured SPARQLWrapper endpoint; when None, falls back
        to the module-level ``sparql`` global (raises KeyError if absent)
    :return: raw JSON result dict as produced by ``query().convert()``
    """
    if sparql is None:
        # Backward-compatible fallback to the global created in __main__.
        sparql = globals()['sparql']
    sparql.setQuery('''
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX mgns: <http://inf558.org/games#>
    PREFIX schema: <http://schema.org/>
    #SELECT ?game_summary ?name ?released_year ?platform_name ?developer_name ?publisher_name ?game_mode_label ?genre_label ?theme_label ?#rating ?seller_name ?price ?discount ?url
    SELECT ?game_summary ?name ?released_year ?platform_name ?developer_name ?publisher_name ?game_mode_label ?genre_label ?theme_label ?rating ?seller_name ?price ?discount ?url
    WHERE{
        mgns:''' + game_id + ''' a mgns:Game ;
            schema:name ?name ;
            schema:description ?game_summary ;
            schema:datePublished ?released_year ;
        OPTIONAL{mgns:''' + game_id + ''' mgns:supportedPlatform ?platform ;
            mgns:platformName ?platform_name} .
        OPTIONAL{mgns:''' + game_id + ''' mgns:developedBy ?developer ;
            schema:name ?developer_name } .
        OPTIONAL{mgns:''' + game_id + ''' mgns:publishedBy ?publisher ;
            schema:name ?publisher_name} .
        OPTIONAL{mgns:''' + game_id + ''' mgns:hasGameMode ?game_mode ;
            rdfs:label ?game_mode_label }.
        OPTIONAL{mgns:''' + game_id + ''' mgns:hasGenre ?genre ;
            rdfs:label ?genre_label }.
        OPTIONAL{mgns:''' + game_id + ''' mgns:hasTheme ?theme ;
            rdfs:label ?theme_label}.
        OPTIONAL{mgns:''' + game_id + ''' mgns:ratingValue ?rating} .
        OPTIONAL{mgns:''' + game_id + ''' mgns:soldBy ?seller;
            schema:name ?seller_name} .
        OPTIONAL{mgns:''' + game_id + ''' mgns:price_USD ?price} .
        OPTIONAL{mgns:''' + game_id + ''' mgns:discount_percent ?discount} .
        OPTIONAL{mgns:''' + game_id + ''' mgns:sellerURL ?url} .
    }
    ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return results
def released_year_query(released_year, sparql):
    """Return ``{game_uri: [(name, date), ...]}`` for games released in
    *released_year*.

    Bug fix: the FILTER clause previously hard-coded ``2009`` and silently
    ignored the ``released_year`` argument; it now substitutes the argument.

    :param released_year: year to match against schema:datePublished
    :param sparql: configured SPARQLWrapper endpoint
    """
    sparql.setQuery('''
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX mgns: <http://inf558.org/games#>
    PREFIX schema: <http://schema.org/>
    select ?game ?name ?date
    where{
        ?game a mgns:Game .
        ?game schema:name ?name .
        ?game schema:datePublished ?date .
        FILTER(?date = ''' + str(released_year) + ''')
    }
    ''')
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    # Group (name, date) pairs by game URI so callers can intersect key sets.
    res = defaultdict(lambda: list())
    for result in results['results']['bindings']:
        res[result['game']['value']].append((result['name']['value'], result['date']['value']))
    return res
def genre(genre, sparql):
    """Return ``{game_uri: [(name, genre), ...]}`` for games matching *genre*.

    NOTE: the parameter shadows the function name, so ``genre`` cannot be
    re-invoked from inside this body (not needed here, but worth knowing).

    :param genre: genre label pre-quoted for SPARQL (e.g. '"Adventure"');
        concatenated directly into the query text
    :param sparql: configured SPARQLWrapper endpoint
    """
    sparql.setQuery('''
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX mgns: <http://inf558.org/games#>
    PREFIX schema: <http://schema.org/>
    select ?game ?name ?genre
    where{
        ?game a mgns:Game .
        ?game schema:name ?name .
        ?game mgns:hasGenre ?genre .
        ?genre rdfs:label ''' + str(genre) + '''@en .
    }
    '''
    )
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    # Group results by game URI; each value is a list of (name, genre) pairs.
    res = defaultdict(lambda: list())
    for result in results['results']['bindings']:
        res[result['game']['value']].append((result['name']['value'], genre))
    return res
if __name__ == '__main__':
    # Local Fuseki/SPARQL endpoint serving the games dataset.
    sparql = SPARQLWrapper("http://localhost:3030/games/query")
    # Query games based on genre
    # (example usage kept disabled as a string literal)
    '''genre = '"Adventure"'
    results = get_games_based_on_genre(genre,sparql)
    for result in results['results']['bindings']:
        print("Game URI: ",result['game']['value'],end = ' ')
        print("Game Name: ",result['game_name']['value'])'''
    # Query game names based on rating
    '''rating = 80
    results = get_games_having_rating_higher(rating,sparql)
    for result in results['results']['bindings']:
        print("Game URI: ", result['game']['value'], end=' ')
        print("Game Name: ", result['game_name']['value'], end = ' ')
        print("Rating: ", result['rating']['value'])'''
    # Query game and seller url based on price range
    '''lower_price = 10
    higher_price = 20
    results = get_game_based_on_price_and_seller_url(lower_price,higher_price,sparql)
    for result in results['results']['bindings']:
        print("Game URI: ", result['game']['value'], end=' ')
        print("Game Name: ", result['game_name']['value'], end = ' ')
        print("Game Price: ",result['price']['value'], end = ' ')
        print('Seller URL: ',result['seller_url']['value'])'''
    # Query games based on game id
    '''results = get_info('mig_3')
    game_info_dict = defaultdict(lambda: set())
    for result in results['results']['bindings']:
        for key in result.keys():
            game_info_dict[key].add(result[key]['value'])
    for key in game_info_dict.keys():
        game_info_dict[key] = list(game_info_dict[key])
    print(game_info_dict)'''
    # Query based on released year
    # Intersect games released in 2009 with Adventure-genre games and print
    # how many games satisfy both filters.
    release_y = released_year_query(2009,sparql)
    gen = genre('"Adventure"',sparql)
    int_key = set(list(release_y.keys())).intersection(set(list(gen.keys())))
    print(len(int_key))
|
<filename>cyberradiodriver/CyberRadioDriver/log.py
#!/usr/bin/env python
###############################################################
# \package CyberRadioDriver.log
#
# Logging support for objects within the driver.
#
# \author NH
# \author DA
# \author MN
# \copyright Copyright (c) 2014-2021 CyberRadio Solutions, Inc.
# All rights reserved.
#
###############################################################
# Imports from other modules in this package
# Imports from external modules
# Python standard library imports
import json
import sys
import time
#from matplotlib import verbose
##
# Base class for objects that produce log output.
#
# This base class consumes the following keyword arguments in
# its constructor:
# -- "verbose": a Boolean value (defaults to True) that
# controls verbose mode
# -- "logFile": Indicates where the log output should go. This
# is an open file or a file-like object that can consume the
# log output. The default is standard output. If specified
# as None, log output will be disabled altogether.
class _logger(object):
##
# Constructor
def __init__(self, *args, **kwargs):
# Consume this class's keyword arguments.
self.verbose = kwargs.get("verbose", True)
self.logFile = kwargs.get("logFile", sys.stdout)
# def __myClassName(self):
# ret = str(self.__class__).replace("<class ","").replace("'","").replace(">","")
# return ret
##
# Writes output to the log.
#
# \param args A variable-length list of items to write to the
# log. Each item needs to be representable as a string. Items
# are separated by spaces in the output.
def log(self, *args):
if self.logFile is not None:
myname = str(self).strip()
message = " ".join([str(q) for q in args])
#self.logFile.write("[%s] " % self.__myClassName())
if myname is None:
self.logFile.write("%s :: %s" % (time.strftime("%x %X"),
message))
else:
# This construct looks strange, but the idea behind it is: "Don't
# print my so-called 'name' if my string representation is
# actually a JSON command".
try:
json.loads(myname)
self.logFile.write("%s :: %s" % (time.strftime("%x %X"),
message))
except:
self.logFile.write("%s %s :: %s" % (time.strftime("%x %X"),
myname, message))
self.logFile.write("\n")
self.logFile.flush()
##
# Writes verbose-mode output to the log.
#
# \param args A variable-length list of items to write to the
# log. Each item needs to be representable as a string. Items
# are separated by spaces in the output.
def logIfVerbose(self, *args):
if self.verbose:
self.log(*args)
##
# Gets whether or not the object is in verbose mode.
#
# \returns True if verbose mode is set, False otherwise.
def getVerbose(self):
return self.verbose
##
# Sets whether or not the object is in verbose mode.
#
# \param verbose True if verbose mode is set, False otherwise.
def setVerbose(self, verbose):
self.verbose = verbose
##
# Gets the object's log file.
#
# \returns The file or file-like object used for logging.
def getLogFile(self):
return self.logFile
##
# Sets the object's log file.
#
# \param logFile The file or file-like object used for logging. If None,
# disables logging.
def setLogFile(self, logFile):
self.logFile = logFile
|
<reponame>juanjtov/Twitter_PNL_PUBLIC
import pandas as pd
import plotly
import re
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.probability import FreqDist
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import itertools
import math
import time
import datetime
from config import Settings
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# Filter constants for states in COL
# Alternating flat list of South American country names and their ISO-3166
# alpha-3 codes.  Note that multi-word names ("Falkland Islands", "French
# Guiana") are split into separate tokens, so 'Islands' and 'Guiana' also
# appear as standalone entries (and 'Guiana'/'GUF' occurs twice).
COUNTRIES = ['Argentina', 'ARG', 'Bolivia', 'BOL', 'Brazil', 'BRA', 'Chile', 'CHL', 'Colombia', 'COL', 'Ecuador', 'ECU', 'Falkland', 'Islands', 'FLK', 'French', 'Guiana', 'GUF', 'Guiana', 'GUF', 'Guyana', 'GUY', 'Paraguay', 'PRY', 'Peru', 'PER', 'Suriname', 'SUR', 'Uruguay', 'URY', 'Venezuela', 'VEN']
# Pairs consecutive entries: {'Argentina': 'ARG', 'Bolivia': 'BOL', ...}.
COUNTRY_DICT = dict(itertools.zip_longest(*[iter(COUNTRIES)] * 2, fillvalue=""))
# Reverse mapping from ISO code back to name: {'ARG': 'Argentina', ...}.
INV_COUNTRY_DICT = dict((v,k) for k,v in COUNTRY_DICT.items())
def clean_transform_data(df):
    """Bucket tweets into 2-second intervals per polarity value.

    Returns a tuple ``(result, time_series)`` where *result* has one row per
    (time bucket, polarity) with the mention count, and *time_series* is the
    bucket timestamps taken from the neutral-polarity rows.
    """
    #As we only display real-time tweets posted in last 30 minutes, groups of 2-second interval could best display on the screen in practice
    grouped = (
        df.groupby([pd.Grouper(key='created_at', freq='2s'), 'polarity'])
        .count()
        .unstack(fill_value=0)
        .stack()
        .reset_index()
    )
    grouped = grouped.rename(columns={
        "id_tweet": "Num of {} mentions".format(Settings.TRACK_WORDS),
        "created_at": "Time in UTC",
    })
    #Record the time series with 2-second interval for further index usage.
    neutral_rows = grouped['polarity'] == 0
    time_series = grouped["Time in UTC"][neutral_rows].reset_index(drop=True)
    return grouped, time_series
def plot_results(result, time_series, fd, geo_dist):
    """Render the live dashboard: sentiment time series, word-frequency bars
    and a South-America choropleth in one 2x2 plotly figure.

    :param result: grouped counts per (2s bucket, polarity) from
        clean_transform_data
    :param time_series: 2-second bucket timestamps from clean_transform_data
    :param fd: word-frequency DataFrame from pnl_module ("Word", "Frequency")
    :param geo_dist: per-country counts from geo_distr_data
    """
    #The data frame must have information
    # Layout: the sentiment scatter spans both rows on the left; the
    # choropleth sits top-right and the frequency bars bottom-right.
    fig = make_subplots(
        rows=2, cols=2,
        column_widths=[1, 0.4],
        row_heights=[0.6, 0.4],
        specs=[[{"type": "scatter", "rowspan": 2}, {"type": "choropleth"}],
               [ None , {"type": "bar"}]],
        subplot_titles= ("Sentiment analysis", "Words Frequency", "", "Countries In Action")
    )
    # One scatter trace per polarity class (0 neutral, -1 negative, 1 positive).
    fig.add_trace(go.Scatter(
        x=time_series,
        y=result["Num of {} mentions".format(Settings.TRACK_WORDS)][result['polarity']==0].reset_index(drop=True),
        name="Neutral",
        opacity=0.8), row=1, col=1)
    fig.add_trace(go.Scatter(
        x=time_series,
        y=result["Num of {} mentions".format(Settings.TRACK_WORDS)][result['polarity']==-1].reset_index(drop=True),
        name="Negative",
        opacity=0.8), row=1, col=1)
    fig.add_trace(go.Scatter(
        x=time_series,
        y=result["Num of {} mentions".format(Settings.TRACK_WORDS)][result['polarity']==1].reset_index(drop=True),
        name="Positive",
        opacity=0.8), row=1, col=1)
    # Plot Bar chart
    fig.add_trace(go.Bar(x=fd["Word"], y=fd["Frequency"], name="Freq Dist"), row=2, col=2)
    # 59, 89, 152  (Twitter-ish blue for the bars)
    fig.update_traces(marker_color='rgb(59, 89, 152)', marker_line_color='rgb(8,48,107)', \
                      marker_line_width=0.5, opacity=0.7, row=2, col=2)
    #chloropleth graph
    # z uses log2 of the counts so a few dominant countries don't wash out
    # the color scale for the rest.
    fig.add_trace(go.Choropleth(
        locations=geo_dist['Country'], # Spatial coordinates
        z = geo_dist['Log Num'].astype(float), # Data to be color-coded
        #locationmode = 'USA-states', it takes ISO BY DEFAULT # set of locations match entries in `locations`
        colorscale = "Blues",
        text=geo_dist['text'], # hover text
        showscale=True,
        geo = 'geo'
        ),
        row=1, col=2)
    fig.update_layout(
        title_text= "Real-time tracking '{}' mentions on Twitter {} UTC".format(Settings.TRACK_WORDS ,datetime.datetime.utcnow().strftime('%m-%d %H:%M')),
        geo = dict(
            scope='south america',
        ),
        template="plotly_dark",
        margin=dict(r=20, t=50, b=50, l=90),
        annotations=[
            go.layout.Annotation(
                text="Source: Twitter",
                showarrow=False,
                xref="paper",
                yref="paper",
                x=0,
                y=0)
        ],
        showlegend=True,
        xaxis_rangeslider_visible=True)
    # subplot_titles= ("Sentiment analysis", "Words Frequency", "", "Countries In Action")
    fig.show()
def pnl_module(df):
    """Build a word-frequency table from the tweet text column of *df*.

    Returns a DataFrame with columns ["Word", "Frequency"] holding the most
    common tokens after URL/RT stripping and Spanish stop-word removal
    (top 15 minus the single most common entry).
    """
    # Join all tweets into one string, then strip URLs, retweet markers and
    # non-alphanumeric characters before lowercasing.
    content = ' '.join(df["text"])
    content = re.sub(r"http\S+", "", content)
    content = content.replace('RT ', ' ').replace('&', 'and')
    content = re.sub('[^A-Za-z0-9]+', ' ', content)
    content = content.lower()
    tokenized_word = word_tokenize(content)
    #Extra fine filter
    # Drop short tokens (3 chars or fewer) before stop-word removal.
    tokenized_word = [word for word in tokenized_word if len(word)>3]
    stop_words=set(stopwords.words("spanish"))
    filtered_sent=[]
    for w in tokenized_word:
        if w not in stop_words:
            filtered_sent.append(w)
    fdist = FreqDist(filtered_sent)
    # drop([0]) removes the single most common word — presumably the tracked
    # keyword itself, TODO confirm.  NOTE(review): .reindex() with no
    # arguments is a no-op; .reset_index(drop=True) was probably intended.
    fd = pd.DataFrame(fdist.most_common(15), columns = ["Word","Frequency"]).drop([0]).reindex()
    return fd
def geo_distr_data(df):
    """Count tweets per South American country from the user_location text.

    Returns a DataFrame with columns Country (ISO-3 code), Number, Log Num
    (log2 of the count, used by the choropleth color scale), Full Country
    Name and a preformatted hover 'text' column.
    """
    df = df.fillna(" ")
    # Map each location string to an ISO-3 code (or None when no South
    # American country name/code occurs in it).
    matched_codes = []
    for location in df['user_location']:
        code = None
        for token in COUNTRIES:
            if token in location:
                code = COUNTRY_DICT[token] if token in COUNTRY_DICT else token
                break
        matched_codes.append(code)
    geo_dist = (
        pd.DataFrame(matched_codes, columns=['Country'])
        .dropna()
        .reset_index()
        .groupby('Country')
        .count()
        .rename(columns={"index": "Number"})
        .sort_values(by=['Number'], ascending=False)
        .reset_index()
    )
    geo_dist["Log Num"] = geo_dist["Number"].apply(lambda n: math.log(n, 2))
    geo_dist['Full Country Name'] = geo_dist['Country'].apply(lambda code: INV_COUNTRY_DICT[code])
    geo_dist['text'] = geo_dist['Full Country Name'] + '<br>' + 'Num: ' + geo_dist['Number'].astype(str)
    return geo_dist
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class CloudLoadTestClient(Client):
    """CloudLoadTest

    Generated REST client for the Azure DevOps Cloud Load Test service
    (do not edit by hand — see the generation notice at the top of this
    file).  Every method delegates to ``Client._send`` with a fixed
    location id GUID and API version.

    :param str base_url: Service URL
    :param Authentication creds: Authenticated credentials.
    """

    def __init__(self, base_url=None, creds=None):
        super(CloudLoadTestClient, self).__init__(base_url, creds)
        # Register every model class from the generated models module so the
        # (de)serializers can resolve type names such as 'AgentGroup'.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # Resource area GUID identifying the CloudLoadTest service area.
    resource_area_identifier = '7ae6d0a6-cda5-44cf-a261-28c392bed25c'

    def create_agent_group(self, group):
        """CreateAgentGroup.
        [Preview API]
        :param :class:`<AgentGroup> <azure.devops.v5_1.cloud_load_test.models.AgentGroup>` group: Agent group to be created
        :rtype: :class:`<AgentGroup> <azure.devops.v5_1.cloud_load_test.models.AgentGroup>`
        """
        content = self._serialize.body(group, 'AgentGroup')
        response = self._send(http_method='POST',
                              location_id='ab8d91c1-12d9-4ec5-874d-1ddb23e17720',
                              version='5.1-preview.1',
                              content=content)
        return self._deserialize('AgentGroup', response)

    def get_agent_groups(self, agent_group_id=None, machine_setup_input=None, machine_access_data=None, outgoing_request_urls=None, agent_group_name=None):
        """GetAgentGroups.
        [Preview API]
        :param str agent_group_id: The agent group identifier
        :param bool machine_setup_input:
        :param bool machine_access_data:
        :param bool outgoing_request_urls:
        :param str agent_group_name: Name of the agent group
        :rtype: object
        """
        route_values = {}
        if agent_group_id is not None:
            route_values['agentGroupId'] = self._serialize.url('agent_group_id', agent_group_id, 'str')
        query_parameters = {}
        if machine_setup_input is not None:
            query_parameters['machineSetupInput'] = self._serialize.query('machine_setup_input', machine_setup_input, 'bool')
        if machine_access_data is not None:
            query_parameters['machineAccessData'] = self._serialize.query('machine_access_data', machine_access_data, 'bool')
        if outgoing_request_urls is not None:
            query_parameters['outgoingRequestUrls'] = self._serialize.query('outgoing_request_urls', outgoing_request_urls, 'bool')
        if agent_group_name is not None:
            query_parameters['agentGroupName'] = self._serialize.query('agent_group_name', agent_group_name, 'str')
        response = self._send(http_method='GET',
                              location_id='ab8d91c1-12d9-4ec5-874d-1ddb23e17720',
                              version='5.1-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('object', response)

    def delete_static_agent(self, agent_group_id, agent_name):
        """DeleteStaticAgent.
        [Preview API]
        :param str agent_group_id: The agent group identifier
        :param str agent_name: Name of the static agent
        :rtype: str
        """
        route_values = {}
        if agent_group_id is not None:
            route_values['agentGroupId'] = self._serialize.url('agent_group_id', agent_group_id, 'str')
        query_parameters = {}
        if agent_name is not None:
            query_parameters['agentName'] = self._serialize.query('agent_name', agent_name, 'str')
        response = self._send(http_method='DELETE',
                              location_id='87e4b63d-7142-4b50-801e-72ba9ff8ee9b',
                              version='5.1-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('str', response)

    def get_static_agents(self, agent_group_id, agent_name=None):
        """GetStaticAgents.
        [Preview API]
        :param str agent_group_id: The agent group identifier
        :param str agent_name: Name of the static agent
        :rtype: object
        """
        route_values = {}
        if agent_group_id is not None:
            route_values['agentGroupId'] = self._serialize.url('agent_group_id', agent_group_id, 'str')
        query_parameters = {}
        if agent_name is not None:
            query_parameters['agentName'] = self._serialize.query('agent_name', agent_name, 'str')
        response = self._send(http_method='GET',
                              location_id='87e4b63d-7142-4b50-801e-72ba9ff8ee9b',
                              version='5.1-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('object', response)

    def get_application(self, application_id):
        """GetApplication.
        [Preview API]
        :param str application_id: Filter by APM application identifier.
        :rtype: :class:`<Application> <azure.devops.v5_1.cloud_load_test.models.Application>`
        """
        route_values = {}
        if application_id is not None:
            route_values['applicationId'] = self._serialize.url('application_id', application_id, 'str')
        response = self._send(http_method='GET',
                              location_id='2c986dce-8e8d-4142-b541-d016d5aff764',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('Application', response)

    def get_applications(self, type=None):
        """GetApplications.
        [Preview API]
        :param str type: Filters the results based on the plugin type.
        :rtype: [Application]
        """
        query_parameters = {}
        if type is not None:
            query_parameters['type'] = self._serialize.query('type', type, 'str')
        response = self._send(http_method='GET',
                              location_id='2c986dce-8e8d-4142-b541-d016d5aff764',
                              version='5.1-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('[Application]', self._unwrap_collection(response))

    def get_counters(self, test_run_id, group_names, include_summary=None):
        """GetCounters.
        [Preview API]
        :param str test_run_id: The test run identifier
        :param str group_names: Comma separated names of counter groups, such as 'Application', 'Performance' and 'Throughput'
        :param bool include_summary:
        :rtype: [TestRunCounterInstance]
        """
        route_values = {}
        if test_run_id is not None:
            route_values['testRunId'] = self._serialize.url('test_run_id', test_run_id, 'str')
        query_parameters = {}
        if group_names is not None:
            query_parameters['groupNames'] = self._serialize.query('group_names', group_names, 'str')
        if include_summary is not None:
            query_parameters['includeSummary'] = self._serialize.query('include_summary', include_summary, 'bool')
        response = self._send(http_method='GET',
                              location_id='29265ea4-b5a5-4b2e-b054-47f5f6f00183',
                              version='5.1-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[TestRunCounterInstance]', self._unwrap_collection(response))

    def get_application_counters(self, application_id=None, plugintype=None):
        """GetApplicationCounters.
        [Preview API]
        :param str application_id: Filter by APM application identifier.
        :param str plugintype: Currently ApplicationInsights is the only available plugin type.
        :rtype: [ApplicationCounters]
        """
        query_parameters = {}
        if application_id is not None:
            query_parameters['applicationId'] = self._serialize.query('application_id', application_id, 'str')
        if plugintype is not None:
            query_parameters['plugintype'] = self._serialize.query('plugintype', plugintype, 'str')
        response = self._send(http_method='GET',
                              location_id='c1275ce9-6d26-4bc6-926b-b846502e812d',
                              version='5.1-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('[ApplicationCounters]', self._unwrap_collection(response))

    def get_counter_samples(self, counter_sample_query_details, test_run_id):
        """GetCounterSamples.
        [Preview API]
        :param :class:`<VssJsonCollectionWrapper> <azure.devops.v5_1.cloud_load_test.models.VssJsonCollectionWrapper>` counter_sample_query_details:
        :param str test_run_id: The test run identifier
        :rtype: :class:`<CounterSamplesResult> <azure.devops.v5_1.cloud_load_test.models.CounterSamplesResult>`
        """
        route_values = {}
        if test_run_id is not None:
            route_values['testRunId'] = self._serialize.url('test_run_id', test_run_id, 'str')
        content = self._serialize.body(counter_sample_query_details, 'VssJsonCollectionWrapper')
        response = self._send(http_method='POST',
                              location_id='bad18480-7193-4518-992a-37289c5bb92d',
                              version='5.1-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('CounterSamplesResult', response)

    def get_load_test_run_errors(self, test_run_id, type=None, sub_type=None, detailed=None):
        """GetLoadTestRunErrors.
        [Preview API]
        :param str test_run_id: The test run identifier
        :param str type: Filter for the particular type of errors.
        :param str sub_type: Filter for a particular subtype of errors. You should not provide error subtype without error type.
        :param bool detailed: To include the details of test errors such as messagetext, request, stacktrace, testcasename, scenarioname, and lasterrordate.
        :rtype: :class:`<LoadTestErrors> <azure.devops.v5_1.cloud_load_test.models.LoadTestErrors>`
        """
        route_values = {}
        if test_run_id is not None:
            route_values['testRunId'] = self._serialize.url('test_run_id', test_run_id, 'str')
        query_parameters = {}
        if type is not None:
            query_parameters['type'] = self._serialize.query('type', type, 'str')
        if sub_type is not None:
            query_parameters['subType'] = self._serialize.query('sub_type', sub_type, 'str')
        if detailed is not None:
            query_parameters['detailed'] = self._serialize.query('detailed', detailed, 'bool')
        response = self._send(http_method='GET',
                              location_id='b52025a7-3fb4-4283-8825-7079e75bd402',
                              version='5.1-preview.2',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('LoadTestErrors', response)

    def get_test_run_messages(self, test_run_id):
        """GetTestRunMessages.
        [Preview API]
        :param str test_run_id: Id of the test run
        :rtype: [TestRunMessage]
        """
        route_values = {}
        if test_run_id is not None:
            route_values['testRunId'] = self._serialize.url('test_run_id', test_run_id, 'str')
        response = self._send(http_method='GET',
                              location_id='2e7ba122-f522-4205-845b-2d270e59850a',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('[TestRunMessage]', self._unwrap_collection(response))

    def get_plugin(self, type):
        """GetPlugin.
        [Preview API]
        :param str type: Currently ApplicationInsights is the only available plugin type.
        :rtype: :class:`<ApplicationType> <azure.devops.v5_1.cloud_load_test.models.ApplicationType>`
        """
        route_values = {}
        if type is not None:
            route_values['type'] = self._serialize.url('type', type, 'str')
        response = self._send(http_method='GET',
                              location_id='7dcb0bb2-42d5-4729-9958-c0401d5e7693',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('ApplicationType', response)

    def get_plugins(self):
        """GetPlugins.
        [Preview API]
        :rtype: [ApplicationType]
        """
        response = self._send(http_method='GET',
                              location_id='7dcb0bb2-42d5-4729-9958-c0401d5e7693',
                              version='5.1-preview.1')
        return self._deserialize('[ApplicationType]', self._unwrap_collection(response))

    def get_load_test_result(self, test_run_id):
        """GetLoadTestResult.
        [Preview API]
        :param str test_run_id: The test run identifier
        :rtype: :class:`<TestResults> <azure.devops.v5_1.cloud_load_test.models.TestResults>`
        """
        route_values = {}
        if test_run_id is not None:
            route_values['testRunId'] = self._serialize.url('test_run_id', test_run_id, 'str')
        response = self._send(http_method='GET',
                              location_id='5ed69bd8-4557-4cec-9b75-1ad67d0c257b',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('TestResults', response)

    def create_test_definition(self, test_definition):
        """CreateTestDefinition.
        [Preview API]
        :param :class:`<TestDefinition> <azure.devops.v5_1.cloud_load_test.models.TestDefinition>` test_definition: Test definition to be created
        :rtype: :class:`<TestDefinition> <azure.devops.v5_1.cloud_load_test.models.TestDefinition>`
        """
        content = self._serialize.body(test_definition, 'TestDefinition')
        response = self._send(http_method='POST',
                              location_id='a8f9b135-f604-41ea-9d74-d9a5fd32fcd8',
                              version='5.1-preview.1',
                              content=content)
        return self._deserialize('TestDefinition', response)

    def get_test_definition(self, test_definition_id):
        """GetTestDefinition.
        [Preview API]
        :param str test_definition_id: The test definition identifier
        :rtype: :class:`<TestDefinition> <azure.devops.v5_1.cloud_load_test.models.TestDefinition>`
        """
        route_values = {}
        if test_definition_id is not None:
            route_values['testDefinitionId'] = self._serialize.url('test_definition_id', test_definition_id, 'str')
        response = self._send(http_method='GET',
                              location_id='a8f9b135-f604-41ea-9d74-d9a5fd32fcd8',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('TestDefinition', response)

    def get_test_definitions(self, from_date=None, to_date=None, top=None):
        """GetTestDefinitions.
        [Preview API]
        :param str from_date: Date after which test definitions were created
        :param str to_date: Date before which test definitions were created
        :param int top:
        :rtype: [TestDefinitionBasic]
        """
        query_parameters = {}
        if from_date is not None:
            query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'str')
        if to_date is not None:
            query_parameters['toDate'] = self._serialize.query('to_date', to_date, 'str')
        if top is not None:
            query_parameters['top'] = self._serialize.query('top', top, 'int')
        response = self._send(http_method='GET',
                              location_id='a8f9b135-f604-41ea-9d74-d9a5fd32fcd8',
                              version='5.1-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('[TestDefinitionBasic]', self._unwrap_collection(response))

    def update_test_definition(self, test_definition):
        """UpdateTestDefinition.
        [Preview API]
        :param :class:`<TestDefinition> <azure.devops.v5_1.cloud_load_test.models.TestDefinition>` test_definition:
        :rtype: :class:`<TestDefinition> <azure.devops.v5_1.cloud_load_test.models.TestDefinition>`
        """
        content = self._serialize.body(test_definition, 'TestDefinition')
        response = self._send(http_method='PUT',
                              location_id='a8f9b135-f604-41ea-9d74-d9a5fd32fcd8',
                              version='5.1-preview.1',
                              content=content)
        return self._deserialize('TestDefinition', response)

    def create_test_drop(self, web_test_drop):
        """CreateTestDrop.
        [Preview API]
        :param :class:`<TestDrop> <azure.devops.v5_1.cloud_load_test.models.TestDrop>` web_test_drop: Test drop to be created
        :rtype: :class:`<TestDrop> <azure.devops.v5_1.cloud_load_test.models.TestDrop>`
        """
        content = self._serialize.body(web_test_drop, 'TestDrop')
        response = self._send(http_method='POST',
                              location_id='d89d0e08-505c-4357-96f6-9729311ce8ad',
                              version='5.1-preview.1',
                              content=content)
        return self._deserialize('TestDrop', response)

    def get_test_drop(self, test_drop_id):
        """GetTestDrop.
        [Preview API]
        :param str test_drop_id: The test drop identifier
        :rtype: :class:`<TestDrop> <azure.devops.v5_1.cloud_load_test.models.TestDrop>`
        """
        route_values = {}
        if test_drop_id is not None:
            route_values['testDropId'] = self._serialize.url('test_drop_id', test_drop_id, 'str')
        response = self._send(http_method='GET',
                              location_id='d89d0e08-505c-4357-96f6-9729311ce8ad',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('TestDrop', response)

    def create_test_run(self, web_test_run):
        """CreateTestRun.
        [Preview API]
        :param :class:`<TestRun> <azure.devops.v5_1.cloud_load_test.models.TestRun>` web_test_run:
        :rtype: :class:`<TestRun> <azure.devops.v5_1.cloud_load_test.models.TestRun>`
        """
        content = self._serialize.body(web_test_run, 'TestRun')
        response = self._send(http_method='POST',
                              location_id='b41a84ff-ff03-4ac1-b76e-e7ea25c92aba',
                              version='5.1-preview.1',
                              content=content)
        return self._deserialize('TestRun', response)

    def get_test_run(self, test_run_id):
        """GetTestRun.
        [Preview API]
        :param str test_run_id: Unique ID of the test run
        :rtype: :class:`<TestRun> <azure.devops.v5_1.cloud_load_test.models.TestRun>`
        """
        route_values = {}
        if test_run_id is not None:
            route_values['testRunId'] = self._serialize.url('test_run_id', test_run_id, 'str')
        response = self._send(http_method='GET',
                              location_id='b41a84ff-ff03-4ac1-b76e-e7ea25c92aba',
                              version='5.1-preview.1',
                              route_values=route_values)
        return self._deserialize('TestRun', response)

    def get_test_runs(self, name=None, requested_by=None, status=None, run_type=None, from_date=None, to_date=None, detailed=None, top=None, runsourceidentifier=None, retention_state=None):
        """GetTestRuns.
        [Preview API] Returns test runs based on the filter specified. Returns all runs of the tenant if there is no filter.
        :param str name: Name for the test run. Names are not unique. Test runs with same name are assigned sequential rolling numbers.
        :param str requested_by: Filter by the user who requested the test run. Here requestedBy should be the display name of the user.
        :param str status: Filter by the test run status.
        :param str run_type: Valid values include: null, one of TestRunType, or "*"
        :param str from_date: Filter by the test runs that have been modified after the fromDate timestamp.
        :param str to_date: Filter by the test runs that have been modified before the toDate timestamp.
        :param bool detailed: Include the detailed test run attributes.
        :param int top: The maximum number of test runs to return.
        :param str runsourceidentifier:
        :param str retention_state:
        :rtype: object
        """
        query_parameters = {}
        if name is not None:
            query_parameters['name'] = self._serialize.query('name', name, 'str')
        if requested_by is not None:
            query_parameters['requestedBy'] = self._serialize.query('requested_by', requested_by, 'str')
        if status is not None:
            query_parameters['status'] = self._serialize.query('status', status, 'str')
        if run_type is not None:
            query_parameters['runType'] = self._serialize.query('run_type', run_type, 'str')
        if from_date is not None:
            query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'str')
        if to_date is not None:
            query_parameters['toDate'] = self._serialize.query('to_date', to_date, 'str')
        if detailed is not None:
            query_parameters['detailed'] = self._serialize.query('detailed', detailed, 'bool')
        if top is not None:
            query_parameters['top'] = self._serialize.query('top', top, 'int')
        if runsourceidentifier is not None:
            query_parameters['runsourceidentifier'] = self._serialize.query('runsourceidentifier', runsourceidentifier, 'str')
        if retention_state is not None:
            query_parameters['retentionState'] = self._serialize.query('retention_state', retention_state, 'str')
        response = self._send(http_method='GET',
                              location_id='b41a84ff-ff03-4ac1-b76e-e7ea25c92aba',
                              version='5.1-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('object', response)

    def update_test_run(self, web_test_run, test_run_id):
        """UpdateTestRun.
        [Preview API]
        :param :class:`<TestRun> <azure.devops.v5_1.cloud_load_test.models.TestRun>` web_test_run:
        :param str test_run_id:
        """
        route_values = {}
        if test_run_id is not None:
            route_values['testRunId'] = self._serialize.url('test_run_id', test_run_id, 'str')
        content = self._serialize.body(web_test_run, 'TestRun')
        # Fire-and-forget: the PATCH endpoint returns no body to deserialize.
        self._send(http_method='PATCH',
                   location_id='b41a84ff-ff03-4ac1-b76e-e7ea25c92aba',
                   version='5.1-preview.1',
                   route_values=route_values,
                   content=content)
|
# importing required packages for this section
from urllib.parse import urlparse,urlencode
import ipaddress
import re
import re
from bs4 import BeautifulSoup
import whois
import urllib
import urllib.request
from datetime import datetime
import requests
class Extractor():
    """Extracts phishing-detection features from a URL.

    NOTE(review): feature_names lists 16 entries (including 'Web_Traffic'),
    but __call__ currently appends only 15 features because the web-traffic
    lookup is disabled -- confirm the expected width against the consuming model.
    """

    def __init__(self):
        # Human-readable names for the features produced by __call__.
        self.feature_names = ['Have_IP', 'Have_At', 'URL_Depth', 'Redirection',
                              'https_Domain', 'TinyURL', 'Prefix/Suffix', 'DNS_Record', 'Web_Traffic',
                              'Domain_Age', 'Domain_End', 'iFrame', 'Mouse_Over', 'Right_Click', 'Web_Forwards', 'Punny_Code']

    # 2. Checks for IP address in URL (Have_IP)
    @staticmethod
    def havingIP(url):
        """Return 1 if the URL is a bare IP address and is long (getLength == 1), else 0."""
        ip = 0  # BUG FIX: 'ip' was unbound (NameError) when parsing succeeded but the length check failed
        try:
            if ipaddress.ip_address(url) and Extractor.getLength(url) == 1:
                ip = 1
        except ValueError:
            ip = 0
        return ip

    # 3. Checks the presence of @ in URL (Have_At)
    @staticmethod
    def haveAtSign(url):
        """Return 1 if the URL contains '@' and is suspiciously long, else 0."""
        return 1 if ("@" in url and Extractor.getLength(url) == 1) else 0

    # 4. Finding the length of URL and categorizing (URL_Length)
    @staticmethod
    def getLength(url):
        """Return 1 if the URL is suspiciously long (>= 54 characters), else 0."""
        return 0 if len(url) < 54 else 1

    # 5. Gives number of '/' separated segments in the URL path (URL_Depth)
    @staticmethod
    def getDepth(url):
        """Return the number of non-empty path segments in the URL."""
        segments = urlparse(url).path.split('/')
        return sum(1 for segment in segments if segment)

    # 6. Checking for redirection '//' in the url (Redirection)
    @staticmethod
    def redirection(url):
        """Return 1 if '//' appears after the scheme separator, else 0."""
        # The scheme's own '//' ends by index 7 ('https://'); any later '//'
        # indicates an embedded redirect target.
        return 1 if url.rfind('//') > 7 else 0

    # 7. Existence of an 'http'/'https' token past the scheme (https_Domain)
    @staticmethod
    def httpDomain(url):
        """Return 1 if 'http'/'https' re-appears after the scheme in a long URL, else 0."""
        # BUG FIX: the original `'https' or 'http' in url[5:] and ...` parsed as
        # `'https' or (...)`, which is always truthy, so every URL scored 1.
        if ('https' in url[5:] or 'http' in url[5:]) and Extractor.getLength(url) == 1:
            return 1
        return 0

    # 8. Checking for Shortening Services in URL (Tiny_URL)
    @staticmethod
    def tinyURL(url):
        """Return 1 if the URL uses a known link-shortening service, else 0."""
        shortening_services = r"bit\.ly|goo\.gl|shorte\.st|go2l\.ink|x\.co|ow\.ly|t\.co|tinyurl|tr\.im|is\.gd|cli\.gs|" \
                              r"yfrog\.com|migre\.me|ff\.im|tiny\.cc|url4\.eu|twit\.ac|su\.pr|twurl\.nl|snipurl\.com|" \
                              r"short\.to|BudURL\.com|ping\.fm|post\.ly|Just\.as|bkite\.com|snipr\.com|fic\.kr|loopt\.us|" \
                              r"doiop\.com|short\.ie|kl\.am|wp\.me|rubyurl\.com|om\.ly|to\.ly|bit\.do|t\.co|lnkd\.in|db\.tt|" \
                              r"qr\.ae|adf\.ly|goo\.gl|bitly\.com|cur\.lv|tinyurl\.com|ow\.ly|bit\.ly|ity\.im|q\.gs|is\.gd|" \
                              r"po\.st|bc\.vc|twitthis\.com|u\.to|j\.mp|buzurl\.com|cutt\.us|u\.bb|yourls\.org|x\.co|" \
                              r"prettylinkpro\.com|scrnch\.me|filoops\.info|vzturl\.com|qr\.net|1url\.com|tweez\.me|v\.gd|" \
                              r"tr\.im|link\.zip\.net"
        return 1 if re.search(shortening_services, url) else 0

    # 9. Checking for Prefix or Suffix Separated by (-) in the Domain (Prefix/Suffix)
    @staticmethod
    def prefixSuffix(url):
        """Return 1 (phishing) if the domain contains '-', else 0 (legitimate)."""
        return 1 if '-' in urlparse(url).netloc else 0

    # 12. Web traffic via the Alexa ranking service (Web_Traffic)
    @staticmethod
    def web_traffic(url):
        """Return 0 if the Alexa rank is below 100000, else 1 (1 on any lookup failure)."""
        try:
            # Filling the whitespaces in the URL if any
            url = urllib.parse.quote(url)
            rank = BeautifulSoup(urllib.request.urlopen("http://data.alexa.com/data?cli=10&dat=s&url=" + url).read(), "xml").find("REACH")['RANK']
            rank = int(rank)
        except Exception:
            # BUG FIX: only TypeError was caught, so network errors (the Alexa
            # endpoint is retired) propagated instead of scoring the URL as 1.
            print("Cant get web traffic")
            return 1
        return 0 if rank < 100000 else 1

    # 13. Survival time of domain: termination minus creation time (Domain_Age)
    @staticmethod
    def domainAge(domain_name):
        """Return 1 if the whois record is unusable or shows a domain younger than ~6 months, else 0."""
        creation_date = domain_name.creation_date
        expiration_date = domain_name.expiration_date
        if isinstance(creation_date, str) or isinstance(expiration_date, str):
            try:
                creation_date = datetime.strptime(creation_date, '%Y-%m-%d')
                expiration_date = datetime.strptime(expiration_date, "%Y-%m-%d")
            except Exception:
                return 1
        if (expiration_date is None) or (creation_date is None):
            return 1
        if (type(expiration_date) is list) or (type(creation_date) is list):
            # whois sometimes returns multiple dates; treat the record as unusable.
            return 1
        ageofdomain = abs((expiration_date - creation_date).days)
        print("Domain Age: ", ageofdomain)
        return 1 if (ageofdomain / 30) < 6 else 0

    # 14. End time of domain: termination minus current time (Domain_End)
    @staticmethod
    def domainEnd(domain_name):
        """Return 1 if the whois record is unusable or expires within ~6 months, else 0."""
        expiration_date = domain_name.expiration_date
        if isinstance(expiration_date, str):
            try:
                expiration_date = datetime.strptime(expiration_date, "%Y-%m-%d")
            except Exception:
                return 1
        if expiration_date is None:
            return 1
        if type(expiration_date) is list:
            return 1
        today = datetime.now()
        remaining = abs((expiration_date - today).days)
        return 1 if (remaining / 30) < 6 else 0

    # 15. IFrame Redirection (iFrame)
    @staticmethod
    def iframe(response):
        """Return 0 if the page source mentions an iframe/frameBorder tag, else 1."""
        if response == "":
            return 1
        # BUG FIX: the original pattern was a character class ([<iframe>|...]),
        # which matched any single character from the set rather than the tags.
        if re.findall(r"<iframe>|<frameBorder>", response.text):
            return 0
        return 1

    # 16. Checks the effect of mouse over on status bar (Mouse_Over)
    @staticmethod
    def mouseOver(response):
        """Return 1 if an onmouseover script is present (or no response), else 0."""
        if response == "":
            return 1
        if re.findall("<script>.+onmouseover.+</script>", response.text):
            return 1
        return 0

    # 17. Checks the status of the right click attribute (Right_Click)
    @staticmethod
    def rightClick(response):
        """Return 0 if right-click appears to be disabled via event.button == 2, else 1."""
        if response == "":
            return 1
        if re.findall(r"event.button ?== ?2", response.text):
            return 0
        return 1

    # 18. Checks the number of forwardings (Web_Forwards)
    @staticmethod
    def forwarding(response):
        """Return 1 if the request was redirected more than twice (or no response), else 0."""
        if response == "":
            return 1
        return 0 if len(response.history) <= 2 else 1

    # 19. Punny code
    @staticmethod
    def punnycode(url):
        """Return 1 if the URL matches the expected scheme://host[:port] shape, else 0.

        NOTE(review): despite the name, this checks general URL structure, not
        punycode (xn--) labels -- confirm the intended semantics.
        """
        # BUG FIX: the original kept JavaScript-style /.../i delimiters inside the
        # Python string, so re.match could never succeed on a real URL.
        valid_regex = r"^(http|https|ftp)://([A-Z0-9][A-Z0-9_-]*(?:\.[A-Z0-9][A-Z0-9_-]*)+):?(\d+)?/?"
        return 1 if re.match(valid_regex, url, re.IGNORECASE) else 0

    # Function to extract features
    def __call__(self, url):
        """Extract the feature vector for a URL; returns [] for non-string input."""
        if not isinstance(url, str):
            return []
        url = url.rstrip()
        # URL-string based features.
        features = [
            self.havingIP(url),
            self.haveAtSign(url),
            self.getDepth(url),
            self.redirection(url),
            self.httpDomain(url),
            self.tinyURL(url),
            self.prefixSuffix(url),
        ]
        # Domain based features (whois lookup; failure itself is a feature).
        dns = 0
        domain_name = None
        try:
            domain_name = whois.whois(urlparse(url).netloc)
        except Exception:
            print("Cant get domain name")
            dns = 1
        features.append(dns)
        features.append(1 if dns == 1 else self.domainAge(domain_name))
        features.append(1 if dns == 1 else self.domainEnd(domain_name))
        # HTML & Javascript based features; "" sentinel means the fetch failed.
        try:
            response = requests.get(url)
        except Exception:
            response = ""
        features.append(self.iframe(response))
        features.append(self.mouseOver(response))
        features.append(self.rightClick(response))
        features.append(self.forwarding(response))
        features.append(self.punnycode(url))
        print(features)
        return features
if __name__ == "__main__":
    # Smoke test: extract and show the feature vector for a sample URL.
    extractor = Extractor()
    feature_vector = extractor("https://stackoverflow.com/questions/42179046/what-flavor-of-regex-does-visual-studio-code-use")
    print(feature_vector)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''tfidf处理器,对分词后的文本进行统计词频和计算tfidf值,输出文本特征.
init:初始化tfidf流程
fit:初始化模型内部参数
transform:将数据代入模型进行转换
'''
import logging
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import FunctionTransformer
import numpy as np
from jieba_processor import JiebaProcessor
logging.basicConfig()
LOGGER = logging.getLogger('TFIDFProcessor')
LOGGER.setLevel(level=logging.DEBUG)
class TFIDFProcessor(object):
    """TF-IDF feature extractor over jieba-tokenized text.

    Wraps a CountVectorizer -> TfidfTransformer pipeline: `fit` learns the
    vocabulary and idf weights from a corpus, `transform` maps sentences to
    sparse tf-idf feature rows.
    """

    def __init__(self, normalizer='basic', term_file=None, token_level='word', ngram_size=1):
        """Build the jieba tokenizer and the term-count / tf-idf pipeline.

        Args:
            normalizer: name of the text normalizer passed to JiebaProcessor
            term_file: synonym/terminology dictionary file
            token_level: char-level or word-level tokenization
            ngram_size: the window size of ngram
        Returns:
            None
        """
        self.tokenizer = JiebaProcessor(normalizer=normalizer, term_file=term_file)
        self.token_level = token_level
        self.ngram_size = ngram_size
        self.clf = Pipeline([
            ('vect', CountVectorizer(token_pattern=r"(?u)\b\w+\b", ngram_range=(ngram_size, ngram_size))),
            ('tfidf', TfidfTransformer())
        ])

    def fit(self, x):
        """Learn the vocabulary and idf vector from the training corpus `x`."""
        self.clf.fit(self.__tokenize(x))

    def transform(self, x):
        """Map a list of sentences to a sparse (n_samples, vocab_size) tf-idf matrix."""
        return self.clf.transform(self.__tokenize(x))

    def __process_ngram(self, tokenized_data):
        """Wrap each tokenized sentence with start/end sentinel tokens for ngrams."""
        return ['start_pos_tag ' + sentence + ' end_pos_tag' for sentence in tokenized_data]

    def __tokenize(self, data):
        """Tokenize sentences with jieba; add sentinel tokens when ngram_size > 1."""
        tokens = self.tokenizer.transform(data, self.token_level)
        if self.ngram_size > 1:
            tokens = self.__process_ngram(tokens)
        return tokens

    def get_params(self):
        """Return a short human-readable description of the active parameters."""
        return 'token_level = %s, ngram_size = %s' % (self.token_level, self.ngram_size)
if __name__ == '__main__':
    # Manual smoke test of the char-level bigram tf-idf pipeline.
    x_train = [u'@kg.MutualFund 的收益如何', u'一年@<EMAIL> 基金@托管费 收多少']
    term_file = '/Users/gyt/work/text_classification/train_data/term_data_20181106.csv'
    preprocessor = TFIDFProcessor(normalizer='basic_with_num', term_file=term_file,
                                  token_level='char', ngram_size=2)
    preprocessor.fit(x_train)
    LOGGER.debug('CountVectorizer output is: [\"%s\"]' % '\",\"'.join(preprocessor.clf.named_steps['vect'].get_feature_names()))
    x_train_encode = preprocessor.transform(x_train)
    LOGGER.debug('transform output type is: %s' % str(type(x_train_encode)))
    # BUG FIX: `print x` is Python 2 statement syntax and a SyntaxError under
    # Python 3; use the print() function.
    print(x_train_encode)
    print(type(x_train_encode))
|
import random, math
import numpy as np
from rlpy.util import CallbackList
class Agent(object):
    """Plays single-agent episodes against a gym-style environment using a Brain.

    Tracks a running (exponentially smoothed) reward across episodes and records
    a full per-step history of the most recent episode.
    """

    def __init__(self, brain, alpha=0.001):
        """
        :param brain: policy/value provider with reset_episode() and action()
        :param alpha: smoothing factor for the running reward update
        """
        self.Brain = brain
        self.RunningReward = None
        self.Alpha = alpha
        self.EpisodeReward = 0.0
        self.resetRunningReward()

    def resetRunningReward(self):
        """Reset the smoothed cross-episode reward to zero."""
        self.RunningReward = 0.0

    def play_episode(self, env, max_steps=None, render=False, training=False, callbacks=None):
        """Play one episode and return (episode_reward, episode_history dict).

        :param env: environment with reset()/step(); reset() may return either
                    the state alone or a (state, metadata) tuple
        :param max_steps: optional cap on the number of steps
        :param render: call env.render() after reset and each step
        :param training: passed through to the Brain's action selection
        :param callbacks: optional CallbackList (or convertible) notified at
                          episode begin/reset/step/end
        """
        callbacks = CallbackList.convert(callbacks)
        rewards_history = []
        actions_history = []
        observation_history = []
        valids_history = []
        values_history = []
        meta_history = []
        probs_history = []
        means_history = []
        sigmas_history = []
        controls_history = []
        env_actions_history = []
        self.Brain.reset_episode()
        if callbacks:
            callbacks("agent_episode_begin", self, training=training, render=render)
        tup = env.reset()
        # the env may return either just the state, or a tuple (state, metadata)
        if isinstance(tup, tuple) and len(tup) == 2:
            state, meta = tup
        else:
            state, meta = tup, {}
        if render:
            env.render()
        if callbacks:
            callbacks("agent_episode_reset", self, state, meta)
        self.EpisodeReward = 0.0
        done = False
        steps = 0
        while (not done) and (max_steps is None or steps < max_steps):
            if not isinstance(state, list):
                state = [state]     # always a list of np arrays
            observation_history.append(state)
            valid_actions = meta.get("valid_actions")       # None if not there
            if valid_actions is not None:
                # assume it's consistent through the episode
                valids_history.append(valid_actions)
            value, probs, means, sigmas, actions, controls, env_action = self.Brain.action(state, valid_actions, training)
            # probs describes the action probabilities; env_action can be an int,
            # a float, an ndarray of either, or a (discrete, continuous) tuple.
            # BUG FIX: probs was appended twice per step, doubling probs_history
            # relative to every other per-step list.
            probs_history.append(probs)
            if actions is not None:
                actions_history.append(actions)
            values_history.append(value)
            if means is not None:
                means_history.append(means)
                sigmas_history.append(sigmas)
            if controls is not None:
                controls_history.append(controls)
            env_actions_history.append(env_action)
            new_state, reward, done, meta = env.step(env_action)
            if callbacks:
                callbacks("agent_episode_step", self, env_action, new_state, reward, done, meta)
            meta_history.append(meta)
            state = new_state
            if render:
                env.render()
            rewards_history.append(reward)
            self.EpisodeReward += reward
            steps += 1
        if self.RunningReward is None:
            self.RunningReward = self.EpisodeReward
        else:
            self.RunningReward += self.Alpha * (self.EpisodeReward - self.RunningReward)
        # NOTE(review): values_history is collected but not exported here, as in
        # the original -- confirm whether training code needs per-step values.
        self.EpisodeHistory = dict(
            steps = steps,
            rewards = np.array(rewards_history),
            observations = observation_history,
            # BUG FIX: np.int is a removed alias (NumPy >= 1.24); the builtin
            # int maps to the same default integer dtype.
            actions = np.array(actions_history, dtype=int),
            probs = np.array(probs_history),
            means = np.array(means_history),
            sigmas = np.array(sigmas_history),
            controls = np.array(controls_history),
            valid_actions = np.array(valids_history) if valids_history else None,
            meta = meta_history,
            env_actions = env_actions_history
        )
        if callbacks:
            callbacks("agent_episode_end", self, self.EpisodeReward, self.EpisodeHistory)
        return self.EpisodeReward, self.EpisodeHistory

    def episode_history(self):
        """Return the history dict recorded by the most recent play_episode()."""
        return self.EpisodeHistory

    def train_on_multi_episode_history(self, multi_ep_history):
        """Delegate multi-episode training to the Brain."""
        return self.Brain.train_on_multi_episode_history(multi_ep_history)
class MultiAgent(object):
    """One agent in a multi-agent episode driven externally.

    Unlike Agent, this class does not run the environment loop itself: the
    caller invokes reset()/action()/update()/done() as the environment plays
    out, and the agent accumulates its own per-step history for training.
    """

    def __init__(self, brain, alpha=0.01):
        """
        :param brain: policy provider with evaluate_step() and policy()
        :param alpha: smoothing factor for the running reward update
        """
        self.Brain = brain
        self.RunningReward = 0.0
        self.EpisodeReward = 0.0
        self.Alpha = alpha
        self.Reward = 0.0        # reward accumulated since last action
        self.Training = None
        self.resetRunningReward()
        self.History = []        # [(observation, action, reward)]

    def resetRunningReward(self):
        """Reset the smoothed cross-episode reward to zero."""
        self.RunningReward = 0.0

    def reset(self, training=True):
        """Clear all per-episode state before a new episode begins."""
        self.Training = training
        self.EpisodeReward = self.Reward = 0.0
        self.Observations = []
        self.States = []
        self.Rewards = []
        self.Actions = []
        self.ValidActions = []
        self.ProbsHistory = []
        self.FirstMove = True
        self.Done = False
        # -1 sentinel: no previous action yet (fed to Brain.evaluate_step).
        self.PrevAction = -1
        #print("Agent[%d].reset()" % (id(self)%100,))
        self.History = []        # [(observation, action, reward)]

    def choose_action(self, observation, valid_actions, training):
        """Ask the Brain for action probabilities and sample a policy action."""
        _, probs = self.Brain.evaluate_step(self.PrevAction, observation)
        action = self.Brain.policy(probs, training, valid_actions)
        self.ProbsHistory.append(probs)
        return action

    def action(self, observation, valid_actions=None):
        """Record the observation, bank the reward earned since the previous
        action, and return the next action chosen by the Brain."""
        # this is reward for the previous action
        if not isinstance(observation, list):       # make sure observation is a list of np.arrays
            observation = [observation]
        self.Observations.append(observation)
        if valid_actions is not None:
            self.ValidActions.append(valid_actions)
        # Rewards are recorded one step late: the reward accumulated since the
        # last action belongs to that previous action, so skip the first move.
        if not self.FirstMove:
            self.Rewards.append(self.Reward)
        self.FirstMove = False
        self.EpisodeReward += self.Reward
        self.Reward = 0.0
        action = self.choose_action(observation, valid_actions, self.Training)
        self.Actions.append(action)
        #print("Agent[%d].action() -> %d" % (id(self)%100, action))
        self.PrevAction = action
        self.History.append((observation, action, None))
        return action

    def update(self, observation=None, reward=None):
        """Accumulate reward (and note an observation) between our turns."""
        if reward: self.Reward += reward
        self.Observation = observation
        self.History.append((observation, None, reward))
        #print("Agent[%d].reward(%.4f) accumulated=%.4f" % (id(self)%100, reward, self.Reward))

    def done(self, last_observation, reward=0.0):
        """Close out the episode: bank the final reward and update the
        running reward. Safe to call more than once (guarded by self.Done)."""
        self.Reward += reward
        self.EpisodeReward += self.Reward
        if not self.Done:
            if not self.FirstMove:
                self.Rewards.append(self.Reward)
            self.Reward = 0.0
            self.RunningReward += self.Alpha*(self.EpisodeReward - self.RunningReward)
            self.Done = True
            self.History.append((last_observation, None, reward))
        #self.Observations.append(observation)

    def episode_history(self):
        """Return this agent's episode record as a dict of arrays/lists."""
        valids = np.array(self.ValidActions) if self.ValidActions else None
        out = {
            "nsteps": len(self.Actions),
            "rewards": np.array(self.Rewards),
            "actions": np.array(self.Actions),
            "valid_actions": valids,
            "observations": self.Observations,      # list of lists of ndarrays
            "probs": np.array(self.ProbsHistory),
            "full_history": self.History
        }
        #print("Agent[%d].history():" % (id(self)%100,), *((k, len(lst) if lst is not None else "none") for k, lst in out.items()))
        return out
|
import ipaddress
import unittest
from lib.Ipam import *
from docker_plugin_api.Plugin import InputValidationException
class PoolInvalidCreateTest(unittest.TestCase):
    """Pool construction must reject incomplete or inconsistent arguments."""

    def test_noargs(self):
        # No pool specification at all is invalid.
        self.assertRaises(InputValidationException, Pool)

    def test_subpool_only(self):
        # A subPool without an enclosing pool is invalid.
        self.assertRaises(InputValidationException, Pool, subPool='127.0.0.0/24')

    def test_subpool_invalid(self):
        # A subPool outside the parent pool is invalid.
        self.assertRaises(InputValidationException, Pool,
                          pool='127.0.0.0/24', subPool='127.1.2.3/24')
class PoolCreateTest(unittest.TestCase):
    """Valid Pool constructions normalize to the expected networks."""

    def test_auto_ipv4(self):
        pool = Pool(v6=False)
        ip = ipaddress.ip_network(str(pool))
        self.assertTrue(isinstance(ip, ipaddress.IPv4Network))

    def test_auto_ipv6(self):
        pool = Pool(v6=True)
        ip = ipaddress.ip_network(str(pool))
        self.assertTrue(isinstance(ip, ipaddress.IPv6Network))

    def test_pool_ipv4(self):
        # A host address is normalized to its network.
        pool = Pool(pool='127.0.0.1/24')
        ip = ipaddress.ip_network(str(pool))
        self.assertTrue(isinstance(ip, ipaddress.IPv4Network))
        self.assertEqual(ip, ipaddress.ip_network('127.0.0.0/24'))

    def test_pool_ipv6(self):
        # BUG FIX: the pool literal was corrupted ('fd0fdf8:f53e:61e4::18/64' is
        # not parseable IPv6); restored a host inside the asserted network below.
        pool = Pool(pool='fd00:a:b:c::18/64')
        ip = ipaddress.ip_network(str(pool))
        self.assertTrue(isinstance(ip, ipaddress.IPv6Network))
        self.assertEqual(ip, ipaddress.ip_network('fd00:a:b:c::/64'))

    def test_subpool_ipv4(self):
        pool = Pool(pool='127.0.0.1/8', subPool='127.0.0.2/24')
        ip = ipaddress.ip_network(str(pool))
        self.assertTrue(isinstance(ip, ipaddress.IPv4Network))
        self.assertEqual(ip, ipaddress.ip_network('127.0.0.0/8'))
        ip = ipaddress.ip_network(str(pool.subpool))
        self.assertTrue(isinstance(ip, ipaddress.IPv4Network))
        self.assertEqual(ip, ipaddress.ip_network('127.0.0.0/24'))

    def test_subpool_ipv6(self):
        pool = Pool(pool='fd00:a:b:c:1:2:3:4/48', subPool='fd00:a:b:c:1:2:3:4/64')
        ip = ipaddress.ip_network(str(pool))
        self.assertTrue(isinstance(ip, ipaddress.IPv6Network))
        self.assertEqual(ip, ipaddress.ip_network('fd00:a:b::/48'))
        ip = ipaddress.ip_network(str(pool.subpool))
        self.assertTrue(isinstance(ip, ipaddress.IPv6Network))
        self.assertEqual(ip, ipaddress.ip_network('fd00:a:b:c::/64'))
class PoolComparisonTest(unittest.TestCase):
    """Pools compare equal by network, and equality is symmetric."""

    def _assert_symmetric_equal(self, first, second):
        self.assertEqual(first, second)
        self.assertEqual(second, first)

    def test_pool_same_ipv4(self):
        # Different hosts inside the same /8 normalize to the same pool.
        self._assert_symmetric_equal(Pool(pool='127.0.0.1/8'),
                                     Pool(pool='127.0.0.2/8'))

    def test_pool_same_ipv6(self):
        self._assert_symmetric_equal(Pool(pool='fd00:a:b::/64'),
                                     Pool(pool='fd00:a:b:0:1:2:3:4/64'))
class PoolOverlapTest(unittest.TestCase):
    """Pool.overlaps is reflexive and symmetric for intersecting networks."""

    def _assert_mutual_overlap(self, first, second):
        # Each pool overlaps itself and the other, in both directions.
        self.assertTrue(first.overlaps(first))
        self.assertTrue(second.overlaps(second))
        self.assertTrue(first.overlaps(second))
        self.assertTrue(second.overlaps(first))

    def test_pool_overlap_ipv4(self):
        self._assert_mutual_overlap(Pool(pool='127.0.0.1/24'),
                                    Pool(pool='127.0.2.3/16'))

    def test_pool_overlap_ipv6(self):
        self._assert_mutual_overlap(Pool(pool='fe80::/64'),
                                    Pool(pool='fe80::1:2:3:4/72'))
class PoolAllocateInvalidTest(unittest.TestCase):
    """Manual allocation rejects out-of-pool, network, and taken addresses."""

    def test_pool_allocate_invalid_ipv4(self):
        pool = Pool(pool='127.0.0.0/30')
        self.assertEqual(pool.allocate(), '127.0.0.1/30')
        self.assertEqual(pool.allocate(), '127.0.0.2/30')
        with self.assertRaises(InputValidationException):
            pool.allocate('172.16.31.10')       # outside the pool
        with self.assertRaises(InputValidationException):
            pool.allocate('127.0.0.0')          # network address
        with self.assertRaises(InputValidationException):
            pool.allocate('127.0.0.1')          # already allocated
        with self.assertRaises(InputValidationException):
            pool.allocate('127.0.0.2')          # already allocated
        with self.assertRaises(InputValidationException):
            pool.allocate('127.0.0.3')          # broadcast address
        with self.assertRaises(InputValidationException):
            pool.allocate('127.0.0.4')          # outside the /30

    def test_pool_allocate_invalid_ipv6(self):
        pool = Pool(pool='fd00::/126')
        # BUG FIX: several literals below were corrupted; restored the
        # sequential allocations and the already-taken addresses so the test
        # mirrors its IPv4 counterpart above.
        self.assertEqual(pool.allocate(), 'fd00::1/126')
        self.assertEqual(pool.allocate(), 'fd00::2/126')
        self.assertEqual(pool.allocate(), 'fd00::3/126')
        with self.assertRaises(InputValidationException):
            pool.allocate('fcff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')    # outside the pool
        with self.assertRaises(InputValidationException):
            pool.allocate('fd00::')             # network address
        with self.assertRaises(InputValidationException):
            pool.allocate('fd00::1')            # already allocated
        with self.assertRaises(InputValidationException):
            pool.allocate('fd00::2')            # already allocated
        with self.assertRaises(InputValidationException):
            pool.allocate('fd00::3')            # already allocated
        with self.assertRaises(InputValidationException):
            pool.allocate('fd00::4')            # outside the /126
class PoolAllocateOrderTest(unittest.TestCase):
    """Automatic allocation hands out host addresses sequentially from the bottom."""

    def test_pool_allocate_ipv4(self):
        pool = Pool(pool='127.0.0.0/30')
        self.assertEqual(pool.allocate(), '127.0.0.1/30')
        self.assertEqual(pool.allocate(), '127.0.0.2/30')
        # The /30 only has two usable hosts; further requests must fail.
        with self.assertRaises(InputValidationException):
            pool.allocate()
        with self.assertRaises(InputValidationException):
            pool.allocate()
        with self.assertRaises(InputValidationException):
            pool.allocate()

    def test_pool_allocate_ipv6(self):
        pool = Pool(pool='fd00::/126')
        self.assertEqual(pool.allocate(), 'fd00::1/126')
        # BUG FIX: this literal was corrupted ('fd0fdf8:...'); the second
        # sequential allocation from fd00::/126 is fd00::2.
        self.assertEqual(pool.allocate(), 'fd00::2/126')
        self.assertEqual(pool.allocate(), 'fd00::3/126')
        with self.assertRaises(InputValidationException):
            pool.allocate()
        with self.assertRaises(InputValidationException):
            pool.allocate()
        with self.assertRaises(InputValidationException):
            pool.allocate()

    def test_pool_allocate_large_ipv4(self):
        pool = Pool(pool='127.0.0.0/8')
        self.assertEqual(pool.allocate(), '127.0.0.1/8')
        self.assertEqual(pool.allocate(), '127.0.0.2/8')

    def test_pool_allocate_large_ipv6(self):
        pool = Pool(pool='fd00::/56')
        self.assertEqual(pool.allocate(), 'fd00::1/56')
        self.assertEqual(pool.allocate(), 'fd00::2/56')
class PoolAllocateManualTest(unittest.TestCase):
    """Manual allocations are honored; automatic allocation fills the gaps
    lowest-first, skipping addresses that were claimed manually."""

    def test_pool_allocate_ipv4(self):
        pool = Pool(pool='127.0.0.0/28')
        self.assertEqual(pool.allocate('127.0.0.3'), '127.0.0.3/28')
        self.assertEqual(pool.allocate('127.0.0.5'), '127.0.0.5/28')
        self.assertEqual(pool.allocate('127.0.0.11'), '127.0.0.11/28')
        self.assertEqual(pool.allocate('127.0.0.14'), '127.0.0.14/28')
        self.assertEqual(pool.allocate(), '127.0.0.1/28')
        self.assertEqual(pool.allocate(), '127.0.0.2/28')
        self.assertEqual(pool.allocate(), '127.0.0.4/28')
        self.assertEqual(pool.allocate(), '127.0.0.6/28')
        self.assertEqual(pool.allocate(), '127.0.0.7/28')
        self.assertEqual(pool.allocate(), '127.0.0.8/28')
        self.assertEqual(pool.allocate(), '127.0.0.9/28')
        self.assertEqual(pool.allocate(), '127.0.0.10/28')
        self.assertEqual(pool.allocate(), '127.0.0.12/28')
        self.assertEqual(pool.allocate(), '127.0.0.13/28')
        with self.assertRaises(InputValidationException):
            pool.allocate()

    def test_pool_allocate_ipv6(self):
        pool = Pool(pool='fd00::/124')
        # BUG FIX: several literals below were corrupted ('fd0fdf8:...',
        # 'fd0fd00:...'); restored them from the asserted return values and the
        # IPv4 mirror above (manual: ::3, ::5, ::b, ::e; auto fills the rest).
        self.assertEqual(pool.allocate('fd00::3'), 'fd00::3/124')
        self.assertEqual(pool.allocate('fd00::5'), 'fd00::5/124')
        self.assertEqual(pool.allocate('fd00::b'), 'fd00::b/124')
        self.assertEqual(pool.allocate('fd00::e'), 'fd00::e/124')
        self.assertEqual(pool.allocate(), 'fd00::1/124')
        self.assertEqual(pool.allocate(), 'fd00::2/124')
        self.assertEqual(pool.allocate(), 'fd00::4/124')
        self.assertEqual(pool.allocate(), 'fd00::6/124')
        self.assertEqual(pool.allocate(), 'fd00::7/124')
        self.assertEqual(pool.allocate(), 'fd00::8/124')
        self.assertEqual(pool.allocate(), 'fd00::9/124')
        self.assertEqual(pool.allocate(), 'fd00::a/124')
        self.assertEqual(pool.allocate(), 'fd00::c/124')
        self.assertEqual(pool.allocate(), 'fd00::d/124')
        self.assertEqual(pool.allocate(), 'fd00::f/124')
        with self.assertRaises(InputValidationException):
            pool.allocate()

    def test_subpool_allocate_ipv4(self):
        pool = Pool(pool='127.0.0.0/8', subPool='127.0.0.0/30')
        self.assertEqual(pool.allocate('127.0.0.2'), '127.0.0.2/8')
        self.assertEqual(pool.allocate('127.1.2.3'), '127.1.2.3/8')
        self.assertEqual(pool.allocate('127.255.0.5'), '127.255.0.5/8')
        self.assertEqual(pool.allocate('127.248.255.11'), '127.248.255.11/8')
        self.assertEqual(pool.allocate('127.71.12.14'), '127.71.12.14/8')
        self.assertEqual(pool.allocate(), '127.0.0.1/8')
        # This is not-fully-correct as we can theoretically assign 127.0.0.3 - feel free to fix it
        with self.assertRaises(InputValidationException):
            pool.allocate()
        self.assertEqual(pool.allocate('127.0.0.3'), '127.0.0.3/8')

    def test_subpool_middle_allocate_ipv4(self):
        pool = Pool(pool='127.0.0.0/8', subPool='127.248.255.0/30')
        self.assertEqual(pool.allocate('127.0.0.2'), '127.0.0.2/8')
        self.assertEqual(pool.allocate('127.1.2.3'), '127.1.2.3/8')
        self.assertEqual(pool.allocate('127.255.0.5'), '127.255.0.5/8')
        self.assertEqual(pool.allocate('127.248.255.2'), '127.248.255.2/8')
        self.assertEqual(pool.allocate('127.71.12.14'), '127.71.12.14/8')
        self.assertEqual(pool.allocate(), '127.248.255.1/8')
        # This is not-fully-correct as we can theoretically assign 127.248.255.0 and 127.248.255.3 - feel free to fix it
        with self.assertRaises(InputValidationException):
            pool.allocate()
        self.assertEqual(pool.allocate('127.248.255.0'), '127.248.255.0/8')
        self.assertEqual(pool.allocate('127.248.255.3'), '127.248.255.3/8')

    def test_subpool_allocate_ipv6(self):
        pool = Pool(pool='fe80::/64', subPool='fe80::/126')
        self.assertEqual(pool.allocate('fe80::2'), 'fe80::2/64')
        self.assertEqual(pool.allocate('fe80::1:2:3:4'), 'fe80::1:2:3:4/64')
        self.assertEqual(pool.allocate('fe80::ffff:fefe:fdfd:fcfc'), 'fe80::ffff:fefe:fdfd:fcfc/64')
        self.assertEqual(pool.allocate(), 'fe80::1/64')
        self.assertEqual(pool.allocate(), 'fe80::3/64')
        with self.assertRaises(InputValidationException):
            pool.allocate()

    def test_subpool_middle_allocate_ipv6(self):
        pool = Pool(pool='fe80::/64', subPool='fe80::ffff:0:0:0/126')
        self.assertEqual(pool.allocate('fe80::2'), 'fe80::2/64')
        self.assertEqual(pool.allocate('fe80::1:2:3:4'), 'fe80::1:2:3:4/64')
        self.assertEqual(pool.allocate('fe80::ffff:0:0:2'), 'fe80::ffff:0:0:2/64')
        self.assertEqual(pool.allocate(), 'fe80::ffff:0:0:1/64')
        self.assertEqual(pool.allocate(), 'fe80::ffff:0:0:3/64')
        # This is not-fully-correct as we can theoretically assign fe80::ffff:0:0:0 - feel free to fix it
        with self.assertRaises(InputValidationException):
            pool.allocate()
        self.assertEqual(pool.allocate('fe80::ffff:0:0:0'), 'fe80::ffff:0:0:0/64')
class TestPoolAllocateRelease(unittest.TestCase):
    """Released addresses become available again, and automatic allocation
    always picks the lowest free address."""

    def test_pool_allocate_release_ipv4(self):
        pool = Pool(pool='127.0.0.0/29')
        # Two manual claims, then the automatic allocator fills from the bottom.
        self.assertEqual(pool.allocate('127.0.0.3'), '127.0.0.3/29')
        self.assertEqual(pool.allocate('127.0.0.5'), '127.0.0.5/29')
        for expected in ('127.0.0.1/29', '127.0.0.2/29'):
            self.assertEqual(pool.allocate(), expected)
        # Releasing .3 makes it the lowest free address again.
        pool.deallocate('127.0.0.3')
        for expected in ('127.0.0.3/29', '127.0.0.4/29', '127.0.0.6/29'):
            self.assertEqual(pool.allocate(), expected)
        with self.assertRaises(InputValidationException):
            pool.allocate()
        pool.deallocate('127.0.0.5')
        self.assertEqual(pool.allocate(), '127.0.0.5/29')
        pool.deallocate('127.0.0.1')
        pool.deallocate('127.0.0.2')
        # A manual claim of a released address coexists with automatic reuse.
        self.assertEqual(pool.allocate('127.0.0.2'), '127.0.0.2/29')
        self.assertEqual(pool.allocate(), '127.0.0.1/29')

    def test_pool_allocate_release_ipv6(self):
        pool = Pool(pool='fe80::/125')
        self.assertEqual(pool.allocate('fe80::3'), 'fe80::3/125')
        self.assertEqual(pool.allocate('fe80::5'), 'fe80::5/125')
        for expected in ('fe80::1/125', 'fe80::2/125'):
            self.assertEqual(pool.allocate(), expected)
        pool.deallocate('fe80::3')
        for expected in ('fe80::3/125', 'fe80::4/125', 'fe80::6/125', 'fe80::7/125'):
            self.assertEqual(pool.allocate(), expected)
        with self.assertRaises(InputValidationException):
            pool.allocate()
        pool.deallocate('fe80::5')
        self.assertEqual(pool.allocate(), 'fe80::5/125')
        pool.deallocate('fe80::1')
        pool.deallocate('fe80::2')
        self.assertEqual(pool.allocate('fe80::2'), 'fe80::2/125')
        self.assertEqual(pool.allocate(), 'fe80::1/125')
|
<gh_stars>0
__author__ = 'Jwely'
from py.utils.get_rel_humidity import get_rel_humidity
import json
class Experiment:
    """Represents the recorded state of a single wind-tunnel experiment.

    Unlike the other classes in the managers module, this class is specific to
    the author's (Jwely's) experiments and the documentation formats and
    variable names used there. An AxialVortex characterization can be attached
    later via ingest_axial_vortex().
    """

    def __init__(self, experiment_id, n_samples, z_location, v_nominal, dt, test_date,
                 v_fs_mean, v_fs_sigma, q, pres_atm, temp_tunnel, wet_bulb, dry_bulb, eta_p):
        """
        :param experiment_id: integer ID number 1 through 70 (integer)
        :param n_samples: number of observations, usually 200 (integer)
        :param z_location: downstream distance of interrogation plane (inches, float)
        :param v_nominal: nominal velocity target (m/s float)
        :param dt: time between laser pulses (microseconds integer)
        :param test_date: date on which the experiment took place (datetime)
        :param v_fs_mean: measured free stream velocity (m/s float)
        :param v_fs_sigma: instability in free stream velocity (m/s float)
        :param q: stagnation pressure inside the tunnel (Pascals integer)
        :param pres_atm: atmospheric pressure (Pascals integer)
        :param temp_tunnel: temperature in the tunnel (Celsius float)
        :param wet_bulb: wet bulb temperature at time of test (Kelvin float)
        :param dry_bulb: dry bulb temperature at time of test (Kelvin float)
        :param eta_p: added pressure relaxation arg
        """
        # Attributes recorded at the time of the experiment.
        self.experiment_id = experiment_id
        self.n_samples = n_samples
        self.z_location = z_location
        self.z_location_mm = int(z_location * 25.4 + 0.5)   # inches -> mm, rounded to integer
        self.v_nominal = v_nominal
        self.dt = dt
        self.test_date = test_date
        self.v_fs_mean = v_fs_mean
        self.v_fs_sigma = v_fs_sigma
        self.q = q
        self.pres_atm = pres_atm
        self.temp_tunnel = temp_tunnel
        self.wet_bulb = wet_bulb
        self.dry_bulb = dry_bulb
        self.eta_p = eta_p
        # Derived: relative humidity from the bulb temperatures and pressure.
        self.rel_humid = get_rel_humidity(dry_bulb, wet_bulb, pres_atm)
        # Filled in later by ingest_axial_vortex() with an AxialVortex instance.
        self.axial_vortex = None

    def ingest_axial_vortex(self, axial_vortex_instance):
        """Attach an AxialVortex and run its pressure/viscosity post-processing.

        :param axial_vortex_instance: AxialVortex to associate with this experiment
        """
        self.axial_vortex = axial_vortex_instance
        self.axial_vortex.get_turb_visc_by_vtheta(self.eta_p)
        self.axial_vortex.get_pressure_relax_terms(self.eta_p)

    def to_json(self, json_path):
        """Dump the experiment parameters, including the vortex characterization,
        to a json file; returns the written dict.

        :param json_path: destination path for the json file
        """
        outdict = self.axial_vortex.char_dict
        atts = ["experiment_id", "n_samples", "z_location_mm", "v_nominal", "dt", "test_date", "v_fs_mean",
                "v_fs_sigma", "q", "pres_atm", "temp_tunnel", "wet_bulb", "dry_bulb", "rel_humid", "eta_p"]
        for att in atts:
            outdict[att] = getattr(self, att)
        with open(json_path, 'w+') as logfile:
            logfile.write(json.dumps(outdict, indent=4))
        return outdict
|
import logging
import requests
import xml.etree.ElementTree
import re
from urllib.parse import urljoin
from datetime import datetime, timedelta, timezone
# Relative paths of the Obihai adapter's built-in status pages
DEFAULT_STATUS_PATH = 'DI_S_.xml'
DEFAULT_LINE_PATH = 'PI_FXS_1_Stats.xml'
DEFAULT_CALL_STATUS_PATH = 'callstatus.htm'
# module-level logger, named after this module
_LOGGER = logging.getLogger(__name__)
class PyObihai:
    """HTTP client that polls the status pages of an Obihai VoIP adapter.

    All endpoints are fetched over plain HTTP with digest authentication and
    a 2 second timeout; request failures are logged rather than raised.
    """
    def __init__(self, host, username, password):
        """Initialize connection."""
        self._username = username
        self._password = password
        # the non-admin "user" account serves the same pages under /user/
        if self._username == "user":
            host = host + "/user/"
        self._server = '{}://{}'.format('http', host)
        # seeded with "now"; get_state() refines this from the device uptime
        self._last_reboot = datetime.now(timezone.utc)
    def get_state(self):
        """Get the state for services sensors, phone sensor and last reboot.

        Returns a dict that may contain "Reboot Required", one entry per
        service/line, and "Last Reboot" (ISO timestamp). Empty on error.
        """
        url = urljoin(self._server, DEFAULT_STATUS_PATH)
        services = dict()
        try:
            resp = requests.get(url, auth=requests.auth.HTTPDigestAuth(self._username,self._password), timeout=2)
            root = xml.etree.ElementTree.fromstring(resp.text)
            for models in root.iter('model'):
                # NOTE(review): attrib values are strings, so any non-empty
                # value (even "false") is truthy here -- confirm intended
                if models.attrib["reboot_req"]:
                    services["Reboot Required"] = models.attrib["reboot_req"]
            for o in root.findall("object"):
                name = o.attrib.get('name')
                if 'Service Status' in name:
                    if 'OBiTALK Service Status' in name:
                        for e in o.findall("./parameter[@name='Status']/value"):
                            state = e.attrib.get('current').split()[0]
                            services[name] = state
                    else:
                        for e in o.findall("./parameter[@name='Status']/value"):
                            state = e.attrib.get('current').split()[0]
                            # lines not in "Service ..." state report their
                            # current call state instead
                            if state != 'Service':
                                for x in o.findall("./parameter[@name='CallState']/value"):
                                    state = x.attrib.get('current').split()[0]
                            services[name] = state
                if 'Product Information' in name:
                    for e in o.findall("./parameter[@name='UpTime']/value"):
                        # UpTime looks like "<days> days <HH:MM:SS>"; derive the
                        # boot moment by subtracting the uptime from now
                        days = e.attrib.get('current').split()[0]
                        days = int(days)
                        time = e.attrib.get('current').split()[2]
                        h, m, s = time.split(':')
                        h = int(h)
                        m = int(m)
                        s = int(s)
                        now = datetime.now(timezone.utc)
                        state = now - timedelta(days=days, hours=h, minutes=m, seconds=s, microseconds=now.microsecond)
                        # ignore sub-5-second jitter between successive polls
                        if abs(self._last_reboot - state) > timedelta(seconds=5):
                            self._last_reboot = state
                        services["Last Reboot"] = self._last_reboot.isoformat()
        except requests.exceptions.RequestException as e:
            _LOGGER.error(e)
        return services
    def get_line_state(self):
        """Get the state of the port connection and last caller info.

        NOTE(review): returns None (not a dict) when the page answers with a
        non-200 status -- callers must handle both; confirm intended.
        """
        url = urljoin(self._server, DEFAULT_LINE_PATH)
        services = dict()
        try:
            resp = requests.get(url, auth=requests.auth.HTTPDigestAuth(self._username,self._password), timeout=2)
            if (resp.status_code != 200):
                return
            root = xml.etree.ElementTree.fromstring(resp.text)
            for o in root.findall("object"):
                name = o.attrib.get('name')
                subtitle = o.attrib.get('subtitle')
                if 'Port Status' in name:
                    for e in o.findall("./parameter[@name='State']/value"):
                        state = e.attrib.get('current')
                        services[subtitle] = state
                    for x in o.findall("./parameter[@name='LastCallerInfo']/value"):
                        state = x.attrib.get('current')
                        services[subtitle + " Last Caller Info"] = state
        except requests.exceptions.RequestException as e:
            _LOGGER.error(e)
        return services
    def get_device_mac(self):
        """Get the device mac address, or None if unavailable."""
        url = urljoin(self._server, DEFAULT_STATUS_PATH)
        mac = None
        try:
            resp = requests.get(url, auth=requests.auth.HTTPDigestAuth(self._username,self._password), timeout=2)
            root = xml.etree.ElementTree.fromstring(resp.text)
            for o in root.findall("object"):
                name = o.attrib.get('name')
                if 'WAN Status' in name:
                    for e in o.findall("./parameter[@name='MACAddress']/value"):
                        mac = e.attrib.get('current')
        except requests.exceptions.RequestException as e:
            _LOGGER.error(e)
        return mac
    def get_device_serial(self):
        """Get the device serial number, or None if unavailable."""
        url = urljoin(self._server, DEFAULT_STATUS_PATH)
        serial = None
        try:
            resp = requests.get(url, auth=requests.auth.HTTPDigestAuth(self._username,self._password), timeout=2)
            root = xml.etree.ElementTree.fromstring(resp.text)
            for o in root.findall("object"):
                name = o.attrib.get('name')
                if 'Product Information' in name:
                    for e in o.findall("./parameter[@name='SerialNumber']/value"):
                        serial = e.attrib.get('current')
        except requests.exceptions.RequestException as e:
            _LOGGER.error(e)
        return serial
    def get_call_direction(self):
        """Get the call direction by scraping the callstatus HTML page.

        Returns {'Call Direction': ...} with 'No Active Calls',
        'Inbound Call' or 'Outbound Call'.
        """
        url = urljoin(self._server, DEFAULT_CALL_STATUS_PATH)
        call_direction = dict()
        call_direction['Call Direction'] = 'No Active Calls'
        try:
            response = requests.get(url, auth=requests.auth.HTTPDigestAuth(self._username,self._password), timeout=2)
            lines = response.text
            start = lines.find("Number of Active Calls:")
            if start != -1:
                # +24 skips the label and the character right after the colon;
                # the count then runs up to the closing </tr>
                temp_str = lines[start + 24:]
                end = temp_str.find("</tr>")
                if end != -1:
                    call_status = str(temp_str[:end])
                    if call_status == "1":
                        start = lines.find("Inbound")
                        if start != -1:
                            call_direction['Call Direction'] = "Inbound Call"
                        else:
                            start = lines.find("Outbound")
                            if start != -1:
                                call_direction['Call Direction'] = "Outbound Call"
        except requests.exceptions.RequestException as e:
            _LOGGER.error(e)
        return call_direction
    def check_account(self):
        """Check account credentials; True only on an HTTP 200 answer.

        NOTE(review): a reachable host answering non-200 also returns False,
        but without the "Invalid credentials" log line -- confirm intended.
        """
        url = urljoin(self._server, DEFAULT_STATUS_PATH)
        try:
            response = requests.get(url, auth=requests.auth.HTTPDigestAuth(self._username,self._password), timeout=2)
            if response.status_code == 200:
                return True
        except requests.exceptions.RequestException:
            _LOGGER.error("Invalid credentials")
        return False
|
<gh_stars>0
"""
User Interface part of nuqql
"""
#######################
# USER INTERFACE PART #
#######################
import curses
import curses.ascii
import datetime
import nuqql.config
import nuqql.conversation
import nuqql.history
def handle_message(backend, acc_id, tstamp, sender, msg):
    """
    Dispatch an incoming backend message to the matching conversation.

    Falls back to the backend's main conversation window when no conversation
    matches the (backend, account id, sender) triple.
    """
    when = datetime.datetime.fromtimestamp(tstamp)
    # try to find the conversation this message belongs to
    for conv in nuqql.conversation.CONVERSATIONS:
        if conv.backend is not backend:
            continue
        if not conv.account or conv.account.aid != acc_id:
            continue
        if conv.name != sender:
            continue
        # matching conversation found: log the message there
        entry = conv.log(conv.name, msg, tstamp=when)
        nuqql.history.log(conv, entry)
        # alert the user unless this conversation's window is in focus
        if not conv.is_active():
            conv.notify()
        return
    # no match: show the message in the backend's main window
    backend.conversation.log(sender, msg, tstamp=when)
def update_buddy(buddy):
    """
    Redraw the conversation list entry that belongs to *buddy*.
    """
    buddy_convs = (c for c in nuqql.conversation.CONVERSATIONS
                   if isinstance(c, nuqql.conversation.BuddyConversation))
    for conv in buddy_convs:
        # a buddy conversation's first peer is the buddy itself
        if conv.peers[0] is buddy:
            conv.wins.list_win.redraw()
def add_buddy(buddy):
    """
    Create a conversation for a newly appeared buddy and show it in the list.

    Also checks the history for unread messages and notifies the user when
    the new conversation is not active.
    """
    conv = nuqql.conversation.BuddyConversation(buddy.backend, buddy.account,
                                                buddy.name)
    conv.peers.append(buddy)
    conv.wins.list_win.add(conv)
    conv.wins.list_win.redraw()
    # unread messages exist when the newest history entry differs from the
    # last message the user read (or nothing was marked read at all)
    newest = nuqql.history.get_last_log_line(conv)
    last_read = nuqql.history.get_lastread(conv)
    if newest and (not last_read or not newest.is_equal(last_read)) \
            and not conv.is_active():
        conv.notify()
def read_input():
    """
    Poll the curses screen for one key press; return it or None on timeout.
    """
    # NOTE(review): nuqql.win is not in this module's visible imports --
    # presumably available transitively; confirm
    screen = nuqql.win.MAIN_WINS["screen"]
    try:
        return screen.get_wch()
    except curses.error:
        # get_wch() raises curses.error when the read times out
        return None
def show_terminal_warning():
    """
    Show a warning that the terminal size is invalid, if it fits on screen
    """
    screen = nuqql.win.MAIN_WINS["screen"]
    screen.clear()
    # bail out when there is no room for even a single character
    height, width = screen.getmaxyx()
    if height < 1 or width < 1:
        return
    # truncate the message to whatever fits on the first line
    screen.addstr(0, 0, "Invalid terminal size. Please resize."[:width - 1])
def is_input_valid(char):
    """
    Return True when *char* is usable input: not None and not a NUL byte.
    """
    return char is not None and char != "\0"
def handle_input():
    """
    Read and handle user input.

    Checks run in strict priority order: invalid input -> invalid terminal ->
    resize -> active conversation -> list window. Returns True to keep the
    main loop running, False when the user quit (nothing active anymore).
    """
    # wait for user input and get timeout or character to process
    char = read_input()
    # handle user input
    if not is_input_valid(char):
        # No valid input, keep waiting for input
        return True
    # if terminal size is not valid, stop here
    if not nuqql.config.WinConfig.is_terminal_valid():
        show_terminal_warning()
        return True
    # if terminal resized, resize and redraw active windows
    if char == curses.KEY_RESIZE:
        nuqql.conversation.resize_main_window()
        return True
    # pass user input to active conversation
    for conv in nuqql.conversation.CONVERSATIONS:
        if conv.is_active():
            conv.process_input(char)
            return True
    # if no conversation is active pass input to active list window
    if nuqql.win.MAIN_WINS["list"].state.active:
        # list window navigation
        nuqql.win.MAIN_WINS["input"].redraw()
        nuqql.win.MAIN_WINS["log"].redraw()
        nuqql.win.MAIN_WINS["list"].process_input(char)
        return True
    # list window is also inactive -> user quit
    return False
def start(stdscr, func):
    """
    Start UI and run provided function.

    :param stdscr: curses standard screen, supplied by curses.wrapper
    :param func: callable executed once the UI is set up
    :return: an error string when the terminal is too small, else func()'s
        return value
    """
    # save stdscr
    nuqql.win.MAIN_WINS["screen"] = stdscr
    # configuration
    stdscr.timeout(10)  # input timeout in ms -> non-blocking read loop
    # clear everything
    stdscr.clear()
    stdscr.refresh()
    # make sure window config is loaded
    nuqql.config.init_win(stdscr)
    # create main windows, if terminal size is valid, otherwise just stop here
    if not nuqql.config.WinConfig.is_terminal_valid():
        return "Terminal size invalid."
    nuqql.conversation.create_main_windows()
    # run function provided by caller
    return func()
def init(func):
    """
    Initialize the curses UI, run *func* inside it, and print any error
    message returned after curses has restored the terminal.

    :param func: callable run by start() once the UI is set up
    """
    retval = curses.wrapper(start, func)
    # a truthy return value is an error message to show after teardown
    # (the extra `retval != ""` check was redundant and has been dropped)
    if retval:
        print(retval)
|
<reponame>blueyed/multidict<filename>setup.py
import codecs
import pathlib
from itertools import islice
import os
import platform
import re
import sys
from setuptools import setup, Extension
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
from distutils.command.build_ext import build_ext
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
PROFILE_BUILD = bool(os.environ.get('PROFILE_BUILD'))
"""Flag whether extensions should be built with profiling enabled."""
NO_EXTENSIONS = bool(os.environ.get('MULTIDICT_NO_EXTENSIONS'))
"""Flag whether extensions building/usage should be skipped."""
PYPY = platform.python_implementation() == 'PyPy'
"""Flag whether we are in PyPy runtime."""
USE_CYTHON_EXTENSIONS = not NO_EXTENSIONS and not PYPY
"""Flag whether prerequisites for building extensions are met."""
here = pathlib.Path(__file__).parent
"""Current folder (containing setup.py)."""
IS_GIT_REPO = (here / '.git').exists()
"""Flag whether we are in Git repo."""
# Compile failures are fatal only for a git checkout that should have working
# Cython tooling; everywhere else they trigger the pure-python fallback.
ignore_compile_excs = (
    () if USE_CYTHON_EXTENSIONS and IS_GIT_REPO
    else (CCompilerError, )
) + (DistutilsExecError, DistutilsPlatformError, ValueError)
"""Exceptions to ignore during compilation."""
# Fallbacks for PyPy: don't use C extensions
extensions = []
cmdclass = {}
if USE_CYTHON_EXTENSIONS:
    # A git checkout ships only .pyx sources, so Cython is mandatory there;
    # sdists include the pre-generated .c files and can build without it.
    if IS_GIT_REPO and not USE_CYTHON:
        print("Install cython when building from git clone",
              file=sys.stderr)
        print("Hint:", file=sys.stderr)
        print("  pip install cython", file=sys.stderr)
        sys.exit(1)
    ext = '.pyx' if USE_CYTHON else '.c'
    # NOTE(review): 'macros' is computed here but never passed to Extension
    # below -- confirm whether define_macros=macros was intended
    if PROFILE_BUILD:
        macros = [('CYTHON_TRACE', '1')]
    else:
        macros = []
    CFLAGS = ['-O2']
    # CFLAGS = ['-g']
    if platform.system() != 'Windows':
        CFLAGS.extend(['-std=c99', '-Wall', '-Wsign-compare', '-Wconversion',
                       '-fno-strict-aliasing'])
    extensions = [
        Extension(
            'multidict._multidict',
            [
                'multidict/_multidict' + ext,
                'multidict/_pair_list.c',
                'multidict/_multidict_iter.c',
                'multidict/_multidict_views.c'
            ],
            extra_compile_args=CFLAGS
        )
    ]
    if USE_CYTHON:
        if PROFILE_BUILD:
            directives = {"linetrace": True}
        else:
            directives = {}
        extensions = cythonize(extensions, compiler_directives=directives)
    # _istr is plain C and never needs cythonizing
    extensions.append(Extension('multidict._istr',
                                ['multidict/_istr.c']))
class BuildFailed(Exception):
    """Raised when building a C extension fails and a fallback is possible."""
    pass
class ve_build_ext(build_ext):
    # This class allows C extension building to fail.
    def run(self):
        """Run the build, translating platform problems into BuildFailed."""
        try:
            build_ext.run(self)
        except (DistutilsPlatformError, FileNotFoundError):
            raise BuildFailed()
    def build_extension(self, ext):
        """Build one extension, turning expected compile errors into BuildFailed."""
        try:
            build_ext.build_extension(self, ext)
        except ignore_compile_excs:
            raise BuildFailed()
# use the failure-tolerant builder for all extension builds
cmdclass['build_ext'] = ve_build_ext
try:
from wheel.bdist_wheel import bdist_wheel
def emit_chunks_of(num, from_):
res = []
for i in from_:
res.append(i)
if len(res) >= num:
yield tuple(res)
res = []
if len(res):
yield tuple(res)
class _bdist_wheel(bdist_wheel):
user_options = bdist_wheel.user_options + [
('plat-tag-chunk-num=', None,
'macOS platform tags subset size'),
('plat-tag-chunk-pos=', None,
'macOS platform tags chunk position'),
]
@property
def macos_platforms_range(self):
return range(6, 14)
def initialize_options(self):
super().initialize_options()
self.plat_tag_chunk_num = None
self.plat_tag_chunk_pos = None
def finalize_options(self):
super().finalize_options()
try:
self.plat_tag_chunk_num = int(self.plat_tag_chunk_num)
if not self.plat_tag_chunk_num:
raise ValueError
except TypeError:
"""None value."""
pass
except ValueError:
"""Empty string or 0."""
raise ValueError(
'plat-tag-chunk-num must be a positive number'
)
try:
self.plat_tag_chunk_pos = int(self.plat_tag_chunk_pos)
if not self.plat_tag_chunk_pos:
raise ValueError
except TypeError:
"""None value."""
pass
except ValueError:
"""Empty string or 0."""
raise ValueError(
'plat-tag-chunk-num must be a positive number'
)
def get_macos_compatible_tags(self):
return (
'macosx_10_{ver}_{arch}'.format(ver=v, arch=a)
for v in self.macos_platforms_range
for a in ('intel', 'x86_64')
)
def select_macos_tags_chunk(self):
compatible_platforms = self.get_macos_compatible_tags()
if self.plat_tag_chunk_num and self.plat_tag_chunk_pos:
compatible_platforms = islice(
emit_chunks_of(
self.plat_tag_chunk_num,
compatible_platforms,
),
self.plat_tag_chunk_pos - 1,
self.plat_tag_chunk_pos,
)
try:
compatible_platforms = next(
iter(compatible_platforms)
)
except StopIteration:
raise ValueError(
'You must select an existing macOS tag chunk.'
)
return tuple(compatible_platforms)
def get_tag(self):
tag = super().get_tag()
if tag[2] != 'macosx_10_6_intel':
return tag
compatible_platforms = self.select_macos_tags_chunk()
new_version_tag = '.'.join(compatible_platforms)
return tag[:2] + (new_version_tag, )
cmdclass['bdist_wheel'] = _bdist_wheel
except ImportError:
"""Wheel is not installed."""
# Parse __version__ straight out of the package source so setup.py never has
# to import multidict (which might require the not-yet-built C extension).
with codecs.open(os.path.join(os.path.abspath(os.path.dirname(
        __file__)), 'multidict', '__init__.py'), 'r', 'latin1') as fp:
    try:
        version = re.findall(r"^__version__ = '([^']+)'\r?$",
                             fp.read(), re.M)[0]
    except IndexError:
        raise RuntimeError('Unable to determine version.')
def read(f):
    """Return the stripped contents of file *f* located next to setup.py.

    Uses a context manager so the handle is closed deterministically (the
    original relied on the garbage collector to close it).
    """
    with open(os.path.join(os.path.dirname(__file__), f)) as fp:
        return fp.read().strip()
# Pull in pytest-runner only when a test command was actually requested, so
# plain installs do not acquire the extra setup dependency.
NEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []
tests_require = ['pytest', 'pytest-cov']
name = 'multidict'
appveyor_slug = 'asvetlov/{}'.format(name) # FIXME: move under aio-libs/* slug
repo_slug = 'aio-libs/{}'.format(name)
repo_url = 'https://github.com/{}'.format(repo_slug)
# Keyword arguments for setup(); kept in a dict so the fallback below can
# retry without the C extension bits after a BuildFailed.
args = dict(
    name=name,
    version=version,
    description=('multidict implementation'),
    long_description=read('README.rst'),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Development Status :: 5 - Production/Stable',
    ],
    author='<NAME>',
    author_email='<EMAIL>',
    url=repo_url,
    project_urls={
        'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
        'CI: AppVeyor': 'https://ci.appveyor.com/project/{}'.format(appveyor_slug),
        'CI: Circle': 'https://circleci.com/gh/{}'.format(repo_slug),
        'CI: Shippable': 'https://app.shippable.com/github/{}'.format(repo_slug),
        'CI: Travis': 'https://travis-ci.com/{}'.format(repo_slug),
        'Coverage: codecov': 'https://codecov.io/github/{}'.format(repo_slug),
        'Docs: RTD': 'https://{}.readthedocs.io'.format(name),
        'GitHub: issues': '{}/issues'.format(repo_url),
        'GitHub: repo': repo_url,
    },
    license='Apache 2',
    packages=['multidict'],
    python_requires='>=3.4.1',
    tests_require=tests_require,
    setup_requires=pytest_runner,
    include_package_data=True,
    ext_modules=extensions,
    cmdclass=cmdclass)
# First try a full build with C extensions; on BuildFailed (raised by
# ve_build_ext) retry as a pure-python install.
try:
    setup(**args)
except BuildFailed:
    print("************************************************************")
    print("Cannot compile C accelerator module, use pure python version")
    print("************************************************************")
    del args['ext_modules']
    del args['cmdclass']
    setup(**args)
|
<reponame>joshua-gould/anndata
from __future__ import annotations
from os import PathLike
from collections.abc import Mapping
from functools import partial
from typing import Union
from types import MappingProxyType
from warnings import warn
import h5py
import numpy as np
import pandas as pd
from scipy import sparse
import anndata as ad
from anndata import AnnData, Raw
from anndata._core.index import _normalize_indices
from anndata._core.merge import intersect_keys
from anndata._core.sparse_dataset import SparseDataset
from anndata._core import views
from anndata.compat import (
Literal,
OverloadedDict,
ZarrArray,
ZarrGroup,
_read_attr,
_from_fixed_length_strings,
_decode_structured_array,
)
from anndata._io.utils import report_write_key_on_error, check_key, H5PY_V3
from anndata._warnings import OldFormatWarning
from .registry import (
_REGISTRY,
IOSpec,
get_spec,
read_elem,
read_elem_partial,
write_elem,
)
H5Array = h5py.Dataset
H5Group = h5py.Group
####################
# Dispatch methods #
####################
# def is_full_slice(idx):
# if isinstance(idx, tuple) and len(idx) == 1:
# if isinstance(idx, type(None)):
# return True
# elif idx is Ellipsis:
# return True
# elif isinstance(idx, tuple):
# for el in idx:
# if isinstance(el, type(None)):
# pass
# elif isinstance(el, slice):
# if el != slice(None):
# return False
# else:
# return False
# return True
# return False
################################
# Fallbacks / backwards compat #
################################
# Note: there is no need for writing in a backwards compatible format, maybe
@_REGISTRY.register_read(H5Group, IOSpec("", ""))
@_REGISTRY.register_read(H5Array, IOSpec("", ""))
def read_basic(elem):
    """Read an HDF5 element written without encoding metadata (legacy files).

    Mappings are read recursively, with a special case for old-style sparse
    groups marked by an "h5sparse_format" attribute; datasets are delegated
    to the legacy h5ad reader. Other element types fall through to None.
    """
    from anndata._io import h5ad
    warn(
        f"Element '{elem.name}' was written without encoding metadata.",
        OldFormatWarning,
        stacklevel=3,
    )
    if isinstance(elem, Mapping):
        # Backwards compat sparse arrays
        if "h5sparse_format" in elem.attrs:
            return SparseDataset(elem).to_memory()
        return {k: read_elem(v) for k, v in elem.items()}
    elif isinstance(elem, h5py.Dataset):
        return h5ad.read_dataset(elem)  # TODO: Handle legacy
@_REGISTRY.register_read(ZarrGroup, IOSpec("", ""))
@_REGISTRY.register_read(ZarrArray, IOSpec("", ""))
def read_basic_zarr(elem):
    """Zarr counterpart of read_basic: read a legacy element without metadata.

    Mappings are read recursively (old-style sparse groups included);
    arrays are delegated to the legacy zarr reader.
    """
    from anndata._io import zarr
    warn(
        f"Element '{elem.name}' was written without encoding metadata.",
        OldFormatWarning,
        stacklevel=3,
    )
    if isinstance(elem, Mapping):
        # Backwards compat sparse arrays
        if "h5sparse_format" in elem.attrs:
            return SparseDataset(elem).to_memory()
        return {k: read_elem(v) for k, v in elem.items()}
    elif isinstance(elem, ZarrArray):
        return zarr.read_dataset(elem)  # TODO: Handle legacy
# @_REGISTRY.register_read_partial(IOSpec("", ""))
# def read_basic_partial(elem, *, items=None, indices=(slice(None), slice(None))):
# if isinstance(elem, Mapping):
# return _read_partial(elem, items=items, indices=indices)
# elif indices != (slice(None), slice(None)):
# return elem[indices]
# else:
# return elem[()]
###########
# AnnData #
###########
def read_indices(group):
    """Return the (obs index, var index) arrays stored in an AnnData group."""
    axis_indices = []
    for axis in ("obs", "var"):
        axis_group = group[axis]
        # each axis group records which of its columns is the index
        index_key = _read_attr(axis_group.attrs, "_index")
        axis_indices.append(read_elem(axis_group[index_key]))
    return tuple(axis_indices)
def read_partial(
    pth: PathLike,
    *,
    obs_idx=slice(None),
    var_idx=slice(None),
    X=True,
    obs=None,
    var=None,
    obsm=None,
    varm=None,
    obsp=None,
    varp=None,
    layers=None,
    uns=None,
) -> ad.AnnData:
    """Read a subset of an .h5ad file into an in-memory AnnData.

    *obs_idx*/*var_idx* select observations/variables; the per-mapping
    arguments (obs, var, obsm, varm, obsp, varp, layers, uns) restrict which
    items of each mapping are read. When *X* is falsy an empty sparse
    placeholder with the right shape replaces the data matrix.
    """
    result = {}
    with h5py.File(pth, "r") as f:
        # resolve label/boolean/etc. indexers against the stored indices
        obs_idx, var_idx = _normalize_indices((obs_idx, var_idx), *read_indices(f))
        result["obs"] = read_elem_partial(
            f["obs"], items=obs, indices=(obs_idx, slice(None))
        )
        result["var"] = read_elem_partial(
            f["var"], items=var, indices=(var_idx, slice(None))
        )
        if X:
            result["X"] = read_elem_partial(f["X"], indices=(obs_idx, var_idx))
        else:
            # keep the AnnData shape consistent without loading X
            result["X"] = sparse.csr_matrix((len(result["obs"]), len(result["var"])))
        if "obsm" in f:
            result["obsm"] = _read_partial(
                f["obsm"], items=obsm, indices=(obs_idx, slice(None))
            )
        if "varm" in f:
            result["varm"] = _read_partial(
                f["varm"], items=varm, indices=(var_idx, slice(None))
            )
        if "obsp" in f:
            result["obsp"] = _read_partial(
                f["obsp"], items=obsp, indices=(obs_idx, obs_idx)
            )
        if "varp" in f:
            result["varp"] = _read_partial(
                f["varp"], items=varp, indices=(var_idx, var_idx)
            )
        if "layers" in f:
            result["layers"] = _read_partial(
                f["layers"], items=layers, indices=(obs_idx, var_idx)
            )
        if "uns" in f:
            result["uns"] = _read_partial(f["uns"], items=uns)
    return ad.AnnData(**result)
def _read_partial(group, *, items=None, indices=(slice(None), slice(None))):
    """Recursively read the selected *items* of *group*, sliced by *indices*."""
    if group is None:
        return None
    # restrict to the requested keys (all keys when items is None)
    keys = intersect_keys((group,) if items is None else (group, items))
    out = {}
    for key in keys:
        # a Mapping in items carries per-key sub-selections; anything else
        # means "read the whole child"
        sub_items = items.get(key, None) if isinstance(items, Mapping) else None
        out[key] = read_elem_partial(group[key], items=sub_items, indices=indices)
    return out
@_REGISTRY.register_write(ZarrGroup, AnnData, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_write(H5Group, AnnData, IOSpec("anndata", "0.1.0"))
def write_anndata(f, k, adata, dataset_kwargs=MappingProxyType({})):
    """Write a whole AnnData object as a sub-group *k* of *f*.

    Each attribute is written through the registry so per-type encodings
    apply; mapping attributes are materialized as plain dicts first.
    """
    g = f.require_group(k)
    write_elem(g, "X", adata.X, dataset_kwargs=dataset_kwargs)
    write_elem(g, "obs", adata.obs, dataset_kwargs=dataset_kwargs)
    write_elem(g, "var", adata.var, dataset_kwargs=dataset_kwargs)
    write_elem(g, "obsm", dict(adata.obsm), dataset_kwargs=dataset_kwargs)
    write_elem(g, "varm", dict(adata.varm), dataset_kwargs=dataset_kwargs)
    write_elem(g, "obsp", dict(adata.obsp), dataset_kwargs=dataset_kwargs)
    write_elem(g, "varp", dict(adata.varp), dataset_kwargs=dataset_kwargs)
    write_elem(g, "layers", dict(adata.layers), dataset_kwargs=dataset_kwargs)
    write_elem(g, "uns", dict(adata.uns), dataset_kwargs=dataset_kwargs)
    write_elem(g, "raw", adata.raw, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_read(H5Group, IOSpec("raw", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("raw", "0.1.0"))
def read_anndata(elem):
    """Read a stored AnnData (or Raw) group back into an AnnData object."""
    attr_keys = (
        "X",
        "obs",
        "var",
        "obsm",
        "varm",
        "obsp",
        "varp",
        "layers",
        "uns",
        "raw",
    )
    kwargs = {key: read_elem(elem[key]) for key in attr_keys if key in elem}
    # preserve the stored matrix dtype on reconstruction
    if "X" in kwargs:
        kwargs["dtype"] = kwargs["X"].dtype
    return AnnData(**kwargs)
@_REGISTRY.register_write(H5Group, Raw, IOSpec("raw", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, Raw, IOSpec("raw", "0.1.0"))
def write_raw(f, k, raw, dataset_kwargs=MappingProxyType({})):
    """Write an AnnData .raw object (X, var, varm only) as sub-group *k*."""
    g = f.create_group(k)
    write_elem(g, "X", raw.X, dataset_kwargs=dataset_kwargs)
    write_elem(g, "var", raw.var, dataset_kwargs=dataset_kwargs)
    write_elem(g, "varm", dict(raw.varm), dataset_kwargs=dataset_kwargs)
############
# Mappings #
############
@_REGISTRY.register_read(H5Group, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dict", "0.1.0"))
def read_mapping(elem):
    """Read a stored dict group, recursively reading every value."""
    result = {}
    for key, child in elem.items():
        result[key] = read_elem(child)
    return result
@_REGISTRY.register_write(H5Group, OverloadedDict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(H5Group, dict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, OverloadedDict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, dict, IOSpec("dict", "0.1.0"))
def write_mapping(f, k, v, dataset_kwargs=MappingProxyType({})):
    """Write mapping *v* as a sub-group, recursively writing each entry."""
    group = f.create_group(k)
    for name, value in v.items():
        write_elem(group, name, value, dataset_kwargs=dataset_kwargs)
##############
# np.ndarray #
##############
@_REGISTRY.register_write(H5Group, list, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, list, IOSpec("array", "0.2.0"))
def write_list(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a python list by converting it to an ndarray first."""
    as_array = np.array(elem)
    write_elem(f, k, as_array, dataset_kwargs=dataset_kwargs)
# TODO: Is this the right behaviour for MaskedArrays?
# It's in the `AnnData.concatenate` docstring, but should we keep it?
@_REGISTRY.register_write(H5Group, views.ArrayView, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.ndarray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, h5py.Dataset, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.ma.MaskedArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, views.ArrayView, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.ndarray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, h5py.Dataset, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.ma.MaskedArray, IOSpec("array", "0.2.0"))
def write_basic(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write array types the underlying library (h5py/zarr) handles natively."""
    f.create_dataset(k, data=elem, **dataset_kwargs)
@_REGISTRY.register_read(H5Array, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("string-array", "0.2.0"))
def read_array(elem):
    """Load the whole dataset into memory via full-slice indexing."""
    return elem[()]
@_REGISTRY.register_read_partial(H5Array, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrArray, IOSpec("string-array", "0.2.0"))
def read_array_partial(elem, *, items=None, indices=(slice(None, None))):
    """Read only *indices* of the dataset; *items* is ignored for plain arrays.

    NOTE(review): the default is a single slice, not a tuple (possibly a
    missing comma) -- it works for any rank, so it is left as-is; confirm.
    """
    return elem[indices]
# arrays of strings
@_REGISTRY.register_read(H5Array, IOSpec("string-array", "0.2.0"))
def read_string_array(d):
    """Read an HDF5 string dataset, decoding bytes to python str via asstr()."""
    return read_array(d.asstr())
@_REGISTRY.register_read_partial(H5Array, IOSpec("string-array", "0.2.0"))
def read_array_partial(d, items=None, indices=slice(None)):
    """Partially read an HDF5 string array, decoding bytes to str.

    BUG FIX: the previous body called ``read_array_partial`` which, after
    this redefinition shadowed the earlier function of the same name,
    resolved to this very function and recursed forever. Index the decoded
    asstr() wrapper directly instead.
    """
    return d.asstr()[indices]
@_REGISTRY.register_write(
    H5Group, (views.ArrayView, "U"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(
    H5Group, (views.ArrayView, "O"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(H5Group, (np.ndarray, "U"), IOSpec("string-array", "0.2.0"))
@_REGISTRY.register_write(H5Group, (np.ndarray, "O"), IOSpec("string-array", "0.2.0"))
def write_vlen_string_array(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a unicode/object ndarray as an HDF5 variable-length string dataset."""
    str_dtype = h5py.special_dtype(vlen=str)
    f.create_dataset(k, data=elem.astype(str_dtype), dtype=str_dtype, **dataset_kwargs)
@_REGISTRY.register_write(
    ZarrGroup, (views.ArrayView, "U"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(
    ZarrGroup, (views.ArrayView, "O"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "U"), IOSpec("string-array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "O"), IOSpec("string-array", "0.2.0"))
def write_vlen_string_array_zarr(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a unicode/object ndarray to zarr using the VLenUTF8 object codec."""
    import numcodecs
    # create the (object-dtype) dataset first, then fill it, so the codec
    # applies to the assignment
    f.create_dataset(
        k,
        shape=elem.shape,
        dtype=object,
        object_codec=numcodecs.VLenUTF8(),
        **dataset_kwargs,
    )
    f[k][:] = elem
###############
# np.recarray #
###############
def _to_hdf5_vlen_strings(value: np.ndarray) -> np.ndarray:
"""This corrects compound dtypes to work with hdf5 files."""
new_dtype = []
for dt_name, (dt_type, _) in value.dtype.fields.items():
if dt_type.kind in ("U", "O"):
new_dtype.append((dt_name, h5py.special_dtype(vlen=str)))
else:
new_dtype.append((dt_name, dt_type))
return value.astype(new_dtype)
@_REGISTRY.register_read(H5Array, IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("rec-array", "0.2.0"))
def read_recarray(d):
    """Read a structured/record array, restoring fixed-length string fields.

    Under h5py 3 string fields come back as bytes and are decoded against
    the originally stored dtype.
    """
    value = d[()]
    dtype = value.dtype
    value = _from_fixed_length_strings(value)
    if H5PY_V3:
        value = _decode_structured_array(value, dtype=dtype)
    return value
@_REGISTRY.register_write(H5Group, (np.ndarray, "V"), IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.recarray, IOSpec("rec-array", "0.2.0"))
def write_recarray(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a record array to HDF5, converting string fields to vlen strings."""
    f.create_dataset(k, data=_to_hdf5_vlen_strings(elem), **dataset_kwargs)
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "V"), IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.recarray, IOSpec("rec-array", "0.2.0"))
def write_recarray_zarr(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a record array to zarr, converting string fields to fixed length."""
    from anndata.compat import _to_fixed_length_strings
    f.create_dataset(k, data=_to_fixed_length_strings(elem), **dataset_kwargs)
#################
# Sparse arrays #
#################
def write_sparse_compressed(
    f, key, value, fmt: Literal["csr", "csc"], dataset_kwargs=MappingProxyType({})
):
    """Write a compressed sparse matrix as data/indices/indptr datasets.

    *fmt* is not used in this body; it exists so the csr/csc writers can be
    derived via functools.partial (encoding metadata is attached elsewhere).
    """
    g = f.create_group(key)
    g.attrs["shape"] = value.shape
    # Allow resizing
    if "maxshape" not in dataset_kwargs:
        dataset_kwargs = dict(maxshape=(None,), **dataset_kwargs)
    g.create_dataset("data", data=value.data, **dataset_kwargs)
    g.create_dataset("indices", data=value.indices, **dataset_kwargs)
    g.create_dataset("indptr", data=value.indptr, **dataset_kwargs)
# Derive concrete csr/csc writers from the shared implementation, then
# register each for both in-memory matrices and their AnnData view wrappers,
# on both HDF5 and Zarr groups.
write_csr = partial(write_sparse_compressed, fmt="csr")
write_csc = partial(write_sparse_compressed, fmt="csc")
_REGISTRY.register_write(H5Group, sparse.csr_matrix, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(H5Group, views.SparseCSRView, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(H5Group, sparse.csc_matrix, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
_REGISTRY.register_write(H5Group, views.SparseCSCView, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
_REGISTRY.register_write(ZarrGroup, sparse.csr_matrix, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(ZarrGroup, views.SparseCSRView, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(ZarrGroup, sparse.csc_matrix, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
_REGISTRY.register_write(ZarrGroup, views.SparseCSCView, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
@_REGISTRY.register_write(H5Group, SparseDataset, IOSpec("", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, SparseDataset, IOSpec("", "0.1.0"))
def write_sparse_dataset(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a backed SparseDataset, then stamp its real encoding metadata.

    Registered under an empty IOSpec, so the encoding attrs written by the
    registry must be overwritten with the actual matrix format afterwards.
    """
    write_sparse_compressed(
        f, k, elem.to_backed(), fmt=elem.format_str, dataset_kwargs=dataset_kwargs
    )
    # TODO: Cleaner way to do this
    f[k].attrs["encoding-type"] = f"{elem.format_str}_matrix"
    f[k].attrs["encoding-version"] = "0.1.0"
@_REGISTRY.register_read(H5Group, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read(H5Group, IOSpec("csr_matrix", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("csr_matrix", "0.1.0"))
def read_sparse(elem):
    """Read a stored csr/csc group fully into an in-memory sparse matrix."""
    return SparseDataset(elem).to_memory()
@_REGISTRY.register_read_partial(H5Group, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read_partial(H5Group, IOSpec("csr_matrix", "0.1.0"))
def read_sparse_partial(elem, *, items=None, indices=(slice(None), slice(None))):
    """Read only *indices* of a stored sparse matrix; *items* is unused here."""
    return SparseDataset(elem)[indices]
##############
# DataFrames #
##############
@_REGISTRY.register_write(H5Group, views.DataFrameView, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(H5Group, pd.DataFrame, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, views.DataFrameView, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, pd.DataFrame, IOSpec("dataframe", "0.2.0"))
def write_dataframe(f, key, df, dataset_kwargs=MappingProxyType({})):
    """Write a pandas DataFrame as a group of per-column arrays.

    The index is stored as one more column, named by the index name (or
    "_index" when unnamed); column order is preserved in the
    "column-order" attribute. Raises ValueError on a "_index" column.
    """
    # Check arguments
    for reserved in ("_index",):
        if reserved in df.columns:
            raise ValueError(f"{reserved!r} is a reserved name for dataframe columns.")
    group = f.create_group(key)
    col_names = [check_key(c) for c in df.columns]
    group.attrs["column-order"] = col_names
    if df.index.name is not None:
        index_name = df.index.name
    else:
        index_name = "_index"
    group.attrs["_index"] = check_key(index_name)
    # ._values is "the best" array representation. It's the true array backing the
    # object, where `.values` is always a np.ndarray and .array is always a pandas
    # array.
    write_elem(group, index_name, df.index._values, dataset_kwargs=dataset_kwargs)
    for colname, series in df.items():
        # TODO: this should write the "true" representation of the series (i.e. the underlying array or ndarray depending)
        write_elem(group, colname, series._values, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dataframe", "0.2.0"))
def read_dataframe(elem):
    """Reassemble a pd.DataFrame from its per-column arrays and index."""
    column_order = list(_read_attr(elem.attrs, "column-order"))
    idx_key = _read_attr(elem.attrs, "_index")
    frame = pd.DataFrame(
        {col: read_elem(elem[col]) for col in column_order},
        index=read_elem(elem[idx_key]),
        columns=column_order,
    )
    # "_index" is the placeholder for an unnamed index; anything else was a
    # real index name and is restored.
    if idx_key != "_index":
        frame.index.name = idx_key
    return frame
# TODO: Figure out what indices is allowed to be at each element
@_REGISTRY.register_read_partial(H5Group, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrGroup, IOSpec("dataframe", "0.2.0"))
def read_dataframe_partial(
    elem, *, items=None, indices=(slice(None, None), slice(None, None))
):
    """Read a row subset (``indices[0]``) of the stored dataframe, optionally
    restricted to the columns named in ``items`` (stored order is kept)."""
    stored_order = _read_attr(elem.attrs, "column-order")
    if items is None:
        columns = list(stored_order)
    else:
        columns = [col for col in stored_order if col in items]
    idx_key = _read_attr(elem.attrs, "_index")
    rows = indices[0]
    frame = pd.DataFrame(
        {col: read_elem_partial(elem[col], indices=rows) for col in columns},
        index=read_elem_partial(elem[idx_key], indices=rows),
        columns=columns,
    )
    if idx_key != "_index":
        frame.index.name = idx_key
    return frame
# Backwards compat dataframe reading
@_REGISTRY.register_read(H5Group, IOSpec("dataframe", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dataframe", "0.1.0"))
def read_dataframe_0_1_0(elem):
    """Read a v0.1.0 dataframe group (legacy per-column "series" encoding)."""
    column_order = _read_attr(elem.attrs, "column-order")
    idx_key = _read_attr(elem.attrs, "_index")
    data = {}
    for col in column_order:
        data[col] = read_series(elem[col])
    frame = pd.DataFrame(
        data,
        index=read_series(elem[idx_key]),
        columns=list(column_order),
    )
    if idx_key != "_index":
        frame.index.name = idx_key
    return frame
def read_series(dataset: h5py.Dataset) -> Union[np.ndarray, pd.Categorical]:
    """Read a legacy (pre-0.2.0) dataframe column.

    Old-style categorical columns store integer codes in ``dataset`` plus a
    "categories" attribute naming a sibling array of category values; those
    are reassembled into a ``pd.Categorical``. Anything else is read as-is.
    """
    # For reading older dataframes
    if "categories" in dataset.attrs:
        if isinstance(dataset, ZarrArray):
            import zarr

            # BUG FIX: this previously used
            # ``dataset.name.rstrip(dataset.basename)``, but str.rstrip strips
            # a *character set*, not a suffix — a parent path ending in any of
            # the basename's characters would be over-truncated. Slice the
            # suffix off instead to get the parent group's path.
            parent_name = dataset.name[: -len(dataset.basename)]
            parent = zarr.open(dataset.store)[parent_name]
        else:
            parent = dataset.parent
        categories_dset = parent[_read_attr(dataset.attrs, "categories")]
        categories = read_elem(categories_dset)
        ordered = bool(_read_attr(categories_dset.attrs, "ordered", False))
        return pd.Categorical.from_codes(
            read_elem(dataset), categories, ordered=ordered
        )
    else:
        return read_elem(dataset)
@_REGISTRY.register_read_partial(H5Group, IOSpec("dataframe", "0.1.0"))
@_REGISTRY.register_read_partial(ZarrGroup, IOSpec("dataframe", "0.1.0"))
def read_partial_dataframe_0_1_0(
    elem, *, items=None, indices=(slice(None), slice(None))
):
    """Partial read for legacy dataframes: read everything, then slice.

    ``items`` selects columns (all when None); ``indices[0]`` selects rows.
    """
    column_selector = slice(None) if items is None else list(items)
    full = read_elem(elem)
    return full[column_selector].iloc[indices[0]]
###############
# Categorical #
###############
@_REGISTRY.register_write(H5Group, pd.Categorical, IOSpec("categorical", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, pd.Categorical, IOSpec("categorical", "0.2.0"))
def write_categorical(f, k, v, dataset_kwargs=MappingProxyType({})):
    """Write a pd.Categorical as a group holding its codes and categories."""
    group = f.create_group(k)
    group.attrs["ordered"] = bool(v.ordered)

    write_elem(group, "codes", v.codes, dataset_kwargs=dataset_kwargs)
    write_elem(
        group, "categories", v.categories._values, dataset_kwargs=dataset_kwargs
    )
@_REGISTRY.register_read(H5Group, IOSpec("categorical", "0.2.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("categorical", "0.2.0"))
def read_categorical(elem):
    """Rebuild a pd.Categorical from stored codes, categories and order flag."""
    codes = read_elem(elem["codes"])
    categories = read_elem(elem["categories"])
    ordered = _read_attr(elem.attrs, "ordered")
    return pd.Categorical.from_codes(
        codes=codes, categories=categories, ordered=ordered
    )
@_REGISTRY.register_read_partial(H5Group, IOSpec("categorical", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrGroup, IOSpec("categorical", "0.2.0"))
def read_partial_categorical(elem, *, items=None, indices=(slice(None),)):
    """Partially read a stored categorical; only the codes array is sliced.

    BUG FIX: this function was previously also named ``read_categorical``,
    silently shadowing the full-read function of that name at module level
    (the registry held both, but the module attribute pointed at this partial
    variant). Renamed to remove the shadowing; registry-based callers are
    unaffected.
    """
    return pd.Categorical.from_codes(
        codes=read_elem_partial(elem["codes"], indices=indices),
        categories=read_elem(elem["categories"]),
        ordered=_read_attr(elem.attrs, "ordered"),
    )
####################
# Pandas nullables #
####################
@_REGISTRY.register_write(
    H5Group, pd.arrays.IntegerArray, IOSpec("nullable-integer", "0.1.0")
)
@_REGISTRY.register_write(
    ZarrGroup, pd.arrays.IntegerArray, IOSpec("nullable-integer", "0.1.0")
)
@_REGISTRY.register_write(
    H5Group, pd.arrays.BooleanArray, IOSpec("nullable-boolean", "0.1.0")
)
@_REGISTRY.register_write(
    ZarrGroup, pd.arrays.BooleanArray, IOSpec("nullable-boolean", "0.1.0")
)
def write_nullable_integer(f, k, v, dataset_kwargs=MappingProxyType({})):
    """Write a masked (nullable) integer/boolean array as values + mask."""
    group = f.create_group(k)
    mask = v._mask
    if mask is not None:
        write_elem(group, "mask", mask, dataset_kwargs=dataset_kwargs)
    write_elem(group, "values", v._data, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("nullable-integer", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("nullable-integer", "0.1.0"))
def read_nullable_integer(elem):
    """Rebuild a pd.arrays.IntegerArray (plain pd.array when no mask stored)."""
    values = read_elem(elem["values"])
    if "mask" not in elem:
        return pd.array(values)
    return pd.arrays.IntegerArray(values, mask=read_elem(elem["mask"]))
@_REGISTRY.register_read(H5Group, IOSpec("nullable-boolean", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("nullable-boolean", "0.1.0"))
def read_nullable_boolean(elem):
    """Rebuild a pd.arrays.BooleanArray (plain pd.array when no mask stored)."""
    values = read_elem(elem["values"])
    if "mask" not in elem:
        return pd.array(values)
    return pd.arrays.BooleanArray(values, mask=read_elem(elem["mask"]))
###########
# Scalars #
###########
@_REGISTRY.register_read(H5Array, IOSpec("numeric-scalar", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("numeric-scalar", "0.2.0"))
def read_scalar(elem):
    """Extract the scalar value from a stored 0-d array."""
    value = elem[()]
    return value
def write_scalar(f, key, value, dataset_kwargs=MappingProxyType({})):
    """Store ``value`` as a 0-d dataset under ``key``; return the dataset."""
    arr = np.array(value)
    return f.create_dataset(key, data=arr, **dataset_kwargs)
def write_hdf5_scalar(f, key, value, dataset_kwargs=MappingProxyType({})):
    """Like write_scalar, but drops compression options first.

    Scalars can't be compressed — h5py raises an error if we try.
    """
    filtered_kwargs = {
        name: val
        for name, val in dataset_kwargs.items()
        if name not in ("compression", "compression_opts")
    }
    f.create_dataset(key, data=np.array(value), **filtered_kwargs)
# Register numeric-scalar writers for every supported Python/NumPy scalar
# type. HDF5 groups get the compression-stripping variant (scalar datasets
# can't be compressed); zarr groups get the plain writer.
# fmt: off
for numeric_scalar_type in [
    bool, np.bool_,
    np.uint8, np.uint16, np.uint32, np.uint64,
    int, np.int8, np.int16, np.int32, np.int64,
    float, *np.floating.__subclasses__(),
    *np.complexfloating.__subclasses__(),
]:
    _REGISTRY.register_write(H5Group, numeric_scalar_type, IOSpec("numeric-scalar", "0.2.0"))(write_hdf5_scalar)
    _REGISTRY.register_write(ZarrGroup, numeric_scalar_type, IOSpec("numeric-scalar", "0.2.0"))(write_scalar)
# fmt: on

# String scalars in zarr can use the generic scalar writer directly
# (the HDF5 string writer below needs special dtype handling).
_REGISTRY.register_write(ZarrGroup, str, IOSpec("string", "0.2.0"))(write_scalar)
_REGISTRY.register_write(ZarrGroup, np.str_, IOSpec("string", "0.2.0"))(write_scalar)
@_REGISTRY.register_read(H5Array, IOSpec("string", "0.2.0"))
def read_hdf5_string(elem):
    """Read an HDF5-stored string scalar, decoded to str via the asstr view."""
    decoded = elem.asstr()
    return decoded[()]
@_REGISTRY.register_read(ZarrArray, IOSpec("string", "0.2.0"))
def read_zarr_string(elem):
    """Read a zarr string scalar, coercing the stored value to str."""
    raw = elem[()]
    return str(raw)
# Raw bytes scalars round-trip through the generic scalar reader unchanged.
_REGISTRY.register_read(H5Array, IOSpec("bytes", "0.2.0"))(read_scalar)
_REGISTRY.register_read(ZarrArray, IOSpec("bytes", "0.2.0"))(read_scalar)
@_REGISTRY.register_write(H5Group, np.str_, IOSpec("string", "0.2.0"))
@_REGISTRY.register_write(H5Group, str, IOSpec("string", "0.2.0"))
def write_string(f, k, v, dataset_kwargs=MappingProxyType({})):
    """Write a string scalar to HDF5 as a UTF-8 variable-length dataset.

    FIX: ``dataset_kwargs`` previously had no default, unlike every other
    writer in this registry; adding the usual read-only empty default makes
    the signature consistent and is backward compatible for all callers.
    """
    # Scalars can't be compressed — drop compression options, mirroring
    # write_hdf5_scalar above.
    dataset_kwargs = dataset_kwargs.copy()
    dataset_kwargs.pop("compression", None)
    dataset_kwargs.pop("compression_opts", None)
    f.create_dataset(
        k, data=np.array(v, dtype=h5py.string_dtype(encoding="utf-8")), **dataset_kwargs
    )
# @_REGISTRY.register_write(np.bytes_, IOSpec("bytes", "0.2.0"))
# @_REGISTRY.register_write(bytes, IOSpec("bytes", "0.2.0"))
# def write_string(f, k, v, dataset_kwargs):
# if "compression" in dataset_kwargs:
# dataset_kwargs = dict(dataset_kwargs)
# dataset_kwargs.pop("compression")
# f.create_dataset(k, data=np.array(v), **dataset_kwargs)
|
""" Extra functions: annot2vector, annot2frames, unroll and roll.
Transform the annotations of a song into different representations.
They are kept separate from the class because they can be
applied to a subsection, i.e. for transforming only one individual level
to a vector representation.
<NAME> 2018
"""
import copy
import numpy as np
from .download import (audio_from_url, get_my_ydl)
from . import utilities as ut
def unroll(annot):
    """Unroll the hierarchy into paragraphs, lines, words and notes,
    keeping the relations between levels via the key 'index'."""
    hierarchy = copy.deepcopy(annot['hierarchical'])
    levels = {}
    for depth, name in enumerate(('paragraphs', 'lines', 'words', 'notes')):
        levels[name], _ = ut.unroll(hierarchy, depth=depth, output=[])
    return levels
def roll(annot):
    """Roll the individual levels back into one hierarchical structure.

    Output example: [paragraph]['text'][line]['text'][word]['text'][notes]'
    """
    flat = copy.deepcopy(annot)
    merged = ut.roll(flat['notes'], flat['words'])
    for level in ('lines', 'paragraphs'):
        merged = ut.roll(merged, flat[level])
    return {'hierarchical': merged}
def annot2frames(annot, time_r, type='horizontal', depth=3):
    """Transforms annot time into a discrete format wrt a time resolution.

    This function can be used with the whole annotation or with a subset.
    For example, it can be called with a particular paragraph in the
    horizontal format [annot[paragraph_i]] or a line
    [annot[paragraph_i]['text'][line_i]].

    Parameters
    ----------
    annot : list
        annotations vector (annotations['annot']) in any of the formats.
    time_r : float
        time resolution for discretizing the time.
    type : str
        annotation format: horizontal or vertical.
    depth : int
        depth of the horizontal level.
    """
    output = []
    tmp = copy.deepcopy(annot)
    try:
        if type == 'horizontal':
            output = ut.sample(tmp, time_r)
        elif type == 'vertical':
            # NOTE(review): `depth` is constant in this comprehension and the
            # loop variable `i` is unused — presumably `depth=i` was intended
            # so that each level is unrolled once. TODO confirm.
            vertical = [ut.sample(ut.unroll(tmp, [], depth=depth)[0], time_r)
                        for i in range(depth+1)][::-1]
            for i in range(len(vertical[:-1])):
                # NOTE(review): the module-level roll() defined above takes a
                # single argument, but it is called here with two — this looks
                # like it raises TypeError, which the broad except below
                # swallows; possibly ut.roll was intended. TODO confirm.
                if i == 0:
                    output = roll(vertical[i], vertical[i+1])
                else:
                    output = roll(output, vertical[i+1])
    except Exception as e:
        # Broad catch: any failure above ends up in this generic message.
        print('ERROR: unknow type of annotations')
    return output
def annot2vector(annot, duration, time_r, type='voice'):
    """Transform the annotations into a frame vector wrt a time resolution.

    FIXES vs. previous version: the docstring documented a parameter ``dur``
    (actual name: ``duration``) and a mode ``'notes'`` (the code checks
    ``'melody'``); the local ``singal`` typo is renamed and the two mutually
    exclusive mode checks are now an if/elif.

    Parameters
    ----------
    annot : list
        annotations of a single horizontal level
        (for example: annotations['annot']['lines'])
    duration : float
        duration of the vector in seconds (frames past the last note stay 0).
    time_r : float
        time resolution for discretizing the time.
    type : str
        'voice': each frame has a value 1 or 0 for voice or not voice.
        'melody': each frame has the mean freq value of the vocal melody.

    Returns
    -------
    np.ndarray
        vector of int(duration / time_r) frames.
    """
    signal = np.zeros(int(duration / time_r))
    for note in annot:
        begin, end = note['time']
        begin = np.round(begin / time_r).astype(int)
        end = np.round(end / time_r).astype(int)
        # End bin is inclusive, matching the original behavior.
        if type == 'voice':
            signal[begin:end + 1] = 1
        elif type == 'melody':
            signal[begin:end + 1] = np.mean(note['freq'])
    return signal
def annot2vector_chopping(annot, dur, time_r, win_bin, hop_bin, type='voice'):
    """
    Transform the annotations into a frame vector by:
    1 - creating a signal vector for a given sample rate
    2 - chopping it using the given window and hop sizes.

    Parameters
    ----------
    annot : list
        annotations of a single horizontal level
        (for example: annotations['annot']['lines'])
    dur : float
        duration of the vector (for adding zeros).
    time_r : float
        sample rate for discretizing the annotations.
    win_bin : int
        window size in bins for sampling the vector.
    hop_bin : int
        hop size in bins for sampling the vector.
    type : str
        'voice': each frame has a value 1 or 0 for voice or not voice.
        'melody': each frame has the freq value of the main vocal melody.
    """
    output = []
    try:
        signal = annot2vector(annot, dur, time_r, type)
        window = np.hanning(win_bin)
        window_sum = np.sum(window)
        starts = hop_bin * np.arange(int((len(signal) - win_bin) / hop_bin + 1))
        frames = [
            np.sum(window[::-1] * signal[start:start + win_bin]) / window_sum
            for start in starts
        ]
        output = np.array(frames).T
    except Exception:
        print('ERROR: unknow type of annotations')
    return output
def get_audio(dali_info, path_output, skip=None, keep=None):
    """Get the audio for the DALI dataset.

    It can download the whole dataset or only a subset, by providing either
    the ids to skip or the ids to load. When ``keep`` is non-empty it wins
    and ``skip`` is ignored (as before).

    FIX: mutable default arguments (``skip=[], keep=[]``) replaced with the
    ``None`` sentinel idiom — backward compatible, since omitting them still
    behaves like empty lists.

    Parameters
    ----------
    dali_info : list
        where elements are ['DALI_ID', 'NAME', 'YOUTUBE', 'WORKING']
    path_output : str
        full path for storing the audio
    skip : list
        list with the ids to be skipped.
    keep : list
        list with the ids to be kept.

    Returns
    -------
    list
        accumulated download errors (filled in by audio_from_url).
    """
    skip = [] if skip is None else skip
    keep = [] if keep is None else keep
    errors = []
    ydl = get_my_ydl(path_output)
    # The first row of dali_info is skipped — presumably a header row;
    # confirm against the callers that build dali_info.
    if len(keep) > 0:
        for entry in dali_info[1:]:
            if entry[0] in keep:
                audio_from_url(entry[-2], entry[0], ydl, errors)
    else:
        for entry in dali_info[1:]:
            if entry[0] not in skip:
                audio_from_url(entry[-2], entry[0], ydl, errors)
    return errors
|
<reponame>baajur/PALM<gh_stars>100-1000
# coding=utf-8
# NOTE(review): this script contains unresolved git merge conflict markers
# ("<<<<<<< HEAD:test/test2/run.py" ... ">>>>>>> remotes/upstream/r0.3-api:test/test3/run.py")
# and is therefore NOT valid Python as it stands. The HEAD side is a
# multi-head-trainer variant, the upstream side a single-trainer variant with
# a predict phase; code *outside* the markers references objects from both
# sides (e.g. predict_cls_reader from upstream, mh_trainer from HEAD), so the
# conflict must be resolved by hand before the script can run.
import paddlepalm as palm
import json

if __name__ == '__main__':
    # Basic hyper-parameters and data/vocab paths for the run.
    max_seqlen = 512
    batch_size = 4
    num_epochs = 2
    lr = 1e-3
    vocab_path = './pretrain/ernie/vocab.txt'

    train_file = './data/cls4mrqa/train.tsv'
    predict_file = './data/cls4mrqa/dev.tsv'

    config = json.load(open('./pretrain/ernie/ernie_config.json'))

    # ernie = palm.backbone.ERNIE(...)
    ernie = palm.backbone.ERNIE.from_config(config)

    # cls_reader2 = palm.reader.cls(train_file_topic, vocab_path, batch_size, max_seqlen)
    # cls_reader3 = palm.reader.cls(train_file_subj, vocab_path, batch_size, max_seqlen)
    # topic_trainer = palm.Trainer('topic_cls', cls_reader2, cls)
    # subj_trainer = palm.Trainer('subj_cls', cls_reader3, cls)

    # Create the reader for this classification task; its many parameters
    # control dataset input format, number of files, preprocessing rules, etc.
    cls_reader = palm.reader.ClassifyReader(vocab_path, max_seqlen)
<<<<<<< HEAD:test/test2/run.py
    cls_reader2 = palm.reader.ClassifyReader(vocab_path, max_seqlen)
=======
    predict_cls_reader = palm.reader.ClassifyReader(vocab_path, max_seqlen, phase='predict')
>>>>>>> remotes/upstream/r0.3-api:test/test3/run.py
    print(cls_reader.outputs_attr)
    print(predict_cls_reader.outputs_attr)

    # Different backbones impose different feature requirements on the task
    # reader: for classification the basic input features are token_ids and
    # label_ids, but e.g. BERT additionally requires position, segment,
    # input_mask, etc. to be extracted from the input. After register_with,
    # the reader automatically adds the fields the backbone requires.
    cls_reader.register_with(ernie)
    cls_reader2.register_with(ernie)
    print(cls_reader.outputs_attr)
<<<<<<< HEAD:test/test2/run.py
    print("preparing data...")
    print(cls_reader.num_examples)
    cls_reader.load_data(train_file, batch_size)
    cls_reader2.load_data(train_file, batch_size)
=======
    print(predict_cls_reader.outputs_attr)
    print("preparing data...")
    print(cls_reader.num_examples)
    cls_reader.load_data(train_file, batch_size, num_epochs=num_epochs)
>>>>>>> remotes/upstream/r0.3-api:test/test3/run.py
    print(cls_reader.num_examples)
    print('done!')

    # Create the task head (classification, matching, machine reading
    # comprehension, ...). Each head has its own required/optional parameters.
    # Heads are decoupled from readers: any head whose required dataset-side
    # fields the reader can supply is valid.
    cls_head = palm.head.Classify(4, 1024, 0.1)
<<<<<<< HEAD:test/test2/run.py
    cls_head2 = palm.head.Classify(4, 1024, 0.1)

    # Create a trainer from the reader and the task head. A trainer represents
    # one training task: it maintains the training state and key task
    # information, performs validity checks, and controls model
    # saving/loading rules for the task.
    trainer = palm.Trainer('cls')
    trainer2 = palm.Trainer('senti_cls')
    mh_trainer = palm.MultiHeadTrainer([trainer, trainer2])
=======
    # Create a trainer from the reader and the task head. A trainer represents
    # one training task: it maintains the training state and key task
    # information, performs validity checks, and controls model
    # saving/loading rules for the task.
    trainer = palm.Trainer('senti_cls')
>>>>>>> remotes/upstream/r0.3-api:test/test3/run.py

    # match4mrqa.reuse_head_with(mrc4mrqa)

    # data_vars = cls_reader.build()
    # output_vars = ernie.build(data_vars)
    # cls_head.build({'backbone': output_vars, 'reader': data_vars})
<<<<<<< HEAD:test/test2/run.py
    loss_var = mh_trainer.build_forward(ernie, [cls_head, cls_head2])

    n_steps = cls_reader.num_examples * num_epochs // batch_size
    warmup_steps = int(0.1 * n_steps)
    print(warmup_steps)
    sched = palm.lr_sched.TriangularSchedualer(warmup_steps, n_steps)
=======
    loss_var = trainer.build_forward(ernie, cls_head)
    # controller.build_forward()

    # Error! a head/backbone can be only build once! Try NOT to call build_forward method for any Trainer!

    # n_steps = cls_reader.num_examples * num_epochs // batch_size
    # warmup_steps = int(0.1 * n_steps)
    # print(warmup_steps)
    # sched = palm.lr_sched.TriangularSchedualer(warmup_steps, n_steps)
    sched = None
>>>>>>> remotes/upstream/r0.3-api:test/test3/run.py

    adam = palm.optimizer.Adam(loss_var, lr, sched)

    mh_trainer.build_backward(optimizer=adam, weight_decay=0.001)

    # mh_trainer.random_init_params()
    mh_trainer.load_pretrain('pretrain/ernie/params')

    # trainer.train(iterator_fn, print_steps=1, save_steps=5, save_path='outputs', save_type='ckpt,predict')
<<<<<<< HEAD:test/test2/run.py
    mh_trainer.fit_readers_with_mixratio([cls_reader, cls_reader2], 'cls', 2)

    mh_trainer.train(print_steps=1)
    # trainer.save()
=======
    trainer.fit_reader(cls_reader)
    trainer.train(print_steps=1)
    # trainer.save()

    print('prepare to predict...')
    pred_ernie = palm.backbone.ERNIE.from_config(config, phase='pred')
    cls_pred_head = palm.head.Classify(4, 1024, phase='pred')
    trainer.build_predict_forward(pred_ernie, cls_pred_head)
    predict_cls_reader.load_data(predict_file, 8)
    print(predict_cls_reader.num_examples)
    predict_cls_reader.register_with(pred_ernie)
    trainer.fit_reader(predict_cls_reader, phase='predict')
    print('predicting..')
    trainer.predict(print_steps=20)

    # controller = palm.Controller([mrqa, match4mrqa, mlm4mrqa])

    # loss = controller.build_forward(bb, mask_task=[])

    # n_steps = controller.estimate_train_steps(basetask=mrqa, num_epochs=2, batch_size=8, dev_count=4)
    # adam = palm.optimizer.Adam(loss)
    # sched = palm.schedualer.LinearWarmup(learning_rate, max_train_steps=n_steps, warmup_steps=0.1*n_steps)
    #
    # controller.build_backward(optimizer=adam, schedualer=sched, weight_decay=0.001, use_ema=True, ema_decay=0.999)

    # controller.random_init_params()
    # controller.load_pretrain('../../pretrain_model/ernie/params')
    # controller.train()

    # controller = palm.Controller(config='config.yaml', task_dir='tasks', for_train=False)
    # controller.pred('mrqa', inference_model_dir='output_model/secondrun/mrqa/infer_model')
>>>>>>> remotes/upstream/r0.3-api:test/test3/run.py
|
import copy
import itertools
import json
import logging
import os
from collections import OrderedDict
import numpy as np
from PIL import Image, ImageDraw
import pycocotools.mask as mask_util
import torch
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.modeling.matcher import Matcher
from detectron2.structures import Boxes, pairwise_iou
from detectron2.utils import comm
from fibercnn.visualization.utilities import display_image
from fvcore.common.file_io import PathManager
class FiberEvaluator(DatasetEvaluator):
    """
    Evaluate predicted fiber lengths and fiber widths of instances.

    Predictions are matched to the dataset's COCO-format annotations by
    bounding-box IoU (detectron2 Matcher); for each task ("fiberwidth",
    "fiberlength") and each mode ("strict", "loose") a mean absolute
    percentage error (MAPE) is reported.
    """

    def __init__(self, dataset_name, cfg, distributed, output_dir=None):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            cfg (CfgNode): config instance
            distributed (True): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains:
                "instances_results.json" a json file containing the evaluation results.
        """
        # Accumulators filled by process()/evaluate().
        self._predictions = []
        self._fiber_results = []
        self._results = None

        # Matcher to assign predictions to annotations
        self._bbox_matcher = Matcher(
            cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
            cfg.MODEL.ROI_HEADS.IOU_LABELS,
            allow_low_quality_matches=False,
        )

        # Per-instance measurands to evaluate, and the two reporting modes.
        self._tasks = ("fiberwidth", "fiberlength")
        self._modes = ("strict", "loose")
        self._distributed = distributed
        self._output_dir = output_dir

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        self._metadata = MetadataCatalog.get(dataset_name)

        assert hasattr(
            self._metadata, "json_file"
        ), f"json_file was not found in MetaDataCatalog for '{dataset_name}'"

        self._get_annotations()

    def _get_annotations(self):
        """Load the COCO-format ground-truth annotations once, up front."""
        json_file = PathManager.get_local_path(self._metadata.json_file)
        with open(json_file) as f:
            self._annotations = json.load(f)["annotations"]
        self._convert_annotation_bboxes()

    def _convert_annotation_bboxes(self):
        """Convert COCO XYWH boxes in place to detectron2 ``Boxes`` (XYXY),
        so they can be fed to ``pairwise_iou`` during matching."""
        for annotation in self._annotations:
            x1, y1, width, height = annotation["bbox"]
            new_bbox = torch.tensor([x1, y1, x1 + width, y1 + height])
            new_bbox = new_bbox.unsqueeze(0)
            new_bbox = Boxes(new_bbox)
            annotation["bbox"] = new_bbox

    def reset(self):
        """Clear per-run accumulators (DatasetEvaluator contract)."""
        self._predictions = []
        self._fiber_results = []

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a FibeRCNN model
                It is a list of dict. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a FibeRCNN model. It is a list of dicts with key
                "instances" that contains :class:`Instances`.
        """
        for input, output in zip(inputs, outputs):
            prediction = {"image_id": input["image_id"]}

            # TODO this is ugly
            if "instances" in output:
                # Move to CPU and flatten into per-instance result dicts.
                instances = output["instances"].to(self._cpu_device)
                prediction["instances"] = instances_to_evaluatable_format(
                    instances, input["image_id"]
                )
            if "proposals" in output:
                prediction["proposals"] = output["proposals"].to(self._cpu_device)
            self._predictions.append(prediction)

    def evaluate(self):
        """Compute the metrics over everything accumulated by process().

        In distributed mode predictions are gathered onto the main process;
        non-main processes return an empty dict.
        """
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))

            if not comm.is_main_process():
                return {}

        if len(self._predictions) == 0:
            self._logger.warning("[FiberEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            # Persist the raw predictions for later inspection.
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(self._predictions, f)

        self._results = OrderedDict()
        if "instances" in self._predictions[0]:
            self._eval_predictions()
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)

    def _eval_predictions(self):
        """
        Evaluate self._predictions on the given tasks.
        Fill self._results with the metrics of the tasks.
        """
        self._fiber_results = list(itertools.chain(*[x["instances"] for x in self._predictions]))

        # unmap the category ids for COCO
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            reverse_id_mapping = {
                v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
            }
            for result in self._fiber_results:
                category_id = result["category_id"]
                assert (
                    category_id in reverse_id_mapping
                ), "A prediction has category_id={}, which is not available in the dataset.".format(
                    category_id
                )
                result["category_id"] = reverse_id_mapping[category_id]

        self._logger.info("Evaluating predictions ...")

        annotation_image_ids = set(_extract_instances_property(self._annotations, "image_id"))

        for task in self._tasks:
            self._logger.info(f"Task: {task}")
            self._results[task] = {}
            for mode in self._modes:
                # Accumulate per-image percentage errors, then average their
                # absolute values into a single MAPE number.
                percentage_errors = []

                for image_id in annotation_image_ids:
                    image_predictions = _filter_by_image_id(self._fiber_results, image_id)
                    if len(image_predictions) == 0:
                        # Images without predictions are skipped entirely.
                        continue

                    image_annotations = _filter_by_image_id(self._annotations, image_id)

                    matched_image_annotations, matched_labels = self._match_annotations(
                        image_annotations, image_predictions
                    )

                    percentage_errors.append(
                        _get_percentage_errors(
                            image_predictions, matched_image_annotations, matched_labels, task, mode
                        )
                    )

                percentage_errors = np.concatenate(percentage_errors)
                mean_absolute_percentage_error = np.mean(np.abs(percentage_errors))

                self._results[task][f"MAPE_{mode}"] = mean_absolute_percentage_error
                self._logger.info(f"MAPE_{mode}: {mean_absolute_percentage_error}")

    def _match_annotations(self, image_annotations, image_predictions):
        """Match each prediction to the ground-truth annotation with the best
        IoU, subject to the configured thresholds; returns the matched
        annotations (aligned with predictions) and the match labels."""
        # TODO: Evaluate the number of detected instances.
        prediction_boxes = Boxes.cat(_extract_instances_property(image_predictions, "bbox"))
        annotation_boxes = Boxes.cat(_extract_instances_property(image_annotations, "bbox"))

        match_quality_matrix = pairwise_iou(annotation_boxes, prediction_boxes)
        matched_idxs, matched_labels = self._bbox_matcher(match_quality_matrix)
        matched_image_annotations = [image_annotations[i] for i in matched_idxs]

        return matched_image_annotations, matched_labels
def _get_percentage_errors(
    image_predictions, matched_image_annotations, matched_labels, measurand, mode
):
    """Percentage errors of predicted vs. annotated *measurand* for one image.

    In 'loose' mode only pairs with a positive match label are kept; in
    'strict' mode every prediction counts (unmatched predictions are zeroed
    by the label multiplication, so they contribute -100% errors).
    """
    assert mode in ["strict", "loose"], f"Unexpected mode: {mode}"

    valid_match = np.atleast_1d(matched_labels > 0)

    targets = np.array(
        _extract_instances_property(matched_image_annotations, measurand)
    )
    predictions = np.concatenate(
        _extract_instances_property(image_predictions, measurand)
    )
    predictions = predictions * matched_labels.numpy()

    if mode == "loose":
        predictions = predictions[valid_match]
        targets = targets[valid_match]

    return (predictions - targets) / targets * 100
def _extract_instances_property(instances, property_name):
return [annotation[property_name] for annotation in instances]
def instances_to_evaluatable_format(instances, img_id):
    """Convert a detectron2 ``Instances`` object into a list of per-instance
    dicts (COCO-results style) for image ``img_id``.

    Optional fields (segmentation, keypoints, fiberlength, fiberwidth) are
    included only when the Instances object carries the corresponding
    prediction attribute.
    """
    num_instance = len(instances)
    if num_instance == 0:
        return []

    boxes = instances.pred_boxes
    scores = instances.scores.tolist()
    classes = instances.pred_classes.tolist()

    has_mask = instances.has("pred_masks")
    if has_mask:
        # use RLE to encode the masks, because they are too large and takes memory
        # since this evaluator stores outputs of the entire dataset
        rles = [
            mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
            for mask in instances.pred_masks
        ]
        for rle in rles:
            # "counts" is an array encoded by mask_util as a byte-stream. Python3's
            # json writer which always produces strings cannot serialize a bytestream
            # unless you decode it. Thankfully, utf-8 works out (which is also what
            # the pycocotools/_mask.pyx does).
            rle["counts"] = rle["counts"].decode("utf-8")

    has_keypoints = instances.has("pred_keypoints")
    if has_keypoints:
        keypoints = instances.pred_keypoints

    has_fiberlength = instances.has("pred_fiberlength")
    if has_fiberlength:
        fiberlengths = instances.pred_fiberlength
        fiberlengths = np.array(fiberlengths)

    has_fiberwidth = instances.has("pred_fiberwidth")
    if has_fiberwidth:
        fiberwidths = instances.pred_fiberwidth
        fiberwidths = np.array(fiberwidths)

    # Assemble one result dict per instance.
    results = []
    for k in range(num_instance):
        result = {
            "image_id": img_id,
            "category_id": classes[k],
            "bbox": boxes[k],
            "score": scores[k],
        }
        if has_mask:
            result["segmentation"] = rles[k]
        if has_keypoints:
            # In COCO annotations,
            # keypoints coordinates are pixel indices.
            # However our predictions are floating point coordinates.
            # Therefore we subtract 0.5 to be consistent with the annotation format.
            # This is the inverse of data loading logic in `datasets/coco.py`.
            keypoints[k][:, :2] -= 0.5
            result["keypoints"] = keypoints[k].flatten().tolist()
        if has_fiberlength:
            result["fiberlength"] = fiberlengths[k]
        if has_fiberwidth:
            result["fiberwidth"] = fiberwidths[k]
        results.append(result)
    return results
def _filter_by_image_id(data, image_id):
data = [date for date in data if date["image_id"] == image_id]
return data
|
import inflection
from pampy import match, _, TAIL
from copier import copy
from horn.path import TPL_PATH, get_location
from horn.tpl import get_proj_info, merge_fields, validate_type, validate_attr, validate_opts
# Mapping from CLI type keywords to the column-type names emitted into the
# generated model code (these look like SQLAlchemy type names — confirm
# against the templates). 'decimal' aliases 'numeric'; 'ref' is special:
# it marks a foreign-key reference field rather than a plain column type.
TYPES = {
    'integer': 'Integer',
    'float': 'Float',
    'numeric': 'Numeric',
    'boolean': 'Boolean',
    'string': 'String',
    'text': 'Text',
    'date': 'Date',
    'time': 'Time',
    'datetime': 'DateTime',
    'uuid': 'UUID',
    'json': 'JSON',
    'array': 'ARRAY',
    'decimal': 'Numeric',
    'ref': 'reference',
}

# Field affixes accepted on the model side: unique, non-nullable, indexed.
AFFIXES = ('uniq', 'nonull', 'index')
def run(opts):
    """Generate module scaffolding from the parsed CLI options ``opts``."""
    validate_opts(opts)
    module = opts.get('<module>')
    bindings = {
        'module': module,
        'singular': inflection.underscore(module),
        'table': inflection.underscore(opts.get('<table>')),
        'fields': parse_fields(opts.get('<fields>')),
        'has_ref': any(':ref:' in field for field in opts['<fields>']),
    }
    bindings.update(get_proj_info())
    location = get_location(bindings) or TPL_PATH
    copy(
        f'{location}/gen',
        '.',
        data=bindings,
        exclude=['*/schemas/*', '*/views/*', 'tests/*'],
    )
def resolve_assign(ftype, default):
    """Convert a CLI default-value string into a literal for code generation.

    >>> resolve_assign('xxx', 'none')
    'None'
    >>> resolve_assign('ref', '100')
    100
    >>> try:
    ...     resolve_assign('ref', 'apple')
    ... except:
    ...     pass
    Error: Default value must be an integer
    >>> resolve_assign('float', '99.9')
    '99.9'
    >>> resolve_assign('boolean', 'false')
    'False'
    >>> try:
    ...     resolve_assign('boolean', 'apple')
    ... except:
    ...     pass
    Error: Boolean field error, apple
    >>> resolve_assign('ooo', 'elixir')
    "'elixir'"
    """
    if default == 'none':
        return 'None'
    if ftype == 'ref':
        try:
            return int(default)
        except ValueError:
            print('Error: Default value must be an integer')
            exit(1)
    if ftype in ('integer', 'float', 'numeric'):
        return default
    if ftype == 'boolean':
        if default not in ('true', 'false'):
            print(f'Error: Boolean field error, {default}')
            exit(1)
        # 'true'/'false' -> 'True'/'False' (same result as camelizing here).
        return default.capitalize()
    return f"'{default}'"
def parse_fields(fields):
    """Parse CLI field specs ("name:type[:...]" strings) into field dicts.

    Each spec is split on ':' and pattern-matched (pampy) against the
    supported shapes: 'ref' fields carry a target table and a camelized
    name, 'default' carries a literal resolved by resolve_assign, and any
    trailing tokens are validated as affixes.
    """
    from .schema import AFFIXES as SCH_AFFIXES
    attrs = [f.split(':') for f in fields]
    # Patterns are tried in order: the more specific ref/default combinations
    # must come before the generic "name:type[:affixes]" fallback.
    return [match(
        attr,
        [_, 'default', _, 'ref', _, TAIL], lambda x, val, tab, t: merge_fields({'field': x, 'cam_field': inflection.camelize(x), 'type': validate_type('ref', TYPES), 'table': tab, 'default': resolve_assign('ref', val)}, validate_attr(t, AFFIXES, SCH_AFFIXES)),  # noqa: E241,E272
        [_, 'ref', _, 'default', _, TAIL], lambda x, tab, val, t: merge_fields({'field': x, 'cam_field': inflection.camelize(x), 'type': validate_type('ref', TYPES), 'table': tab, 'default': resolve_assign('ref', val)}, validate_attr(t, AFFIXES, SCH_AFFIXES)),  # noqa: E241,E272
        [_, 'ref', _, TAIL], lambda x, tab, t: merge_fields({'field': x, 'cam_field': inflection.camelize(x), 'type': validate_type('ref', TYPES), 'table': tab}, validate_attr(t, AFFIXES, SCH_AFFIXES)),  # noqa: E241,E272
        [_, _, 'default', _, TAIL], lambda x, y, val, t: merge_fields({'field': x, 'type': validate_type(y, TYPES), 'default': resolve_assign(y, val)}, validate_attr(t, AFFIXES, SCH_AFFIXES)),  # noqa: E241,E272
        [_, _, TAIL], lambda x, y, t: merge_fields({'field': x, 'type': validate_type(y, TYPES)}, validate_attr(t, AFFIXES, SCH_AFFIXES))  # noqa: E241,E272
    ) for attr in attrs]
|
"""
maf2psl.py - convert a MAF formatted file to a PSL formatted file
=================================================================

:Tags: Python

Purpose
-------

Convert a MAF file to a PSL file.

Usage
-----

Type::

   python maf2psl.py --help

for command line help.

Command line options
--------------------

"""
import sys
import CGAT.Experiment as E
try:
from bx.align import maf
from bx.align.tools import get_components_for_species
except AttributeError:
pass
import CGAT.Blat as Blat
def threaditer(reader, species):
    """Iterate over ``reader``, yielding the per-species components of each
    alignment; alignments without components are skipped."""
    for alignment in reader:
        components = get_components_for_species(alignment, species)
        if components is None:
            continue
        yield components
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Reads MAF alignments from stdin, extracts the pairwise alignment between
    the ``--query`` and ``--target`` species and writes one PSL record per
    alignment block to stdout.

    Raises ValueError when the species identifiers are missing, cannot be
    parsed from an alignment source, or do not match the requested species.
    """
    if argv is None:
        # BUGFIX: compare with "is None" rather than truthiness so an
        # explicitly passed (even empty) argument list is honoured.
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id: maf2psl.py 2879 2010-04-06 14:44:34Z andreas $",
                            usage=globals()["__doc__"])

    parser.add_option("-q", "--query", dest="query", type="string",
                      help="sequence to use for query [default=%default].")

    parser.add_option("-t", "--target", dest="target", type="string",
                      help="sequence to use for target [default=%default].")

    parser.set_defaults(
        query=None,
        target=None,
    )

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)

    # fall back to positional arguments when the options were not supplied
    if options.query is None or options.target is None:
        if len(args) != 2:
            raise ValueError(
                "please supply two sequence identifiers for query and target")
        options.query, options.target = args

    ninput, nskipped, noutput = 0, 0, 0

    # options.stdin/options.stdout are presumably attached by E.Start (the
    # CGAT convention for redirectable streams) -- TODO confirm.
    reader = maf.Reader(options.stdin)
    psl = Blat.Match()

    for cc in threaditer(reader, (options.query, options.target)):
        ninput += 1
        query, target = cc

        # treat identifiers like Hsap.GL000223.1: the species is the part
        # before the first ".", the contig name is everything after it
        try:
            data = query.src.split(".")
            qs, qcontig = data[0], ".".join(data[1:])
        except ValueError as msg:
            raise ValueError(
                "error: could not parse query %s: msg=%s" % (query.src, msg))

        try:
            data = target.src.split(".")
            ts, tcontig = data[0], ".".join(data[1:])
        except ValueError as msg:
            raise ValueError(
                "error: could not parse target %s: msg=%s" % (target.src, msg))

        # BUGFIX: validate explicitly instead of assert -- asserts are
        # stripped under "python -O", which would silently disable these
        # input checks.
        if qs != options.query:
            raise ValueError(
                "query species mismatch: expected %s, got %s" %
                (options.query, qs))
        if ts != options.target:
            raise ValueError(
                "target species mismatch: expected %s, got %s" %
                (options.target, ts))

        psl.mQueryId = qcontig
        psl.mSbjctId = tcontig

        psl.fromPair(query.start, query.src_size, query.strand, query.text.upper(),
                     target.start, target.src_size, target.strand, target.text.upper())

        E.debug("%s\t%s\t%i\t%i\t%s\t%s" %
                (qs, qcontig, query.start, query.src_size, query.strand, query.text))
        E.debug("%s\t%s\t%i\t%i\t%s\t%s" %
                (ts, tcontig, target.start, target.src_size, target.strand, target.text))

        options.stdout.write("%s\n" % str(psl))
        noutput += 1

    E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))

    # write footer and output benchmark information.
    E.Stop()
# Script entry point: run main() and propagate its exit status to the shell.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'tools/TranslateTool/translatetoolgui.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TranslateToolGUI(object):
    """Qt Designer-generated UI for the Translate tool window.

    Builds a small form with X/Y/Z translation line edits (labelled in
    metres) and Apply/Cancel buttons.  Call ``setupUi`` with the host
    QMainWindow; it installs the widget tree and then calls
    ``retranslateUi`` to set all user-visible strings.
    """

    def setupUi(self, TranslateToolGUI):
        """Create and lay out all widgets on *TranslateToolGUI* (a QMainWindow)."""
        TranslateToolGUI.setObjectName("TranslateToolGUI")
        TranslateToolGUI.resize(189, 157)
        self.centralwidget = QtWidgets.QWidget(TranslateToolGUI)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 186, 155))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Title label, centred above the coordinate grid.
        self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter)
        # 3x3 grid: axis label | value line edit | unit label, one row per axis.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.x_label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.x_label.setObjectName("x_label")
        self.gridLayout.addWidget(self.x_label, 0, 0, 1, 1)
        self.y_label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.y_label.setFocusPolicy(QtCore.Qt.NoFocus)
        self.y_label.setObjectName("y_label")
        self.gridLayout.addWidget(self.y_label, 1, 0, 1, 1)
        self.z_label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.z_label.setObjectName("z_label")
        self.gridLayout.addWidget(self.z_label, 2, 0, 1, 1)
        self.y_trans = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        self.y_trans.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.y_trans.setObjectName("y_trans")
        self.gridLayout.addWidget(self.y_trans, 1, 1, 1, 1)
        self.x_trans = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        self.x_trans.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.x_trans.setClearButtonEnabled(False)
        self.x_trans.setObjectName("x_trans")
        self.gridLayout.addWidget(self.x_trans, 0, 1, 1, 1)
        self.z_trans = QtWidgets.QLineEdit(self.verticalLayoutWidget)
        self.z_trans.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.z_trans.setObjectName("z_trans")
        self.gridLayout.addWidget(self.z_trans, 2, 1, 1, 1)
        # Unit labels ("m") in the third column.
        self.m1 = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.m1.setObjectName("m1")
        self.gridLayout.addWidget(self.m1, 0, 2, 1, 1)
        self.m2 = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.m2.setObjectName("m2")
        self.gridLayout.addWidget(self.m2, 1, 2, 1, 1)
        self.m3 = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.m3.setObjectName("m3")
        self.gridLayout.addWidget(self.m3, 2, 2, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        # Bottom button row: Cancel on the left, Apply (default) on the right.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.cancel_button = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.cancel_button.setObjectName("cancel_button")
        self.horizontalLayout.addWidget(self.cancel_button)
        self.apply_button = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.apply_button.setDefault(True)
        self.apply_button.setObjectName("apply_button")
        self.horizontalLayout.addWidget(self.apply_button)
        self.verticalLayout.addLayout(self.horizontalLayout)
        TranslateToolGUI.setCentralWidget(self.centralwidget)

        self.retranslateUi(TranslateToolGUI)
        QtCore.QMetaObject.connectSlotsByName(TranslateToolGUI)

    def retranslateUi(self, TranslateToolGUI):
        """Install all user-visible (translatable) strings."""
        _translate = QtCore.QCoreApplication.translate
        # BUGFIX: window title read "Scale Tool" -- a copy-paste from the
        # scale tool UI; this is the Translate tool (see the label below).
        TranslateToolGUI.setWindowTitle(_translate("TranslateToolGUI", "Translate Tool"))
        self.label.setText(_translate("TranslateToolGUI", "Translate Tool"))
        self.x_label.setText(_translate("TranslateToolGUI", "X"))
        self.y_label.setText(_translate("TranslateToolGUI", "Y"))
        self.z_label.setText(_translate("TranslateToolGUI", "Z"))
        self.y_trans.setText(_translate("TranslateToolGUI", "0.0"))
        self.x_trans.setText(_translate("TranslateToolGUI", "0.0"))
        self.z_trans.setText(_translate("TranslateToolGUI", "0.0"))
        self.m1.setText(_translate("TranslateToolGUI", "m"))
        self.m2.setText(_translate("TranslateToolGUI", "m"))
        self.m3.setText(_translate("TranslateToolGUI", "m"))
        self.cancel_button.setText(_translate("TranslateToolGUI", "Cancel"))
        self.apply_button.setText(_translate("TranslateToolGUI", "Apply"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.