code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/python
import xml.etree.ElementTree as ET
import numpy as np
import os,sys, time
import argparse
import yaml
import matplotlib.pyplot as plt
template_location=os.environ['HOME']+'/simsup_ws/src/simulation_supervised/simulation_supervised_demo/extensions/templates/'
prefab_textures=['Gazebo/Grey','Gazebo/Blue','Gazebo/Red','Gazebo/Green','Gazebo/White','Gazebo/Black']
prefab_obstacles=['person_standing','bookshelf']
def generate_panel(dummy='', # used to set a dummy argument in order to avoid empty argument dictionaries
                  height=None,
                  width=None,
                  thickness=None,
                  z_location=None,
                  offset=None,
                  texture=None,
                  wall=None,
                  verbose=False):
    """Create a panel <model> element placed in a centered corridor segment.

    Args:
        height: height of the panel; random in [0.1, 2) if None.
        width: width of the panel; random in [0.1, 2) if None.
        thickness: panel thickness; random in [0.001, 0.3) if None.
        z_location: relative height above the ground, always between 0 and 1;
            random if None.
        offset: relative position within the corridor segment; random in
            [-0.5, 0.5) if None.
        texture: texture name that has to be found in
            simsup_demo/extensions/textures; random prefab texture if None.
        wall: one of 'left', 'right', 'front', 'back'; picked randomly from
            ('right', 'left') if None.
        verbose: print the chosen parameters.

    Returns:
        The adjusted panel <model> XML element.
    """
    # fill in randomly all that is not specified
    if height == None: height = np.random.uniform(0.1, 2)
    if width == None: width = np.random.uniform(0.1, 2)
    if thickness == None: thickness = np.random.uniform(0.001, 0.3)
    if z_location == None: z_location = np.random.uniform(0, 1)  # ALWAYS BETWEEN 0 and 1
    if offset == None: offset = np.random.uniform(-0.5, 0.5)
    if texture == None: texture = np.random.choice(prefab_textures)
    # BUG FIX: the original assigned the random choice to `texture` instead of
    # `wall`, leaving `wall` at None and crashing on the position lookup below.
    if wall == None: wall = np.random.choice(["right", "left"])
    if verbose: print("[generate_panel]: height {0}, width {1}, thicknes {2}, z_location {3}, offset {4}, texture {5}, wall {6}".format(height,width,thickness,z_location,offset,texture,wall))
    # Get template
    panel_tree = ET.parse(template_location+'panel.xml')
    panel = panel_tree.getroot().find('world').find('model')
    # change shape of panel according to heigth and width
    for child in iter(['collision', 'visual']):
        size_el = panel.find('link').find(child).find('geometry').find('box').find('size')
        # thickness is multiplied as the center of the panel remains in the wall
        # this ensures the offset by multiplying with width/2 remains correct
        size_el.text = str(2*thickness)+" "+str(width)+" "+str(height)
    # change position of panel according to wall, z_location and offset
    pose_el = panel.find('pose')
    # pull the panel center inwards so the panel does not stick out of the segment
    offset = np.sign(offset)*(np.abs(offset)-width/2)
    position = {"left": [-1, offset],
                "right": [1, offset],
                "front": [offset, 1],
                "back": [offset, -1]}
    orientation = {"left": 0,
                   "right": 0,
                   "front": 1.57,
                   "back": 1.57}
    pose_6d = [float(v) for v in pose_el.text.split(' ')]
    pose_el.text = str(position[wall][0])+" "+str(position[wall][1])+" "+str(z_location*(2-height)+height/2.)+" "+str(pose_6d[3])+" "+str(pose_6d[4])+" "+str(orientation[wall])
    # adjust texture
    material = panel.find('link').find('visual').find('material').find('script').find('name')
    material.text = texture
    return panel
def generate_passway(dummy='',
                     name=None,
                     model_dir='',
                     offset=None,
                     texture=None,
                     verbose=False):
    """Load a passway model (arc/doorway) from simsup_demo/models, center it
    on the tile and adjust its lateral offset and texture.

    Returns the adjusted <model> XML element, or -1 when the model file
    cannot be found.
    """
    # fill in unspecified parameters with random defaults
    name = name if name is not None else np.random.choice(['arc','doorway'])
    offset = offset if offset is not None else np.random.uniform(-0.8,0.8)
    texture = texture if texture is not None else np.random.choice(prefab_textures)
    if not model_dir:
        model_dir = os.environ['HOME']+'/simsup_ws/src/simulation_supervised/simulation_supervised_demo/models'
    sdf_path = model_dir+'/'+name+'/model.sdf'
    if not os.path.isfile(sdf_path):
        print("[extension_generator]: failed to load model {0}, not in {1}".format(name, model_dir))
        return -1
    if verbose:
        print("[generate_passway]: name {0}, offset {1}, texture {2}".format(name, offset, texture))
    # load element
    passway = ET.parse(sdf_path).getroot().find('model')
    # shift the model sideways by the requested offset
    pose_6d = [float(v) for v in passway.find('pose').text.split(' ')]
    pose_6d[1] = offset
    passway.find('pose').text = ' '.join(str(v) for v in pose_6d)
    # adjust texture
    passway.find('link').find('visual').find('material').find('script').find('name').text = texture
    return passway
def generate_obstacle(dummy='',
                  name=None,
                  model_dir='',
                  side_offset=None,
                  offset=None,
                  wall=None,
                  verbose=False):
    """Load an obstacle model from simsup_demo/models and place it on the
    side of the tile.

    Args:
        name: model name; random prefab obstacle if None.
        model_dir: directory containing the models; defaults to the
            simulation_supervised_demo models folder.
        side_offset: distance from the tile center towards the wall;
            random in [0.7, 0.9) if None.
        offset: position along the wall; random in [-0.5, 0.5) if None.
        wall: one of 'left', 'right', 'front', 'back'; picked randomly from
            ('right', 'left') if None.
        verbose: print the chosen parameters.

    Returns:
        The adjusted obstacle <model> XML element, or -1 when the model
        file cannot be found.
    """
    if name == None: name = np.random.choice(prefab_obstacles)
    # BUG FIX: the original assigned the random value to `offset` instead of
    # `side_offset`, leaving `side_offset` at None and crashing when the
    # position dictionary below computes -side_offset.
    if side_offset == None: side_offset = np.random.uniform(0.7, 0.9)
    if offset == None: offset = np.random.uniform(-0.5, 0.5)
    if wall == None: wall = np.random.choice(["right", "left"])
    if not model_dir: model_dir = os.environ['HOME']+'/simsup_ws/src/simulation_supervised/simulation_supervised_demo/models'
    if not os.path.isfile(model_dir+'/'+name+'/model.sdf'):
        print("[extension_generator]: failed to load model {0}, not in {1}".format(name, model_dir))
        return -1
    if verbose: print("[generate_obstacle]: name {0}, offset {1}, wall {2}".format(name, offset, wall))
    # load element
    obstacle_tree = ET.parse(model_dir+'/'+name+'/model.sdf')
    obstacle = obstacle_tree.getroot().find('model')
    # adjust position: push the model against the requested wall
    position = {"left": [-side_offset, offset],
                "right": [side_offset, offset],
                "front": [offset, side_offset],
                "back": [offset, -side_offset]}
    orientation = {"left": 0,
                   "right": 0,
                   "front": 1.57,
                   "back": 1.57}
    pose_6d = [float(v) for v in obstacle.find('pose').text.split(' ')]
    obstacle.find('pose').text = str(position[wall][0])+" "+str(position[wall][1])+" "+str(pose_6d[2])+" "+str(pose_6d[3])+" "+str(pose_6d[4])+" "+str(orientation[wall])
    return obstacle
def generate_ceiling(dummy='',
                    name=None,
                    model_dir='',
                    tile_type=1,
                    length = 2,
                    texture=None,
                    verbose=False):
    """Load model with name 'name_tile_type' {1,2 or 3} from model_dir.
    Adjust the texture accordingly.

    Args:
        name: 'pipes' or 'ceiling'; chosen randomly if None.
        model_dir: models directory; defaults to the
            simulation_supervised_demo models folder.
        tile_type: corridor tile type; 0 and 4 (start/end) are mapped to
            1 (straight), 2 and 3 are the two corner types.
        length: total length (in tiles) the ceiling has to cover.
        texture: if not None, applied to every returned model.
        verbose: talk more.

    Returns:
        A list of <model> XML elements, or -1 when the model file is missing.
    """
    if tile_type in [0,4]: tile_type=1 #the start and end tile is the same as straight
    if name == None: name=np.random.choice(['pipes','ceiling'])
    if not model_dir: model_dir=os.environ['HOME']+'/simsup_ws/src/simulation_supervised/simulation_supervised_demo/models'
    if not os.path.isfile(model_dir+'/'+name+'_'+str(tile_type)+'/model.sdf'):
        print("[extension_generator]: failed to load model {0}, not in {1}".format(name, model_dir))
        return -1
    if verbose: print("[generate_ceiling]: name {0}, tile_type {1}, texture {2}".format(name, tile_type, texture))
    # load element
    ceiling_tree = ET.parse(model_dir+'/'+name+'_'+str(tile_type)+'/model.sdf')
    ceiling_models = ceiling_tree.getroot().findall('model')
    # adjust length of elements
    if name in ['pipes']:
        # pipes are cylinders: stretching their length covers the corridor
        for ceiling in ceiling_models:
            for child in iter(['collision', 'visual']):
                length_el=ceiling.find('link').find(child).find('geometry').find('cylinder').find('length')
                length_el.text = str(length)
    elif name == 'ceiling':
        # ceiling tiles are fixed-size: extra straight (type 1) segments are
        # duplicated on both sides of the centered segment to cover `length`
        # add straight segments before the centered segment
        extra_length = (length-1.)/2. #0.5 in case of length 2
        for i in np.arange(0, extra_length, 1):
            # add straight segments on correct location according to tile type
            straight_ceiling_tree = ET.parse(model_dir+'/'+name+'_'+str(1)+'/model.sdf')
            straight_ceiling_models = straight_ceiling_tree.getroot().findall('model')
            for ceiling in straight_ceiling_models:
                pose_6d=[float(v) for v in ceiling.find('pose').text.split(' ')]
                # segments before the center go in the negative y direction
                pose_6d[1] = -1*(i+1)
                ceiling.find('pose').text=str(pose_6d[0])+' '+str(pose_6d[1])+' '+str(pose_6d[2])+' '+str(pose_6d[3])+' '+str(pose_6d[4])+' '+str(pose_6d[5])
                ceiling_models.append(ceiling)
        # add straight segments before the centered segment
        extra_length = (length-1.)/2. #0.5 in case of length 2
        for i in np.arange(0, extra_length, 1):
            # add straight segments on correct location according to tile type
            straight_ceiling_tree = ET.parse(model_dir+'/'+name+'_'+str(1)+'/model.sdf')
            straight_ceiling_models = straight_ceiling_tree.getroot().findall('model')
            for ceiling in straight_ceiling_models:
                pose_6d=[float(v) for v in ceiling.find('pose').text.split(' ')]
                # the direction of the trailing segments depends on whether
                # the centered tile is straight (1) or a corner (2 or 3)
                if tile_type == 1:
                    pose_6d[1] = (i+1)
                elif tile_type == 2:
                    pose_6d[0] = (i+1)
                    pose_6d[5] = 1.57
                elif tile_type == 3:
                    pose_6d[0] = -(i+1)
                    pose_6d[5] = 1.57
                ceiling.find('pose').text=str(pose_6d[0])+' '+str(pose_6d[1])+' '+str(pose_6d[2])+' '+str(pose_6d[3])+' '+str(pose_6d[4])+' '+str(pose_6d[5])
                ceiling_models.append(ceiling)
    # adjust texture
    if texture != None:
        for ceiling in ceiling_models:
            material=ceiling.find('link').find('visual').find('material').find('script').find('name')
            material.text=texture
    return ceiling_models
def generate_blocked_hole(wall,
                      width,
                      height,
                      dummy='',
                      name='blocked_hole_segment.world',
                      world_dir='',
                      texture=None,
                      corridor_type='normal',
                      verbose=False):
    """Extract the wall-with-door models for one wall from a template world
    and rescale/reposition them for the requested corridor width and height.

    wall: wall has to specified ('left', 'right', 'front' or 'back')
    width: corridor (tile) width the wall has to span
    height: corridor height; overridden to 10 for 'empty' corridors
    name: name of segment world file from which block_hole is extracted
    world_dir: world directory (defaults to simulation_supervised_demo/worlds)
    texture: in case of None take default defined in blocked_hole_segment
    corridor_type: 'normal' or 'empty'
    verbose: talk more

    Returns the list of adjusted <model> elements, or -1 if the world file
    is missing.
    """
    if len(world_dir)==0: world_dir=os.environ['HOME']+'/simsup_ws/src/simulation_supervised/simulation_supervised_demo/worlds'
    if not os.path.isfile(world_dir+'/'+name):
        print("[generate_blocked_hole]: failed to load model {0}, not in {1}".format(name, world_dir))
        return -1
    if verbose: print("[generate_blocked_hole]: name {0}, wall {1}, texture {2}".format(name, wall, texture))
    # load model from blocked world segment
    tree = ET.parse(world_dir+'/'+name)
    root = tree.getroot()
    world = root.find('world')
    # only the models belonging to the requested wall are extracted
    elements=[ m for m in world.findall('model') if m.attrib['name'].startswith('wall_'+wall) ]
    # incase the corridor_type is not normal but empty, make the wall much larger
    if corridor_type == 'empty':
        height = 10
    # adjust scale according to width and height
    # wall are made in blocked_hole_segment of width 2 and height 2.5
    # the inside wall are kept the same shape as they represent the door
    # the front walls are adjusted to the width and height
    for m in elements:
        if 'front_left' in m.attrib['name'] or 'front_right' in m.attrib['name']:
            # adjust width and height of left and right front wall
            for e in ['collision','visual']:
                size_element = m.find('link').find(e).find('geometry').find('box').find('size')
                size = [float(v) for v in size_element.text.split(' ')]
                # side with corresponds on the tile-width that influences the width of the wall
                # the width of the tile influences both the length of the wall as the position from the center
                # only the length should be changed (side_width) to a large value in case of empty corridor
                side_width = width if corridor_type == 'normal' else 50
                size[0] = (side_width-1)/2.
                size[2] = height
                size_element.text=str(size[0])+' '+str(size[1])+' '+str(size[2])
            # adjust pose according to width
            # NOTE(review): `side_width` deliberately leaks out of the loop
            # above and is reused below.
            pose_element=m.find('pose')
            pose_6d=[float(v) for v in pose_element.text.split(' ')]
            if wall == 'right':
                pose_6d[0] = width/2.
                # half of the width of the front panel plus 0.5 for a door of width 1
                pose_6d[1] = ((side_width-1)/2./2.+0.5)
                pose_6d[1] = pose_6d[1] if 'front_left' in m.attrib['name'] else -pose_6d[1] #change in opposite direction on the right side
            elif wall == 'left':
                pose_6d[0] = -width/2.
                pose_6d[1] = ((side_width-1)/2./2.+0.5)
                pose_6d[1] = pose_6d[1] if 'front_left' in m.attrib['name'] else -pose_6d[1]
            elif wall == 'front':
                pose_6d[1] = width/2.
                pose_6d[0] = ((side_width-1)/2./2.+0.5)
                pose_6d[0] = pose_6d[0] if 'front_left' in m.attrib['name'] else -pose_6d[0]
            elif wall == 'back':
                pose_6d[1] = -width/2.
                pose_6d[0] = ((side_width-1)/2./2.+0.5)
                pose_6d[0] = pose_6d[0] if 'front_left' in m.attrib['name'] else -pose_6d[0]
            pose_6d[2] = height/2.
            pose_element.text = str(pose_6d[0])+' '+str(pose_6d[1])+' '+str(pose_6d[2])+' '+str(pose_6d[3])+' '+str(pose_6d[4])+' '+str(pose_6d[5])
        if 'front_up' in m.attrib['name']:
            # adjust height of up pannel
            for e in ['collision','visual']:
                size_element = m.find('link').find(e).find('geometry').find('box').find('size')
                size = [float(v) for v in size_element.text.split(' ')]
                # keep at least a sliver so the element never degenerates
                size[2] = max(0.01, height-2)
                size_element.text=str(size[0])+' '+str(size[1])+' '+str(size[2])
            # adjust pose of up panel according to width and height
            pose_element=m.find('pose')
            pose_6d=[float(v) for v in pose_element.text.split(' ')]
            # in case of right wall
            if wall == 'right': pose_6d[0] = width/2.
            elif wall == 'left': pose_6d[0] = -width/2.
            elif wall == 'front': pose_6d[1] = width/2.
            elif wall == 'back': pose_6d[1] = -width/2.
            # half of the width of the front panel plus 0.5 for a door of width 1
            pose_6d[2] = (height-2)/2.+2
            pose_element.text = str(pose_6d[0])+' '+str(pose_6d[1])+' '+str(pose_6d[2])+' '+str(pose_6d[3])+' '+str(pose_6d[4])+' '+str(pose_6d[5])
        if 'inside' in m.attrib['name']:
            # move the door a bit back according to the width of the corridor
            pose_element=m.find('pose')
            pose_6d=[float(v) for v in pose_element.text.split(' ')]
            # in case of right wall
            if wall == 'right': pose_6d[0] = width/2.+0.25 if not 'back' in m.attrib['name'] else width/2.+0.5
            elif wall == 'left': pose_6d[0] = -(width/2.+0.25) if not 'back' in m.attrib['name'] else -(width/2.+0.5)
            elif wall == 'front': pose_6d[1] = width/2.+0.25 if not 'back' in m.attrib['name'] else width/2.+0.5
            elif wall == 'back': pose_6d[1] = -(width/2.+0.25) if not 'back' in m.attrib['name'] else -(width/2.+0.5)
            pose_element.text = str(pose_6d[0])+' '+str(pose_6d[1])+' '+str(pose_6d[2])+' '+str(pose_6d[3])+' '+str(pose_6d[4])+' '+str(pose_6d[5])
        # change texture
        if texture != None:
            material_element= m.find('link').find('visual').find('material').find('script').find('name')
            material_element.text = texture
    return elements
def generate_floor(width,
                  dummy='', # used to set a dummy argument in order to avoid empty argument dictionaries
                  name=None,
                  texture=None,
                  corridor_type='normal',
                  tile_type=1,
                  wide_tile=True,
                  verbose=False):
    """Create a floor <model> element placed in a centered segment.

    Args:
        width: width of the tile.
        name: floor template name; currently always 'floor' if None.
        texture: texture name that has to be found in
            simsup_demo/extensions/textures; random prefab texture if None.
        corridor_type: 'normal' or 'empty'; 'empty' gets a much larger floor.
        tile_type: tile type; type 4 (end tile) never gets a widened floor.
        wide_tile: if True the floor is made bigger (used for covering the
            floor of a blocked hole).
        verbose: print the chosen texture.

    Returns:
        The adjusted floor <model> XML element.
    """
    # fill in randomly all that is not specified
    if texture == None: texture = np.random.choice(prefab_textures)
    if name == None: name = np.random.choice(['floor'])
    if verbose: print("[generate_floor]: texture {0}".format(texture))
    # Get template
    floor_tree = ET.parse(template_location+name+'.xml')
    floor = floor_tree.getroot().find('world').find('model')
    # change shape of floor according to heigth and width
    for child in iter(['collision', 'visual']):
        size_el = floor.find('link').find(child).find('geometry').find('box').find('size')
        # BUG FIX: the 'empty' corridor branch used to fall through into the
        # next independent `if`, which immediately overwrote the 20*width
        # floor with the 2*width one; the branches are mutually exclusive.
        if corridor_type == 'empty' and tile_type != 4 and wide_tile:
            size_el.text = str(20*width)+" "+str(20*width)+" "+size_el.text.split(' ')[-1]
        elif tile_type != 4 and wide_tile:
            size_el.text = str(2*width)+" "+str(2*width)+" "+size_el.text.split(' ')[-1]
        else:
            size_el.text = str(width)+" "+str(width)+" "+size_el.text.split(' ')[-1]
    # adjust texture
    material = floor.find('link').find('visual').find('material').find('script').find('name')
    material.text = texture
    return floor
| [
"numpy.random.uniform",
"xml.etree.ElementTree.parse",
"numpy.random.choice",
"numpy.abs",
"os.path.isfile",
"numpy.arange",
"numpy.sign"
] | [((1716, 1757), 'xml.etree.ElementTree.parse', 'ET.parse', (["(template_location + 'panel.xml')"], {}), "(template_location + 'panel.xml')\n", (1724, 1757), True, 'import xml.etree.ElementTree as ET\n'), ((3887, 3934), 'xml.etree.ElementTree.parse', 'ET.parse', (["(model_dir + '/' + name + '/model.sdf')"], {}), "(model_dir + '/' + name + '/model.sdf')\n", (3895, 3934), True, 'import xml.etree.ElementTree as ET\n'), ((5275, 5322), 'xml.etree.ElementTree.parse', 'ET.parse', (["(model_dir + '/' + name + '/model.sdf')"], {}), "(model_dir + '/' + name + '/model.sdf')\n", (5283, 5322), True, 'import xml.etree.ElementTree as ET\n'), ((9951, 9983), 'xml.etree.ElementTree.parse', 'ET.parse', (["(world_dir + '/' + name)"], {}), "(world_dir + '/' + name)\n", (9959, 9983), True, 'import xml.etree.ElementTree as ET\n'), ((15359, 15402), 'xml.etree.ElementTree.parse', 'ET.parse', (["(template_location + name + '.xml')"], {}), "(template_location + name + '.xml')\n", (15367, 15402), True, 'import xml.etree.ElementTree as ET\n'), ((1086, 1111), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(2)'], {}), '(0.1, 2)\n', (1103, 1111), True, 'import numpy as np\n'), ((1137, 1162), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(2)'], {}), '(0.1, 2)\n', (1154, 1162), True, 'import numpy as np\n'), ((1196, 1225), 'numpy.random.uniform', 'np.random.uniform', (['(0.001)', '(0.3)'], {}), '(0.001, 0.3)\n', (1213, 1225), True, 'import numpy as np\n'), ((1261, 1284), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1278, 1284), True, 'import numpy as np\n'), ((1337, 1365), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1354, 1365), True, 'import numpy as np\n'), ((1395, 1428), 'numpy.random.choice', 'np.random.choice', (['prefab_textures'], {}), '(prefab_textures)\n', (1411, 1428), True, 'import numpy as np\n'), ((1456, 1491), 'numpy.random.choice', 'np.random.choice', (["['right', 'left']"], {}), 
"(['right', 'left'])\n", (1472, 1491), True, 'import numpy as np\n'), ((2386, 2401), 'numpy.sign', 'np.sign', (['offset'], {}), '(offset)\n', (2393, 2401), True, 'import numpy as np\n'), ((3293, 3329), 'numpy.random.choice', 'np.random.choice', (["['arc', 'doorway']"], {}), "(['arc', 'doorway'])\n", (3309, 3329), True, 'import numpy as np\n'), ((3357, 3385), 'numpy.random.uniform', 'np.random.uniform', (['(-0.8)', '(0.8)'], {}), '(-0.8, 0.8)\n', (3374, 3385), True, 'import numpy as np\n'), ((3415, 3448), 'numpy.random.choice', 'np.random.choice', (['prefab_textures'], {}), '(prefab_textures)\n', (3431, 3448), True, 'import numpy as np\n'), ((3580, 3633), 'os.path.isfile', 'os.path.isfile', (["(model_dir + '/' + name + '/model.sdf')"], {}), "(model_dir + '/' + name + '/model.sdf')\n", (3594, 3633), False, 'import os, sys, time\n'), ((4630, 4664), 'numpy.random.choice', 'np.random.choice', (['prefab_obstacles'], {}), '(prefab_obstacles)\n', (4646, 4664), True, 'import numpy as np\n'), ((4698, 4725), 'numpy.random.uniform', 'np.random.uniform', (['(0.7)', '(0.9)'], {}), '(0.7, 0.9)\n', (4715, 4725), True, 'import numpy as np\n'), ((4753, 4781), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (4770, 4781), True, 'import numpy as np\n'), ((4807, 4842), 'numpy.random.choice', 'np.random.choice', (["['right', 'left']"], {}), "(['right', 'left'])\n", (4823, 4842), True, 'import numpy as np\n'), ((4976, 5029), 'os.path.isfile', 'os.path.isfile', (["(model_dir + '/' + name + '/model.sdf')"], {}), "(model_dir + '/' + name + '/model.sdf')\n", (4990, 5029), False, 'import os, sys, time\n'), ((6266, 6304), 'numpy.random.choice', 'np.random.choice', (["['pipes', 'ceiling']"], {}), "(['pipes', 'ceiling'])\n", (6282, 6304), True, 'import numpy as np\n'), ((9638, 9676), 'os.path.isfile', 'os.path.isfile', (["(world_dir + '/' + name)"], {}), "(world_dir + '/' + name)\n", (9652, 9676), False, 'import os, sys, time\n'), ((15169, 15202), 
'numpy.random.choice', 'np.random.choice', (['prefab_textures'], {}), '(prefab_textures)\n', (15185, 15202), True, 'import numpy as np\n'), ((15227, 15254), 'numpy.random.choice', 'np.random.choice', (["['floor']"], {}), "(['floor'])\n", (15243, 15254), True, 'import numpy as np\n'), ((2403, 2417), 'numpy.abs', 'np.abs', (['offset'], {}), '(offset)\n', (2409, 2417), True, 'import numpy as np\n'), ((7317, 7346), 'numpy.arange', 'np.arange', (['(0)', 'extra_length', '(1)'], {}), '(0, extra_length, 1)\n', (7326, 7346), True, 'import numpy as np\n'), ((8056, 8085), 'numpy.arange', 'np.arange', (['(0)', 'extra_length', '(1)'], {}), '(0, extra_length, 1)\n', (8065, 8085), True, 'import numpy as np\n')] |
# Currently not-thorough testing just to speed validation of the basic library functionality
import numpy as np
import tinygraph as tg
import graph_test_suite
import io
import pytest
# Shared collection of named test graphs, built once at import time and
# consumed by the parametrized tests below.
suite = graph_test_suite.get_full_suite()
def test_create_graphs_types():
    """
    Simple tests to try creating graphs of various dtypes.

    An edge is written through one endpoint ordering and read back through
    both, checking the symmetric (undirected) adjacency.
    """
    # FIX: np.bool (a deprecated alias for the builtin bool) was removed in
    # NumPy 1.24; the builtin works as the equivalent dtype.
    g1_bool = tg.TinyGraph(5, bool)
    g1_bool[3, 2] = True
    assert g1_bool[2, 3] == True
    assert g1_bool[3, 2] == True

    g1_int32 = tg.TinyGraph(5, np.int32)
    g1_int32[3, 2] = 7
    assert g1_int32[3, 2] == 7
    assert g1_int32[2, 3] == 7

    g1_float64 = tg.TinyGraph(5, np.float64)
    g1_float64[3, 2] = 3.14
    assert g1_float64[3, 2] == 3.14
    assert g1_float64[2, 3] == 3.14
def test_graph_properties():
    """Vertex and edge properties can be written and read back unchanged."""
    graph = tg.TinyGraph(5, np.int32,
                 vp_types={'color': np.int32},
                 ep_types={'color2': np.int32})
    vertex_colors = {2: 5, 3: 8}
    for vertex, value in vertex_colors.items():
        graph.v['color'][vertex] = value
    graph[2, 3] = 1
    graph.e['color2'][2, 3] = 10
    for vertex, value in vertex_colors.items():
        assert graph.v['color'][vertex] == value
    assert graph.e['color2'][2, 3] == 10
def test_misbehavior():
    """Illegal indexing raises the documented exceptions."""
    g = tg.TinyGraph(3, vp_types={"color": np.int32},
                 ep_types={"width": np.int32})
    endpoint_msg = 'Expecting exactly two endpoints.'
    # adjacency access needs exactly two endpoints, on both set and get
    with pytest.raises(KeyError, match=endpoint_msg):
        g[0] = 3
    with pytest.raises(KeyError, match=endpoint_msg):
        g[0, 1, 2] = 3
    with pytest.raises(IndexError, match='Self-loops are not allowed.'):
        g[1, 1] = 3
    with pytest.raises(KeyError, match=endpoint_msg):
        _ = g[0]
    with pytest.raises(KeyError, match=endpoint_msg):
        _ = g[0, 1, 2]
    # the same endpoint rule applies to edge-property access
    with pytest.raises(KeyError, match=endpoint_msg):
        g.e['width'][0] = 3
    with pytest.raises(KeyError, match=endpoint_msg):
        g.e['width'][0, 1, 2] = 3
    with pytest.raises(KeyError, match=endpoint_msg):
        _ = g.e["width"][0]
    with pytest.raises(KeyError, match=endpoint_msg):
        _ = g.e['width'][0, 1, 2]
    # vertex properties take a single vertex index only
    with pytest.raises(IndexError):
        g.v['color'][0, 2] = 1
    with pytest.raises(IndexError):
        _ = g.v['color'][0, 2]
def test_basic_functionality():
    """Vertex and edge counts stay consistent through add and remove."""
    graph = tg.TinyGraph(2, vp_types={'color': np.int32})
    assert graph.vert_N == 2
    graph[1, 0] = 1
    assert graph.edge_N == 1
    graph.add_vertex(props={'color': 3})
    graph[1, 2] = 1
    assert graph.vert_N == 3
    assert graph.edge_N == 2
    # dropping vertex 0 removes its incident edge and shifts indices down
    graph.remove_vertex(0)
    assert graph.edge_N == 1
    assert graph.vert_N == 2
    assert graph.v['color'][1] == 3
    assert graph.v['color'][0] == 0
def test_items():
    """String vertex properties round-trip correctly."""
    # FIX: np.str (a deprecated alias for the builtin str) was removed in
    # NumPy 1.24; the builtin works as the equivalent dtype.
    t = tg.TinyGraph(3, vp_types={'name': str})
    t.v['name'][0] = 'a'
    t.v['name'][1] = 'b'
    t.v['name'][2] = 'c'
    assert(t.v['name'][0] == 'a')
    assert(t.v['name'][1] == 'b')
    assert(t.v['name'][2] == 'c')
def test_add_props():
    """
    Vertex and edge properties can be added and removed after construction.
    """
    graph = tg.TinyGraph(10, np.float32, ep_types={'color': np.int32})
    graph.add_vert_prop('color1', np.float32)
    assert 'color1' in graph.v
    graph.add_edge_prop('color2', np.float32)
    assert 'color2' in graph.e
    assert len(graph.e) == 2
    # removal shrinks the property maps back down
    graph.remove_edge_prop('color2')
    assert len(graph.e) == 1
    graph.remove_vert_prop('color1')
    assert len(graph.v) == 0
def test_get_neighbors():
    """
    Neighbors are reported correctly for every vertex of a small graph.
    """
    g = tg.TinyGraph(6)
    for a, b in [(0, 1), (0, 2), (1, 2), (0, 3), (0, 5), (3, 4), (4, 5)]:
        g[a, b] = 1
    expected = {0: [1, 2, 3, 5],
                1: [0, 2],
                2: [0, 1],
                3: [0, 4],
                4: [3, 5],
                5: [0, 4]}
    for vertex, neighbors in expected.items():
        assert np.array_equal(g.get_neighbors(vertex), np.array(neighbors))
def test_copy():
    """
    Copy the graph and ensure that changes are not propagated.
    """
    # FIX: np.int (a deprecated alias for the builtin int) was removed in
    # NumPy 1.24; the builtin works as the equivalent adjacency dtype.
    g = tg.TinyGraph(3,
                 adj_type=int,
                 vp_types={'name': np.dtype("<U10")},
                 ep_types={'edgename': np.dtype("<U20")})
    g.v['name'][0] = "Alice"
    g.v['name'][1] = "Bob"
    g.v['name'][2] = "Eve"
    g[0, 1] = 1
    g[1, 2] = 2
    g[2, 0] = 1
    g.e['edgename'][0, 1] = "main"
    g.e['edgename'][2, 0] = "intercept 1"
    g.e['edgename'][1, 2] = "intercept 2"
    # Deep copy
    h = g.copy()
    # Trimming h's edges doesn't affect g's adjacency matrix
    h[1, 2] = 0
    h[0, 2] = 0
    assert np.all(g.adjacency == [[0, 1, 1], [1, 0, 2], [1, 2, 0]])
    assert np.all(h.adjacency == [[0, 1, 0], [1, 0, 0], [0, 0, 0]])
    # Also change the vertex properties
    h.v['name'][0] = "Adam"
    h.v['name'][1] = "Barbara"
    h.v['name'][2] = "Ernie"
    assert np.all(g.v['name'] == ["Alice", "Bob", "Eve"])
    assert np.all(h.v['name'] == ["Adam", "Barbara", "Ernie"])
    # And edge properties
    h.e['edgename'][0, 1] = 'Maine!'
    assert np.all(h.e_p['edgename'] == [["", "Maine!", ""],
                                  ["Maine!", "", ""],
                                  ["", "", ""]])
    assert np.all(g.e_p['edgename'] == [["", "main", "intercept 1"],
                                  ["main", "", "intercept 2"],
                                  ["intercept 1", "intercept 2", ""]])
def test_graph_props():
    """
    Per-graph properties participate in graph equality.
    """
    first = tg.TinyGraph(10)
    second = tg.TinyGraph(10)
    for graph in (first, second):
        graph.props['foo'] = 'bar'
    assert tg.util.graph_equality(first, second)
    # an extra property on one side breaks equality
    second.props['baz'] = 7
    assert not tg.util.graph_equality(first, second)
@pytest.mark.parametrize("test_name", list(suite))
def test_copy_suite(test_name):
    """
    Every graph in the shared suite compares equal to its copy.
    """
    for original in suite[test_name]:
        duplicate = original.copy()
        assert tg.util.graph_equality(duplicate, original)
| [
"graph_test_suite.get_full_suite",
"numpy.dtype",
"tinygraph.TinyGraph",
"pytest.raises",
"numpy.array",
"tinygraph.util.graph_equality",
"numpy.all"
] | [((193, 226), 'graph_test_suite.get_full_suite', 'graph_test_suite.get_full_suite', ([], {}), '()\n', (224, 226), False, 'import graph_test_suite\n'), ((349, 373), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(5)', 'np.bool'], {}), '(5, np.bool)\n', (361, 373), True, 'import tinygraph as tg\n'), ((481, 506), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(5)', 'np.int32'], {}), '(5, np.int32)\n', (493, 506), True, 'import tinygraph as tg\n'), ((611, 638), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(5)', 'np.float64'], {}), '(5, np.float64)\n', (623, 638), True, 'import tinygraph as tg\n'), ((823, 913), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(5)', 'np.int32'], {'vp_types': "{'color': np.int32}", 'ep_types': "{'color2': np.int32}"}), "(5, np.int32, vp_types={'color': np.int32}, ep_types={'color2':\n np.int32})\n", (835, 913), True, 'import tinygraph as tg\n'), ((1252, 1327), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(3)'], {'vp_types': "{'color': np.int32}", 'ep_types': "{'width': np.int32}"}), "(3, vp_types={'color': np.int32}, ep_types={'width': np.int32})\n", (1264, 1327), True, 'import tinygraph as tg\n'), ((2421, 2466), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(2)'], {'vp_types': "{'color': np.int32}"}), "(2, vp_types={'color': np.int32})\n", (2433, 2466), True, 'import tinygraph as tg\n'), ((2809, 2851), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(3)'], {'vp_types': "{'name': np.str}"}), "(3, vp_types={'name': np.str})\n", (2821, 2851), True, 'import tinygraph as tg\n'), ((3129, 3187), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(10)', 'np.float32'], {'ep_types': "{'color': np.int32}"}), "(10, np.float32, ep_types={'color': np.int32})\n", (3141, 3187), True, 'import tinygraph as tg\n'), ((3590, 3605), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(6)'], {}), '(6)\n', (3602, 3605), True, 'import tinygraph as tg\n'), ((4846, 4902), 'numpy.all', 'np.all', (['(g.adjacency == [[0, 1, 1], [1, 0, 2], [1, 2, 0]])'], {}), '(g.adjacency == [[0, 1, 1], [1, 0, 2], [1, 2, 0]])\n', 
(4852, 4902), True, 'import numpy as np\n'), ((4914, 4970), 'numpy.all', 'np.all', (['(h.adjacency == [[0, 1, 0], [1, 0, 0], [0, 0, 0]])'], {}), '(h.adjacency == [[0, 1, 0], [1, 0, 0], [0, 0, 0]])\n', (4920, 4970), True, 'import numpy as np\n'), ((5111, 5157), 'numpy.all', 'np.all', (["(g.v['name'] == ['Alice', 'Bob', 'Eve'])"], {}), "(g.v['name'] == ['Alice', 'Bob', 'Eve'])\n", (5117, 5157), True, 'import numpy as np\n'), ((5169, 5220), 'numpy.all', 'np.all', (["(h.v['name'] == ['Adam', 'Barbara', 'Ernie'])"], {}), "(h.v['name'] == ['Adam', 'Barbara', 'Ernie'])\n", (5175, 5220), True, 'import numpy as np\n'), ((5296, 5383), 'numpy.all', 'np.all', (["(h.e_p['edgename'] == [['', 'Maine!', ''], ['Maine!', '', ''], ['', '', '']])"], {}), "(h.e_p['edgename'] == [['', 'Maine!', ''], ['Maine!', '', ''], ['',\n '', '']])\n", (5302, 5383), True, 'import numpy as np\n'), ((5471, 5598), 'numpy.all', 'np.all', (["(g.e_p['edgename'] == [['', 'main', 'intercept 1'], ['main', '',\n 'intercept 2'], ['intercept 1', 'intercept 2', '']])"], {}), "(g.e_p['edgename'] == [['', 'main', 'intercept 1'], ['main', '',\n 'intercept 2'], ['intercept 1', 'intercept 2', '']])\n", (5477, 5598), True, 'import numpy as np\n'), ((5772, 5788), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(10)'], {}), '(10)\n', (5784, 5788), True, 'import tinygraph as tg\n'), ((5827, 5843), 'tinygraph.TinyGraph', 'tg.TinyGraph', (['(10)'], {}), '(10)\n', (5839, 5843), True, 'import tinygraph as tg\n'), ((5884, 5914), 'tinygraph.util.graph_equality', 'tg.util.graph_equality', (['g1', 'g2'], {}), '(g1, g2)\n', (5906, 5914), True, 'import tinygraph as tg\n'), ((1365, 1430), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting exactly two endpoints.')\n", (1378, 1430), False, 'import pytest\n'), ((1457, 1522), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting 
exactly two endpoints.')\n", (1470, 1522), False, 'import pytest\n'), ((1554, 1616), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Self-loops are not allowed."""'}), "(IndexError, match='Self-loops are not allowed.')\n", (1567, 1616), False, 'import pytest\n'), ((1646, 1711), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting exactly two endpoints.')\n", (1659, 1711), False, 'import pytest\n'), ((1738, 1803), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting exactly two endpoints.')\n", (1751, 1803), False, 'import pytest\n'), ((1835, 1900), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting exactly two endpoints.')\n", (1848, 1900), False, 'import pytest\n'), ((1938, 2003), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting exactly two endpoints.')\n", (1951, 2003), False, 'import pytest\n'), ((2046, 2111), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting exactly two endpoints.')\n", (2059, 2111), False, 'import pytest\n'), ((2149, 2214), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Expecting exactly two endpoints."""'}), "(KeyError, match='Expecting exactly two endpoints.')\n", (2162, 2214), False, 'import pytest\n'), ((2257, 2282), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2270, 2282), False, 'import pytest\n'), ((2323, 2348), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2336, 2348), False, 'import pytest\n'), ((3758, 3780), 'numpy.array', 'np.array', (['[1, 2, 3, 5]'], {}), '([1, 2, 3, 5])\n', (3766, 3780), True, 'import numpy as np\n'), ((3825, 3841), 
'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (3833, 3841), True, 'import numpy as np\n'), ((3888, 3904), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3896, 3904), True, 'import numpy as np\n'), ((3951, 3967), 'numpy.array', 'np.array', (['[0, 4]'], {}), '([0, 4])\n', (3959, 3967), True, 'import numpy as np\n'), ((4014, 4030), 'numpy.array', 'np.array', (['[3, 5]'], {}), '([3, 5])\n', (4022, 4030), True, 'import numpy as np\n'), ((4077, 4093), 'numpy.array', 'np.array', (['[0, 4]'], {}), '([0, 4])\n', (4085, 4093), True, 'import numpy as np\n'), ((5960, 5990), 'tinygraph.util.graph_equality', 'tg.util.graph_equality', (['g1', 'g2'], {}), '(g1, g2)\n', (5982, 5990), True, 'import tinygraph as tg\n'), ((6207, 6236), 'tinygraph.util.graph_equality', 'tg.util.graph_equality', (['g1', 'g'], {}), '(g1, g)\n', (6229, 6236), True, 'import tinygraph as tg\n'), ((4374, 4390), 'numpy.dtype', 'np.dtype', (['"""<U10"""'], {}), "('<U10')\n", (4382, 4390), True, 'import numpy as np\n'), ((4436, 4452), 'numpy.dtype', 'np.dtype', (['"""<U20"""'], {}), "('<U20')\n", (4444, 4452), True, 'import numpy as np\n')] |
__author__ = "<NAME> (<EMAIL>)"
import numpy as np
import scipy.sparse as spsp
from thread2vec.preprocessing.social_media import anonymized as anonymized_extract
from thread2vec.preprocessing import wrappers
from thread2vec.preprocessing.common import safe_comment_generator
from thread2vec.common import get_package_path
def calculate_reddit_features():
    """Compute handcrafted features for all Reddit discussions and save one
    float32 feature matrix per temporal snapshot as .npy files.

    Snapshots are: at post time, +1 minute, +1 hour, +1 day, +1 week, and the
    final state ("inf").  Output files are features_<snapshot>.npy under
    data_folder/anonymized_data/reddit/.
    """
    input_file_path = get_package_path() + "/data_folder/anonymized_data/reddit/anonymized_data.txt"

    number_of_items = 35844  # TODO: Make this readable.

    ####################################################################################################################
    # Iterate over all discussions.
    ####################################################################################################################
    graph_generator = form_graphs([input_file_path, ], item_id_set=set(range(number_of_items)))
    features_generator = extract_features(graph_generator, "reddit")

    reddit_feature_name_list = sorted(get_handcrafted_feature_names("Reddit"))
    number_of_reddit_features = len(reddit_feature_name_list)

    # One matrix per snapshot, indexed 0..5 in temporal order.
    snapshot_names = ["post", "minute", "hour", "day", "week", "inf"]
    features_dict = {s: np.empty((number_of_items, number_of_reddit_features), dtype=np.float32)
                     for s in range(len(snapshot_names))}

    counter = 0
    for features in features_generator:
        for s, snapshot in enumerate(features["snapshots"]):
            snapshot_features = snapshot["features"]
            for f, feature_name in enumerate(reddit_feature_name_list):
                features_dict[s][counter, f] = np.float32(snapshot_features[feature_name])
            # Propagate this snapshot's values forward so discussions that end
            # early still have values in the later snapshot matrices (later
            # snapshots simply overwrite them when they exist).
            if s < 5:
                for s_extra in range(s + 1, 6):
                    for f, feature_name in enumerate(reddit_feature_name_list):
                        features_dict[s_extra][counter, f] = np.float32(snapshot_features[feature_name])
        counter += 1

    for s, snapshot_name in enumerate(snapshot_names):
        np.save(get_package_path() + "/data_folder/anonymized_data/reddit/features_" + snapshot_name,
                features_dict[s])
def calculate_youtube_features():
    """Compute handcrafted features for all YouTube discussions and save one
    float32 feature matrix per temporal snapshot as .npy files.

    Snapshots are: at post time, +1 minute, +1 hour, +1 day, +1 week, and the
    final state ("inf").  Output files are features_<snapshot>.npy under
    data_folder/anonymized_data/youtube/.
    """
    input_file_path = get_package_path() + "/data_folder/anonymized_data/youtube/anonymized_data.txt"

    number_of_items = 411288  # TODO: Make this readable.

    ####################################################################################################################
    # Iterate over all videos.
    ####################################################################################################################
    graph_generator = form_graphs([input_file_path, ], item_id_set=set(range(number_of_items)))
    features_generator = extract_features(graph_generator, "youtube")

    youtube_feature_name_list = sorted(get_handcrafted_feature_names("YouTube"))
    number_of_youtube_features = len(youtube_feature_name_list)

    # One matrix per snapshot, indexed 0..5 in temporal order.
    snapshot_names = ["post", "minute", "hour", "day", "week", "inf"]
    features_dict = {s: np.empty((number_of_items, number_of_youtube_features), dtype=np.float32)
                     for s in range(len(snapshot_names))}

    counter = 0
    for features in features_generator:
        for s, snapshot in enumerate(features["snapshots"]):
            snapshot_features = snapshot["features"]
            for f, feature_name in enumerate(youtube_feature_name_list):
                features_dict[s][counter, f] = np.float32(snapshot_features[feature_name])
            # Propagate this snapshot's values forward so discussions that end
            # early still have values in the later snapshot matrices (later
            # snapshots simply overwrite them when they exist).
            if s < 5:
                for s_extra in range(s + 1, 6):
                    for f, feature_name in enumerate(youtube_feature_name_list):
                        features_dict[s_extra][counter, f] = np.float32(snapshot_features[feature_name])
        counter += 1

    for s, snapshot_name in enumerate(snapshot_names):
        np.save(get_package_path() + "/data_folder/anonymized_data/youtube/features_" + snapshot_name,
                features_dict[s])
########################################################################################################################
def form_graphs(input_file_path_list, item_id_set):
    """Yield one graph dict per selected discussion document.

    Args:
        input_file_path_list: Paths to anonymized-data files to read.
        item_id_set: Set of sequential item indices to keep; all other
            documents are skipped (the counter still advances for them).

    Yields:
        The raw social-context dict augmented with "snapshots" and "targets".

    Raises:
        ValueError: When get_snapshot_graphs cannot form snapshots for a
            selected document.
    """
    counter = 0
    for input_file_path in input_file_path_list:
        document_gen = anonymized_extract.document_generator(input_file_path)
        for social_context_dict in document_gen:
            print(counter)  # progress indicator, one line per document
            item_id = counter
            counter += 1
            if item_id not in item_id_set:
                continue

            snapshots,\
            targets = get_snapshot_graphs(social_context_dict)

            if snapshots is None:
                # Fail loudly on unusable data.  (The original had an
                # unreachable `continue` after this raise; it was removed.)
                raise ValueError

            graph_dict = social_context_dict
            graph_dict["snapshots"] = snapshots
            graph_dict["targets"] = targets

            yield graph_dict
def extract_features(graph_generator, platform):
    """Attach handcrafted features to every snapshot of every discussion.

    Args:
        graph_generator: Iterable of graph dicts as produced by form_graphs.
        platform: Platform name passed through to the feature-name selection.

    Yields:
        The same dict, with a "features" entry added to each snapshot.
    """
    for graph_snapshot_dict in graph_generator:
        initial_post = graph_snapshot_dict["initial_post"]

        enriched_snapshots = list()
        for snapshot_dict in graph_snapshot_dict["snapshots"]:
            snapshot_dict["features"] = extract_snapshot_features(
                snapshot_dict["comment_tree"],
                snapshot_dict["user_graph"],
                snapshot_dict["timestamp_list"],
                initial_post,
                platform)
            enriched_snapshots.append(snapshot_dict)

        graph_snapshot_dict["snapshots"] = enriched_snapshots
        yield graph_snapshot_dict
def get_snapshot_graphs(social_context):
    """Build temporal snapshot graphs and prediction targets for one discussion.

    Args:
        social_context: One raw discussion document as produced by
            anonymized_extract.document_generator.

    Returns:
        (snapshot_graphs, targets): a list of snapshot dicts and a dict of
        prediction targets, or (None, None) when snapshots cannot be formed
        or the target fields are missing.
    """
    # Bind the platform-agnostic extraction callables locally.
    comment_generator = anonymized_extract.comment_generator
    extract_comment_name = anonymized_extract.extract_comment_name
    extract_parent_comment_name = anonymized_extract.extract_parent_comment_name
    extract_lifetime = anonymized_extract.extract_lifetime
    extract_user_name = anonymized_extract.extract_user_name
    calculate_targets = anonymized_extract.calculate_targets

    # Bundle consumed by safe_comment_generator.
    extraction_functions = dict()
    extraction_functions["comment_generator"] = comment_generator
    extraction_functions["extract_comment_name"] = extract_comment_name
    extraction_functions["extract_parent_comment_name"] = extract_parent_comment_name
    extraction_functions["extract_timestamp"] = extract_lifetime
    extraction_functions["extract_user_name"] = extract_user_name
    extraction_functions["calculate_targets"] = calculate_targets

    # In the anonymized data the unnamed ("anonymous coward") user is id 0.
    anonymous_coward_name = 0

    # Consume only the initial post to read its timestamp.
    comment_gen = comment_generator(social_context)
    initial_post = next(comment_gen)
    initial_post_timestamp = extract_lifetime(initial_post)  # currently unused below

    # post_lifetime_to_assessment = upper_timestamp - initial_post_timestamp
    # if post_lifetime_to_assessment < 0.0:
    #     print("Post timestamp smaller than assessment timestamp. Bad data. Continuing.")
    # elif post_lifetime_to_assessment > 604800:
    #     # Post is older than a week.
    #     return None, None, None
    # else:
    #     pass

    # Re-create the generator: the first element was consumed above.
    comment_gen = comment_generator(social_context)
    comment_name_set,\
    user_name_set,\
    within_discussion_comment_anonymize,\
    within_discussion_user_anonymize,\
    within_discussion_anonymous_coward = within_discussion_comment_and_user_anonymization(comment_gen,
                                                                                          extract_comment_name,
                                                                                          extract_user_name,
                                                                                          anonymous_coward_name)

    # safe_comment_gen = safe_comment_generator(social_context,
    #                                           comment_generator=comment_generator,
    #                                           within_discussion_comment_anonymize=within_discussion_comment_anonymize,
    #                                           extract_comment_name=extract_comment_name,
    #                                           extract_parent_comment_name=extract_parent_comment_name,
    #                                           extract_timestamp=extract_lifetime,
    #                                           safe=True)
    # Yields comments in a corrected (timestamp-safe) order.
    safe_comment_gen = safe_comment_generator(social_context,
                                              extraction_functions,
                                              within_discussion_comment_anonymize)

    snapshot_graphs = form_snapshot_graphs(safe_comment_gen,
                                           comment_name_set,
                                           user_name_set,
                                           extract_lifetime,
                                           extract_comment_name,
                                           extract_parent_comment_name,
                                           extract_user_name,
                                           within_discussion_comment_anonymize,
                                           within_discussion_user_anonymize,
                                           within_discussion_anonymous_coward)
    if snapshot_graphs is None:
        return None, None

    try:
        all_targets = calculate_targets(social_context)
    except KeyError:
        return None, None

    # Repackage the raw target fields under the names used downstream.
    targets = dict()
    targets["comment_count"] = all_targets["comments"]
    targets["user_count"] = all_targets["users"]
    targets["upvote_count"] = all_targets["number_of_upvotes"]
    targets["downvote_count"] = all_targets["number_of_downvotes"]
    targets["score"] = all_targets["score_wilson"]
    targets["controversiality"] = all_targets["controversiality_wilson"]

    return snapshot_graphs, targets
def form_snapshot_graphs(safe_comment_gen,
                         comment_name_set,
                         user_name_set,
                         extract_timestamp,
                         extract_comment_name,
                         extract_parent_comment_name,
                         extract_user_name,
                         within_discussion_comment_anonymize,
                         within_discussion_user_anonymize,
                         within_discussion_anonymous_coward):
    """Collect the ordered comments of a discussion and build its snapshots.

    Returns:
        A list of snapshot dicts, or None when the discussion is empty.
    """
    try:
        root_comment = next(safe_comment_gen)
    except StopIteration:
        return None

    comment_list = [root_comment]
    timestamp_list = [extract_timestamp(root_comment)]

    for comment in safe_comment_gen:
        comment_timestamp = extract_timestamp(comment)
        # Clamp non-monotonic timestamps to keep the sequence sorted.
        if comment_timestamp < timestamp_list[-1]:
            comment_timestamp = timestamp_list[-1]
        comment_list.append(comment)
        timestamp_list.append(comment_timestamp)

    # Decide the snapshot timestamps.
    snapshot_timestamps = decide_snapshot_timestamps(timestamp_list,
                                                     max_number_of_snapshots_including_zero=10)

    snapshot_gen = snapshot_generator(comment_list,
                                      timestamp_list,
                                      snapshot_timestamps,
                                      comment_name_set,
                                      user_name_set,
                                      extract_comment_name,
                                      extract_parent_comment_name,
                                      extract_user_name,
                                      within_discussion_comment_anonymize,
                                      within_discussion_user_anonymize,
                                      within_discussion_anonymous_coward)
    return list(snapshot_gen)
def decide_snapshot_timestamps(timestamp_list,
                               max_number_of_snapshots_including_zero):
    """Return the six timestamps at which discussion snapshots are taken.

    The schedule is fixed: the first comment's timestamp, then +1 minute,
    +1 hour, +1 day, +1 week, and finally the last comment's timestamp.

    Args:
        timestamp_list: Non-empty sequence of comment timestamps (seconds).
        max_number_of_snapshots_including_zero: Unused; kept for interface
            compatibility with an earlier adaptive-snapshot implementation.

    Returns:
        List of six timestamps (not necessarily sorted: the final timestamp
        may precede the +1 week mark for short discussions).
    """
    initial_timestamp = min(timestamp_list)
    final_timestamp = max(timestamp_list)
    return [initial_timestamp,
            initial_timestamp + 60,
            initial_timestamp + 3600,
            initial_timestamp + 86400,
            initial_timestamp + 604800,
            final_timestamp]
def snapshot_generator(comment_list,
                       timestamp_list,
                       snapshot_timestamps,
                       comment_name_set,
                       user_name_set,
                       extract_comment_name,
                       extract_parent_comment_name,
                       extract_user_name,
                       within_discussion_comment_anonymize,
                       within_discussion_user_anonymize,
                       within_discussion_anonymous_coward):
    """Incrementally grow the comment tree and user graph, yielding a snapshot
    each time a comment crosses the next snapshot-timestamp boundary.

    Each yielded dict has keys "comment_tree" and "user_graph" (COO sparse
    matrices), "timestamp_list" (timestamps seen so far) and "user_set".
    A final snapshot over the complete discussion is yielded at the end.

    Fix: the original body did `raise StopIteration` inside the generator,
    which under PEP 479 (Python 3.7+) escapes as RuntimeError; it is now a
    plain `return`.
    """
    # DOK matrices are efficient for incremental single-entry updates; the
    # final sizes are known up front from the name sets.
    comment_tree = spsp.dok_matrix((len(comment_name_set),
                                     len(comment_name_set)),
                                    dtype=np.float64)
    user_graph = spsp.dok_matrix((len(user_name_set),
                                   len(user_name_set)),
                                  dtype=np.float64)

    # Comment id 0 (the initial post) is authored by user id 0 (the OP).
    comment_id_to_user_id = dict()
    comment_id_to_user_id[0] = 0

    user_name_list = list()

    initial_post = comment_list[0]
    initial_post_timestamp = timestamp_list[0]

    user_name = extract_user_name(initial_post)
    if user_name is not None:
        user_name_list.append(user_name)

    snapshot_counter = 0
    for counter in range(len(comment_list)):
        comment = comment_list[counter]
        comment_timestamp = timestamp_list[counter]

        user_name = extract_user_name(comment)
        if user_name is not None:
            user_name_list.append(user_name)

        # NOTE(review): only one snapshot is emitted per comment even when a
        # comment jumps several boundaries at once -- preserved as-is.
        if comment_timestamp > snapshot_timestamps[snapshot_counter]:
            snapshot_graph_dict = dict()
            snapshot_graph_dict["comment_tree"] = spsp.coo_matrix(comment_tree)
            snapshot_graph_dict["user_graph"] = spsp.coo_matrix(user_graph)
            snapshot_graph_dict["timestamp_list"] = timestamp_list[:counter+1]
            snapshot_graph_dict["user_set"] = set(user_name_list)
            yield snapshot_graph_dict

            snapshot_counter += 1
            if snapshot_counter >= len(snapshot_timestamps):
                # All snapshot boundaries emitted: end the generator cleanly
                # (was `raise StopIteration`, illegal per PEP 479).
                return

        comment_tree,\
        user_graph,\
        comment_id,\
        parent_comment_id,\
        commenter_id,\
        parent_commenter_id,\
        comment_id_to_user_id = update_discussion_and_user_graphs(comment,
                                                                 extract_comment_name,
                                                                 extract_parent_comment_name,
                                                                 extract_user_name,
                                                                 comment_tree,
                                                                 user_graph,
                                                                 within_discussion_comment_anonymize,
                                                                 within_discussion_user_anonymize,
                                                                 within_discussion_anonymous_coward,
                                                                 comment_id_to_user_id)

    # Final snapshot covering the complete discussion.
    snapshot_graph_dict = dict()
    snapshot_graph_dict["comment_tree"] = spsp.coo_matrix(comment_tree)
    snapshot_graph_dict["user_graph"] = spsp.coo_matrix(user_graph)
    snapshot_graph_dict["timestamp_list"] = timestamp_list
    snapshot_graph_dict["user_set"] = set(user_name_list)
    yield snapshot_graph_dict
def update_discussion_and_user_graphs(comment,
                                      extract_comment_name,
                                      extract_parent_comment_name,
                                      extract_user_name,
                                      discussion_tree,
                                      user_graph,
                                      within_discussion_comment_anonymize,
                                      within_discussion_user_anonymize,
                                      within_discussion_anonymous_coward,
                                      comment_id_to_user_id):
    """
    Update the discussion tree and the user graph for a discussion.
    Does not handle the initial post.

    Args:
        comment: The raw comment being added.
        extract_comment_name / extract_parent_comment_name / extract_user_name:
            Platform-specific extraction callables.
        discussion_tree: Sparse comment-to-parent adjacency matrix, mutated in place.
        user_graph: Sparse user-user interaction weight matrix, mutated in place.
        within_discussion_comment_anonymize / within_discussion_user_anonymize:
            Name-to-integer-id maps for this discussion.
        within_discussion_anonymous_coward: User id of the anonymous user, or None.
        comment_id_to_user_id: Map comment id -> author's user id, mutated in place.

    Returns:
        The updated matrices, the ids involved in this update, and the
        comment-to-user map.

    Raises:
        RuntimeError: On a missing parent comment/user or an out-of-range id.
    """
    # Extract comment.
    comment_name = extract_comment_name(comment)
    comment_id = within_discussion_comment_anonymize[comment_name]

    # Extract commenter.
    commenter_name = extract_user_name(comment)
    commenter_id = within_discussion_user_anonymize[commenter_name]

    # Update the comment to user map.
    comment_id_to_user_id[comment_id] = commenter_id

    # Check if this is a comment to the original post or to another comment.
    try:
        parent_comment_name = extract_parent_comment_name(comment)
    except KeyError:
        parent_comment_name = None

    if parent_comment_name is None:
        # The parent is the original post.
        parent_comment_id = 0
        parent_commenter_id = 0
    else:
        # The parent is another comment.
        try:
            parent_comment_id = within_discussion_comment_anonymize[parent_comment_name]
        except KeyError:
            print("Parent comment does not exist. Comment name: ", comment_name)
            raise RuntimeError

        # Extract parent comment in order to update user graph.
        try:
            parent_commenter_id = comment_id_to_user_id[parent_comment_id]
        except KeyError:
            print("Parent user does not exist. Comment name: ", comment_name)
            raise RuntimeError

    try:
        if within_discussion_anonymous_coward is None:
            # Increment the edge in whichever direction already exists,
            # defaulting to (commenter -> parent commenter) for new edges.
            if user_graph[commenter_id, parent_commenter_id] > 0.0:
                user_graph[commenter_id, parent_commenter_id] += 1.0
            elif user_graph[parent_commenter_id, commenter_id] > 0.0:
                user_graph[parent_commenter_id, commenter_id] += 1.0
            else:
                user_graph[commenter_id, parent_commenter_id] = 1.0
        else:
            # Skip interactions that involve the anonymous user -- presumably
            # because many distinct people share that single id.
            if within_discussion_anonymous_coward not in (parent_commenter_id,
                                                          commenter_id):
                if user_graph[commenter_id, parent_commenter_id] > 0.0:
                    user_graph[commenter_id, parent_commenter_id] += 1.0
                elif user_graph[parent_commenter_id, commenter_id] > 0.0:
                    user_graph[parent_commenter_id, commenter_id] += 1.0
                else:
                    user_graph[commenter_id, parent_commenter_id] = 1.0
    except IndexError:
        print("Index error: ", user_graph.shape, commenter_id, parent_commenter_id)
        raise RuntimeError

    # Update discussion radial tree.
    discussion_tree[comment_id, parent_comment_id] = 1

    return discussion_tree,\
           user_graph,\
           comment_id,\
           parent_comment_id,\
           commenter_id,\
           parent_commenter_id,\
           comment_id_to_user_id
# def safe_comment_generator(document,
# comment_generator,
# within_discussion_comment_anonymize,
# extract_comment_name,
# extract_parent_comment_name,
# extract_timestamp,
# safe):
# """
# We do this in order to correct for nonsensical or missing timestamps.
# """
# if not safe:
# comment_gen = comment_generator(document)
#
# initial_post = next(comment_gen)
# yield initial_post
#
# comment_list = sorted(comment_gen, key=extract_timestamp)
# for comment in comment_list:
# yield comment
# else:
# comment_id_to_comment = dict()
#
# comment_gen = comment_generator(document)
#
# initial_post = next(comment_gen)
# yield initial_post
#
# initial_post_id = within_discussion_comment_anonymize[extract_comment_name(initial_post)]
#
# comment_id_to_comment[initial_post_id] = initial_post
#
# if initial_post_id != 0:
# print("This cannot be.")
# raise RuntimeError
#
# comment_list = list(comment_gen)
# children_dict = collections.defaultdict(list)
# for comment in comment_list:
# # Anonymize comment.
# comment_name = extract_comment_name(comment)
# comment_id = within_discussion_comment_anonymize[comment_name]
#
# parent_comment_name = extract_parent_comment_name(comment)
# if parent_comment_name is None:
# parent_comment_id = 0
# else:
# parent_comment_id = within_discussion_comment_anonymize[parent_comment_name]
#
# comment_id_to_comment[comment_id] = comment
#
# # Update discussion tree.
# children_dict[parent_comment_id].append(comment_id)
#
# # Starting from the root/initial post, we get the children and we put them in a priority queue.
# priority_queue = list()
#
# children = set(children_dict[initial_post_id])
# for child in children:
# comment = comment_id_to_comment[child]
# timestamp = extract_timestamp(comment)
# heapq.heappush(priority_queue, (timestamp, (child, comment)))
#
# # We iteratively yield the topmost priority comment and add to the priority list the children of that comment.
# while True:
# # If priority list empty, we stop.
# if len(priority_queue) == 0:
# break
#
# t = heapq.heappop(priority_queue)
# comment = t[1][1]
# yield comment
#
# children = set(children_dict[int(t[1][0])])
# for child in children:
# comment = comment_id_to_comment[child]
# timestamp = extract_timestamp(comment)
# heapq.heappush(priority_queue, (timestamp, (child, comment)))
def within_discussion_comment_and_user_anonymization(comment_gen,
                                                     extract_comment_name,
                                                     extract_user_name,
                                                     anonymous_coward_name):
    """
    Reads all distinct users and comments in a single document and anonymizes them. Roots are 0.

    The initial post gets comment id 0 and its author (the OP) gets user id 0;
    all other names get ids 1..N in set-iteration order.

    Returns:
        (comment_name_set, user_name_set,
         within_discussion_comment_anonymize, within_discussion_user_anonymize,
         within_discussion_anonymous_coward)
    """
    ####################################################################################################################
    # Extract comment and user name from the initial post.
    ####################################################################################################################
    initial_post = next(comment_gen)
    initial_post_name = extract_comment_name(initial_post)
    op_name = extract_user_name(initial_post)

    comment_names = [initial_post_name]
    user_names = [op_name]

    ####################################################################################################################
    # Iterate over all comments.
    ####################################################################################################################
    for comment in comment_gen:
        comment_names.append(extract_comment_name(comment))
        user_names.append(extract_user_name(comment))

    ####################################################################################################################
    # Perform anonymization.
    ####################################################################################################################
    # Deduplicate, temporarily removing the root names so they can get id 0.
    comment_name_set = set(comment_names)
    comment_name_set.remove(initial_post_name)
    user_name_set = set(user_names)
    user_name_set.remove(op_name)

    within_discussion_comment_anonymize = {name: i for i, name in enumerate(comment_name_set, start=1)}
    within_discussion_comment_anonymize[initial_post_name] = 0  # Initial Post gets id 0.
    within_discussion_user_anonymize = {name: i for i, name in enumerate(user_name_set, start=1)}
    within_discussion_user_anonymize[op_name] = 0  # Original Poster gets id 0.

    comment_name_set.add(initial_post_name)
    user_name_set.add(op_name)

    if anonymous_coward_name is not None:
        # None when no anonymous user actually appears in this discussion.
        within_discussion_anonymous_coward = within_discussion_user_anonymize.get(anonymous_coward_name)
    else:
        within_discussion_anonymous_coward = None

    return comment_name_set,\
           user_name_set,\
           within_discussion_comment_anonymize,\
           within_discussion_user_anonymize,\
           within_discussion_anonymous_coward
def extract_snapshot_features(comment_tree,
                              user_graph,
                              timestamp_list,
                              initial_post,
                              platform):
    """Calculate all handcrafted features for one discussion snapshot.

    Returns a dict mapping feature name to value, with the feature set
    selected by `platform` and each feature computed via its
    `wrapper_<name>` function in the wrappers module.
    """
    graph_snapshot_input = {
        "comment_tree": comment_tree,
        "user_graph": user_graph,
        "timestamp_list": timestamp_list,
        "initial_post": initial_post,
    }

    feature_names = sorted(get_handcrafted_feature_names(platform))
    handcrafted_function_list = [getattr(wrappers, "wrapper_" + name)
                                 for name in feature_names]

    return calculate_handcrafted_features(graph_snapshot_input,
                                          feature_names,
                                          handcrafted_function_list)
def calculate_handcrafted_features(graph_snapshot_input,
                                   feature_names,
                                   handcrafted_function_list):
    """Apply each feature function to the snapshot input.

    Returns a dict mapping each name in `feature_names` to the value produced
    by the corresponding function in `handcrafted_function_list`.
    """
    return {name: calculate(graph_snapshot_input)
            for name, calculate in zip(feature_names, handcrafted_function_list)}
def get_handcrafted_feature_names(platform):
    """
    Returns a set of feature names to be calculated.

    Note: `platform` is currently unused -- the platform-specific author and
    channel features (YouTube/Reddit) are disabled, so both platforms share
    the same feature set.

    Output: - names: A set of strings, corresponding to the features to be calculated.
    """
    # Basic discussion tree features.
    tree_basic = ("comment_count",
                  "max_depth",
                  "avg_depth",
                  "max_width",
                  "avg_width",
                  "max_depth_over_max_width",
                  "avg_depth_over_width")

    # Branching discussion tree features.
    tree_branching = ("comment_tree_hirsch",
                      "comment_tree_wiener",
                      "comment_tree_randic")

    # User graph features.
    user_graph = ("user_count",
                  "user_graph_hirsch",
                  "user_graph_randic",
                  "outdegree_entropy",
                  "norm_outdegree_entropy",
                  "indegree_entropy",
                  "norm_indegree_entropy")

    # Temporal features.
    temporal = ("avg_time_differences_1st_half",
                "avg_time_differences_2nd_half",
                "time_differences_std")

    names = set()
    for group in (tree_basic, tree_branching, user_graph, temporal):
        names.update(group)
    return names
# print(sorted(get_handcrafted_feature_names("YouTube")))
# print(sorted(get_handcrafted_feature_names("Reddit")))
def make_features_vector(features_dict, platform):
    """Pack a feature dict into a (1, number_of_features) float64 row vector.

    Features are ordered by sorted feature name so the layout matches the
    matrices built by the calculate_*_features functions.
    """
    feature_names = sorted(get_handcrafted_feature_names(platform))
    values = [features_dict[feature_name] for feature_name in feature_names]
    return np.array(values, dtype=np.float64).reshape(1, len(feature_names))
| [
"thread2vec.common.get_package_path",
"thread2vec.preprocessing.common.safe_comment_generator",
"numpy.empty",
"numpy.float32",
"scipy.sparse.coo_matrix",
"thread2vec.preprocessing.social_media.anonymized.document_generator"
] | [((1137, 1209), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_reddit_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_reddit_features), dtype=np.float32)\n', (1145, 1209), True, 'import numpy as np\n'), ((1233, 1305), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_reddit_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_reddit_features), dtype=np.float32)\n', (1241, 1305), True, 'import numpy as np\n'), ((1327, 1399), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_reddit_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_reddit_features), dtype=np.float32)\n', (1335, 1399), True, 'import numpy as np\n'), ((1420, 1492), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_reddit_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_reddit_features), dtype=np.float32)\n', (1428, 1492), True, 'import numpy as np\n'), ((1514, 1586), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_reddit_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_reddit_features), dtype=np.float32)\n', (1522, 1586), True, 'import numpy as np\n'), ((1607, 1679), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_reddit_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_reddit_features), dtype=np.float32)\n', (1615, 1679), True, 'import numpy as np\n'), ((3976, 4049), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_youtube_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_youtube_features), dtype=np.float32)\n', (3984, 4049), True, 'import numpy as np\n'), ((4073, 4146), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_youtube_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_youtube_features), dtype=np.float32)\n', (4081, 4146), True, 'import numpy as np\n'), ((4168, 4241), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_youtube_features)'], {'dtype': 
'np.float32'}), '((number_of_items, number_of_youtube_features), dtype=np.float32)\n', (4176, 4241), True, 'import numpy as np\n'), ((4262, 4335), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_youtube_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_youtube_features), dtype=np.float32)\n', (4270, 4335), True, 'import numpy as np\n'), ((4357, 4430), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_youtube_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_youtube_features), dtype=np.float32)\n', (4365, 4430), True, 'import numpy as np\n'), ((4451, 4524), 'numpy.empty', 'np.empty', (['(number_of_items, number_of_youtube_features)'], {'dtype': 'np.float32'}), '((number_of_items, number_of_youtube_features), dtype=np.float32)\n', (4459, 4524), True, 'import numpy as np\n'), ((10911, 11012), 'thread2vec.preprocessing.common.safe_comment_generator', 'safe_comment_generator', (['social_context', 'extraction_functions', 'within_discussion_comment_anonymize'], {}), '(social_context, extraction_functions,\n within_discussion_comment_anonymize)\n', (10933, 11012), False, 'from thread2vec.preprocessing.common import safe_comment_generator\n'), ((19556, 19585), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['comment_tree'], {}), '(comment_tree)\n', (19571, 19585), True, 'import scipy.sparse as spsp\n'), ((19627, 19654), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['user_graph'], {}), '(user_graph)\n', (19642, 19654), True, 'import scipy.sparse as spsp\n'), ((393, 411), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (409, 411), False, 'from thread2vec.common import get_package_path\n'), ((3224, 3242), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (3240, 3242), False, 'from thread2vec.common import get_package_path\n'), ((6317, 6371), 'thread2vec.preprocessing.social_media.anonymized.document_generator', 'anonymized_extract.document_generator', 
(['input_file_path'], {}), '(input_file_path)\n', (6354, 6371), True, 'from thread2vec.preprocessing.social_media import anonymized as anonymized_extract\n'), ((2563, 2581), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (2579, 2581), False, 'from thread2vec.common import get_package_path\n'), ((2665, 2683), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (2681, 2683), False, 'from thread2vec.common import get_package_path\n'), ((2771, 2789), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (2787, 2789), False, 'from thread2vec.common import get_package_path\n'), ((2873, 2891), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (2889, 2891), False, 'from thread2vec.common import get_package_path\n'), ((2973, 2991), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (2989, 2991), False, 'from thread2vec.common import get_package_path\n'), ((3075, 3093), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (3091, 3093), False, 'from thread2vec.common import get_package_path\n'), ((5412, 5430), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (5428, 5430), False, 'from thread2vec.common import get_package_path\n'), ((5515, 5533), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (5531, 5533), False, 'from thread2vec.common import get_package_path\n'), ((5622, 5640), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (5638, 5640), False, 'from thread2vec.common import get_package_path\n'), ((5725, 5743), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (5741, 5743), False, 'from thread2vec.common import get_package_path\n'), ((5826, 5844), 'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (5842, 5844), False, 'from thread2vec.common import get_package_path\n'), ((5929, 5947), 
'thread2vec.common.get_package_path', 'get_package_path', ([], {}), '()\n', (5945, 5947), False, 'from thread2vec.common import get_package_path\n'), ((17989, 18018), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['comment_tree'], {}), '(comment_tree)\n', (18004, 18018), True, 'import scipy.sparse as spsp\n'), ((18068, 18095), 'scipy.sparse.coo_matrix', 'spsp.coo_matrix', (['user_graph'], {}), '(user_graph)\n', (18083, 18095), True, 'import scipy.sparse as spsp\n'), ((2237, 2280), 'numpy.float32', 'np.float32', (['snapshot_features[feature_name]'], {}), '(snapshot_features[feature_name])\n', (2247, 2280), True, 'import numpy as np\n'), ((5083, 5126), 'numpy.float32', 'np.float32', (['snapshot_features[feature_name]'], {}), '(snapshot_features[feature_name])\n', (5093, 5126), True, 'import numpy as np\n'), ((2480, 2523), 'numpy.float32', 'np.float32', (['snapshot_features[feature_name]'], {}), '(snapshot_features[feature_name])\n', (2490, 2523), True, 'import numpy as np\n'), ((5329, 5372), 'numpy.float32', 'np.float32', (['snapshot_features[feature_name]'], {}), '(snapshot_features[feature_name])\n', (5339, 5372), True, 'import numpy as np\n')] |
import numpy as np
import soundfile
import librosa
import os
from sklearn import metrics
import logging
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import config
from datetime import datetime
def compute_time_consumed(start_time):
    """Print the total wall-clock training time elapsed since *start_time*.

    :param start_time: datetime captured when training began.
    :return: None (the duration is printed, not returned).
    """
    elapsed_seconds = (datetime.now() - start_time).seconds
    # divmod splits the duration into hours / minutes / seconds in one pass.
    hour, remainder = divmod(elapsed_seconds, 3600)
    minute, second = divmod(remainder, 60)
    print("本次训练共耗时 {0} 时 {1} 分 {2} 秒".format(hour, minute, second))
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the original check-then-create pair,
    which closes the race where another process creates the folder between
    the ``os.path.exists`` test and ``os.makedirs``.

    :param fd: path of the directory to create.
    """
    os.makedirs(fd, exist_ok=True)
def get_filename(path):
    """Return the base file name of *path* without directory or extension.

    :param path: file path (relative or absolute).
    :return: file name stem, e.g. ``'/tmp/foo.wav' -> 'foo'``.
    """
    path = os.path.realpath(path)
    # os.path.basename handles the platform separator, unlike the original
    # manual split on '/', which broke on Windows paths.
    name_ext = os.path.basename(path)
    name = os.path.splitext(name_ext)[0]
    return name
def create_logging(log_dir, filemode):
    """Configure the root logger to write into the next free ``%04d.log``
    file inside *log_dir* and mirror INFO-level records to the console.

    :param log_dir: directory that receives the numbered log files.
    :param filemode: mode passed to the file handler ('w' or 'a').
    :return: the configured ``logging`` module.
    """
    create_folder(log_dir)
    # Find the first unused run index so earlier logs are never overwritten.
    run_index = 0
    while os.path.isfile(os.path.join(log_dir, '%04d.log' % run_index)):
        run_index += 1
    log_path = os.path.join(log_dir, '%04d.log' % run_index)
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename=log_path,
        filemode=filemode)
    # Also echo records at INFO and above to the console.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(
        logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
    logging.getLogger('').addHandler(console_handler)
    return logging
def read_audio(path, target_fs=None):
    """Load an audio file with librosa, keeping all channels.

    The previous hand-rolled soundfile/resample implementation was left
    commented out; it has been removed in favour of the single librosa call
    that was actually executed.

    :param path: path to the audio file.
    :param target_fs: target sampling rate; ``None`` keeps the native rate.
    :return: ``(audio, fs)`` tuple as returned by ``librosa.load``
             (``mono=False`` preserves multi-channel audio).
    """
    return librosa.load(path, sr=target_fs, mono=False)
def calculate_scalar(x):
    """Compute mean and standard deviation over every axis except the last.

    :param x: numpy array with 2, 3 or 4 dimensions; the last axis is the
              feature axis whose statistics are preserved.
    :return: ``(mean, std)`` arrays of shape ``x.shape[-1:]``.
    :raises ValueError: if ``x`` has an unsupported number of dimensions
        (the original code left ``axis`` unbound and crashed with a
        confusing NameError in that case).
    """
    if x.ndim == 2:
        axis = 0
    elif x.ndim == 3:
        axis = (0, 1)
    elif x.ndim == 4:
        axis = (0, 1, 2)
    else:
        raise ValueError('x must have 2, 3 or 4 dimensions, got %d' % x.ndim)
    mean = np.mean(x, axis=axis)
    std = np.std(x, axis=axis)
    return mean, std
def scale(x, mean, std):
    """Standardise *x* with the given statistics: ``(x - mean) / std``."""
    centered = x - mean
    return centered / std
def inverse_scale(x, mean, std):
    """Undo :func:`scale`: map standardised values back to original units."""
    return mean + x * std
def calculate_accuracy(target, predict, classes_num, average=None):
    """Calculate per-class (or macro-averaged) classification accuracy.

    Inputs:
      target: integer array, (audios_num,) of ground-truth class indices
      predict: integer array, (audios_num,) of predicted class indices
      classes_num: int, number of classes
      average: None for a per-class array, 'macro' for the mean over classes
    Outputs:
      accuracy: array of shape (classes_num,) or a single float
    """
    hits = np.zeros(classes_num)
    counts = np.zeros(classes_num)
    for true_label, predicted_label in zip(target, predict):
        counts[true_label] += 1
        if true_label == predicted_label:
            hits[true_label] += 1
    # Classes with zero samples yield NaN here, as in the original code.
    class_wise = hits / counts
    if average is None:
        return class_wise
    if average == 'macro':
        return np.mean(class_wise)
    raise Exception('Incorrect average!')
def calculate_confusion_matrix(target, predict, classes_num):
    """Build a confusion matrix of true vs. predicted class indices.

    Inputs:
      target: integer array, (audios_num,)
      predict: integer array, (audios_num,)
      classes_num: int, number of classes
    Outputs:
      confusion_matrix: (classes_num, classes_num); rows are true classes,
      columns are predicted classes.
    """
    matrix = np.zeros((classes_num, classes_num))
    for true_label, predicted_label in zip(target, predict):
        matrix[true_label, predicted_label] += 1
    return matrix
def print_accuracy(class_wise_accuracy, labels):
    """Print a per-class accuracy table followed by the overall average."""
    separator = '------------------------------------------------'
    print('{:<30}{}'.format('Scene label', 'accuracy'))
    print(separator)
    for n, label in enumerate(labels):
        print('{:<30}{:.3f}'.format(label, class_wise_accuracy[n]))
    print(separator)
    print('{:<30}{:.3f}'.format('Average', np.mean(class_wise_accuracy)))
def plot_confusion_matrix(confusion_matrix, title, labels, values):
    """Plot a confusion matrix with per-class values drawn on the diagonal.

    Inputs:
      confusion_matrix: matrix, (classes_num, classes_num)
      title: str, figure title
      labels: list of class labels (falsy to skip tick labels)
      values: list of values to be shown in diagonal
    Outputs:
      None (opens an interactive figure via plt.show())
    """
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)
    cax = ax.matshow(confusion_matrix, cmap=plt.cm.Blues)
    if labels:
        # Leading '' pads the label list so it lines up with matshow ticks.
        ax.set_xticklabels([''] + labels, rotation=90, ha='left')
        ax.set_yticklabels([''] + labels)
        ax.xaxis.set_ticks_position('bottom')
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    for n in range(len(values)):
        # Offset by -0.4 so the text sits roughly centred in the diagonal cell.
        plt.text(n - 0.4, n, '{:.2f}'.format(values[n]), color='yellow')
    plt.title(title)
    plt.xlabel('Predicted')
    plt.ylabel('Target')
    plt.tight_layout()
    plt.show()
def plot_confusion_matrix2(confusion_matrix, title, labels):
    """Plot a confusion matrix with every cell count annotated.

    Inputs:
      confusion_matrix: matrix, (classes_num, classes_num)
      title: str, figure title
      labels: list of class labels (falsy to skip tick labels)
    Outputs:
      None (opens an interactive figure via plt.show())
    """
    plt.rcParams.update({'font.size': 10.5})
    import itertools
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    cax = ax.matshow(confusion_matrix, cmap=plt.cm.Blues)
    ax.set_xlabel('Predicted')
    ax.set_ylabel('Target')
    if labels:
        # Leading '' pads the label list so it lines up with matshow ticks.
        ax.set_xticklabels([''] + labels, rotation=45)
        ax.set_yticklabels([''] + labels)
        ax.xaxis.set_ticks_position('bottom')
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    row, column = confusion_matrix.shape
    # Cast to int so the annotations print as counts rather than floats.
    confusion_matrix = np.asarray(confusion_matrix, np.int32)
    for i, j in itertools.product(range(row), range(column)):
        # White text on the (dark) diagonal, black elsewhere for contrast.
        plt.text(j, i, confusion_matrix[i, j], horizontalalignment='center',
                 color='white' if i == j else "black")
    plt.title(title)
    ttl = ax.title
    ttl.set_position([.5, 1.01])
    plt.tight_layout()
    plt.show()
def write_leaderboard_submission(submission_path, audio_names, predictions):
    """Write a leaderboard submission CSV with header ``Id,Scene_label``.

    The file is now opened with a context manager; the original left the
    handle open if a label lookup raised mid-loop.

    Args:
      submission_path: output file path.
      audio_names: list of audio file names (extension is stripped for Id).
      predictions: list of integer class indices, keys of config.ix_to_lb.
    """
    ix_to_lb = config.ix_to_lb
    with open(submission_path, 'w') as f:
        f.write('Id,Scene_label\n')
        for n in range(len(audio_names)):
            f.write('{}'.format(os.path.splitext(audio_names[n])[0]))
            f.write(',')
            f.write(ix_to_lb[predictions[n]])
            f.write('\n')
    logging.info('Write result to {}'.format(submission_path))
def write_evaluation_submission(submission_path, audio_names, predictions):
    """Write an evaluation submission file: one tab-separated line per clip
    of the form ``audio/<name>`` followed by the predicted scene label.

    The file is now opened with a context manager; the original left the
    handle open if a label lookup raised mid-loop.

    Args:
      submission_path: output file path.
      audio_names: list of audio file names.
      predictions: list of integer class indices, keys of config.ix_to_lb.
    """
    ix_to_lb = config.ix_to_lb
    with open(submission_path, 'w') as f:
        for n in range(len(audio_names)):
            f.write('audio/{}'.format(audio_names[n]))
            f.write('\t')
            f.write(ix_to_lb[predictions[n]])
            f.write('\n')
    logging.info('Write result to {}'.format(submission_path))
def calculate_stats(output, target):
    """Calculate statistics including mAP, AUC, etc.

    Args:
      output: 2d array, (samples_num, classes_num) of scores.
      target: 2d array, (samples_num, classes_num) of binary labels.

    Returns:
      stats: list with one statistics dict per class, holding sampled
      precision/recall and FPR/FNR curves plus AP and AUC scalars.
    """
    classes_num = target.shape[-1]
    stats = []
    # Class-wise statistics
    for k in range(classes_num):
        # Average precision
        avg_precision = metrics.average_precision_score(
            target[:, k], output[:, k], average=None)
        # AUC
        auc = metrics.roc_auc_score(target[:, k], output[:, k], average=None)
        # Precisions, recalls
        (precisions, recalls, thresholds) = metrics.precision_recall_curve(
            target[:, k], output[:, k])
        # FPR, TPR
        (fpr, tpr, thresholds) = metrics.roc_curve(target[:, k], output[:, k])
        save_every_steps = 1000  # Sample statistics to reduce size
        # Renamed from `dict`, which shadowed the builtin of the same name.
        class_stats = {'precisions': precisions[0::save_every_steps],
                       'recalls': recalls[0::save_every_steps],
                       'AP': avg_precision,
                       'fpr': fpr[0::save_every_steps],
                       'fnr': 1. - tpr[0::save_every_steps],
                       'auc': auc}
        stats.append(class_stats)
    return stats
if __name__ == '__main__':
    # Demo entry point: visualise a previously saved confusion matrix.
    # Assumes ../data1.npy holds a 10x10 matrix matching `labels` -- TODO confirm.
    cm = np.load('../data1.npy')
    labels = ['airport', 'bus', 'metro', 'metro_station', 'park', 'public_square',
              'shopping_mall', 'street_pedestrian', 'street_traffic', 'tram']
    plot_confusion_matrix2(cm, 'the confusion matrix of DA-MFCNN', labels)
| [
"matplotlib.pyplot.title",
"numpy.load",
"logging.Formatter",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.std",
"os.path.exists",
"matplotlib.pyplot.rcParams.update",
"matplotlib.ticker.MultipleLocator",
"sklearn.metrics.average_precision_... | [((684, 706), 'os.path.realpath', 'os.path.realpath', (['path'], {}), '(path)\n', (700, 706), False, 'import os\n'), ((977, 1015), 'os.path.join', 'os.path.join', (['log_dir', "('%04d.log' % i1)"], {}), "(log_dir, '%04d.log' % i1)\n", (989, 1015), False, 'import os\n'), ((1020, 1218), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"""', 'datefmt': '"""%a, %d %b %Y %H:%M:%S"""', 'filename': 'log_path', 'filemode': 'filemode'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S', filename=log_path, filemode=filemode)\n", (1039, 1218), False, 'import logging\n'), ((1289, 1312), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1310, 1312), False, 'import logging\n'), ((1364, 1425), 'logging.Formatter', 'logging.Formatter', (['"""%(name)-12s: %(levelname)-8s %(message)s"""'], {}), "('%(name)-12s: %(levelname)-8s %(message)s')\n", (1381, 1425), False, 'import logging\n'), ((1880, 1924), 'librosa.load', 'librosa.load', (['path'], {'sr': 'target_fs', 'mono': '(False)'}), '(path, sr=target_fs, mono=False)\n', (1892, 1924), False, 'import librosa\n'), ((2092, 2113), 'numpy.mean', 'np.mean', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (2099, 2113), True, 'import numpy as np\n'), ((2124, 2144), 'numpy.std', 'np.std', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (2130, 2144), True, 'import numpy as np\n'), ((2574, 2595), 'numpy.zeros', 'np.zeros', (['classes_num'], {}), '(classes_num)\n', (2582, 2595), True, 'import numpy as np\n'), ((2608, 2629), 'numpy.zeros', 'np.zeros', (['classes_num'], {}), '(classes_num)\n', (2616, 2629), True, 'import numpy as np\n'), ((3326, 3362), 'numpy.zeros', 'np.zeros', (['(classes_num, classes_num)'], {}), '((classes_num, classes_num))\n', (3334, 3362), True, 'import numpy as np\n'), 
((4220, 4246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (4230, 4246), True, 'import matplotlib.pyplot as plt\n'), ((4735, 4751), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4744, 4751), True, 'import matplotlib.pyplot as plt\n'), ((4756, 4779), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {}), "('Predicted')\n", (4766, 4779), True, 'import matplotlib.pyplot as plt\n'), ((4784, 4804), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Target"""'], {}), "('Target')\n", (4794, 4804), True, 'import matplotlib.pyplot as plt\n'), ((4809, 4827), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4825, 4827), True, 'import matplotlib.pyplot as plt\n'), ((4832, 4842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4840, 4842), True, 'import matplotlib.pyplot as plt\n'), ((5073, 5113), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 10.5}"], {}), "({'font.size': 10.5})\n", (5092, 5113), True, 'import matplotlib.pyplot as plt\n'), ((5145, 5171), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (5155, 5171), True, 'import matplotlib.pyplot as plt\n'), ((5661, 5699), 'numpy.asarray', 'np.asarray', (['confusion_matrix', 'np.int32'], {}), '(confusion_matrix, np.int32)\n', (5671, 5699), True, 'import numpy as np\n'), ((5900, 5916), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5909, 5916), True, 'import matplotlib.pyplot as plt\n'), ((5973, 5991), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5989, 5991), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6004, 6006), True, 'import matplotlib.pyplot as plt\n'), ((8155, 8178), 'numpy.load', 'np.load', (['"""../data1.npy"""'], {}), "('../data1.npy')\n", (8162, 8178), True, 'import numpy as np\n'), ((340, 354), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (352, 354), False, 'from datetime import datetime\n'), ((603, 621), 'os.path.exists', 'os.path.exists', (['fd'], {}), '(fd)\n', (617, 621), False, 'import os\n'), ((631, 646), 'os.makedirs', 'os.makedirs', (['fd'], {}), '(fd)\n', (642, 646), False, 'import os\n'), ((753, 779), 'os.path.splitext', 'os.path.splitext', (['name_ext'], {}), '(name_ext)\n', (769, 779), False, 'import os\n'), ((904, 942), 'os.path.join', 'os.path.join', (['log_dir', "('%04d.log' % i1)"], {}), "(log_dir, '%04d.log' % i1)\n", (916, 942), False, 'import os\n'), ((4538, 4563), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (4560, 4563), True, 'import matplotlib.ticker as ticker\n'), ((4596, 4621), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (4618, 4621), True, 'import matplotlib.ticker as ticker\n'), ((5511, 5536), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (5533, 5536), True, 'import matplotlib.ticker as ticker\n'), ((5569, 5594), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (5591, 5594), True, 'import matplotlib.ticker as ticker\n'), ((5771, 5882), 'matplotlib.pyplot.text', 'plt.text', (['j', 'i', 'confusion_matrix[i, j]'], {'horizontalalignment': '"""center"""', 'color': "('white' if i == j else 'black')"}), "(j, i, confusion_matrix[i, j], horizontalalignment='center', color=\n 'white' if i == j else 'black')\n", (5779, 5882), True, 'import matplotlib.pyplot as plt\n'), ((7290, 7363), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['target[:, k]', 'output[:, k]'], {'average': 'None'}), '(target[:, k], output[:, k], average=None)\n', (7321, 7363), False, 'from sklearn import metrics\n'), ((7405, 7468), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['target[:, k]', 'output[:, k]'], {'average': 'None'}), '(target[:, k], 
output[:, k], average=None)\n', (7426, 7468), False, 'from sklearn import metrics\n'), ((7544, 7602), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['target[:, k]', 'output[:, k]'], {}), '(target[:, k], output[:, k])\n', (7574, 7602), False, 'from sklearn import metrics\n'), ((7669, 7714), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['target[:, k]', 'output[:, k]'], {}), '(target[:, k], output[:, k])\n', (7686, 7714), False, 'from sklearn import metrics\n'), ((1466, 1487), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (1483, 1487), False, 'import logging\n'), ((2913, 2930), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (2920, 2930), True, 'import numpy as np\n'), ((3892, 3920), 'numpy.mean', 'np.mean', (['class_wise_accuracy'], {}), '(class_wise_accuracy)\n', (3899, 3920), True, 'import numpy as np\n'), ((6252, 6284), 'os.path.splitext', 'os.path.splitext', (['audio_names[n]'], {}), '(audio_names[n])\n', (6268, 6284), False, 'import os\n')] |
"""Extract training samples from OSM."""
import geopandas as gpd
import numpy as np
import pandas as pd
from rasterio import Affine
from rasterio.crs import CRS
from rasterio.features import rasterize
from rasterio.warp import reproject, Resampling
from scipy.ndimage.morphology import distance_transform_edt
from shapely.geometry import mapping
from maupp.osm import urban_blocks
from maupp.utils import reproject_features, iter_geoms, filter_features
WGS84 = CRS(init='epsg:4326')
def buildings(osm, aoi, crs, transform, width, height, min_coverage=0.2):
    """Extract and rasterize building footprints from OSM.

    Parameters
    ----------
    osm : maupp.osm.OSMDatabase
        An initialized OSMDatabase object.
    aoi : shapely polygon
        Area of interest in lat/lon coordinates.
    crs : CRS
        Target CRS (rasterio.crs.CRS object).
    transform : Affine
        Target affine transform.
    width : int
        Target width.
    height : int
        Target height.
    min_coverage : float, optional
        Min. surface of a pixel that footprints must cover.

    Returns
    -------
    out_img : numpy 2d array
        Rasterized building footprints (boolean mask).
    """
    # Rasterize building footprints into a binary raster with
    # 4x smaller pixel size
    SCALE = 0.25
    tmp_transform, tmp_width, tmp_height = rescale_transform(
        transform, width, height, SCALE)
    features = osm.buildings(aoi)
    if len(features) == 0:
        # `np.bool` was a deprecated alias of the builtin `bool` and was
        # removed in NumPy 1.24; the builtin is behaviour-identical.
        return np.zeros(shape=(height, width), dtype=bool)
    features = reproject_features(features, src_crs=WGS84, dst_crs=crs)
    footprints = rasterize(
        shapes=iter_geoms(features),
        out_shape=(tmp_height, tmp_width),
        transform=tmp_transform,
        all_touched=False,
        dtype='uint8')
    # Resample binary raster to a continuous raster with 4x higher
    # pixel size by averaging the values of the binary raster.
    cover, _ = rescale(
        src_array=footprints,
        src_transform=tmp_transform,
        crs=crs,
        scale=1/SCALE)
    # Convert to binary raster
    return cover >= min_coverage
def blocks(osm, aoi, crs, transform, width, height, max_surface=30000):
    """Extract and rasterize urban blocks from the OSM road network.

    Parameters
    ----------
    osm : maupp.osm.OSMDatabase
        An initialized OSMDatabase object.
    aoi : shapely polygon
        Area of interest in lat/lon coordinates.
    crs : CRS
        Target CRS (rasterio.crs.CRS object).
    transform : Affine
        Target affine transform.
    width : int
        Target width.
    height : int
        Target height.
    max_surface : int, optional
        Max. surface of a block in meters.

    Returns
    -------
    out_img : numpy 2d array
        Rasterized urban blocks (boolean mask).
    """
    # Extract only roads of interest
    ROAD_TAGS = ('secondary', 'tertiary', 'residential')
    roads = gpd.GeoDataFrame.from_features(osm.roads(aoi), crs=WGS84)
    roads = roads[roads.highway.isin(ROAD_TAGS)]
    if len(roads) == 0:
        # `np.bool` was removed in NumPy 1.24; the builtin `bool` is equivalent.
        return np.zeros(shape=(height, width), dtype=bool)
    # Compute urban blocks from the road network and filter the
    # output polygons by their surface
    polygons = urban_blocks(roads.__geo_interface__['features'])
    polygons = gpd.GeoDataFrame.from_features(polygons, crs=WGS84)
    polygons = polygons.to_crs(crs)
    polygons = polygons[polygons.area <= max_surface]
    # Rasterize urban blocks
    return rasterize(
        shapes=(mapping(geom) for geom in polygons.geometry),
        out_shape=(height, width),
        transform=transform,
        dtype='uint8').astype(bool)
def nonbuilt(osm, aoi, crs, transform, width, height):
    """Extract and rasterize non-built `landuse`, `leisure` and `natural`
    features from OSM.

    Parameters
    ----------
    osm : maupp.osm.OSMDatabase
        An initialized OSMDatabase object.
    aoi : shapely polygon
        Area of interest in lat/lon coordinates.
    crs : CRS
        Target CRS (rasterio.crs.CRS object).
    transform : Affine
        Target affine transform.
    width : int
        Target width.
    height : int
        Target height.

    Returns
    -------
    out_img : numpy 2d array
        Rasterized non-built features (boolean mask).
    """
    NONBUILT_TAGS = ['sand', 'farmland', 'wetland', 'wood', 'park', 'forest',
                     'nature_reserve', 'golf_course', 'greenfield', 'quarry',
                     'pitch', 'scree', 'meadow', 'orchard', 'grass',
                     'grassland', 'garden', 'heath', 'bare_rock', 'beach']
    nonbuilt = osm.landuse(aoi) + osm.leisure(aoi) + osm.natural(aoi)
    nonbuilt = gpd.GeoDataFrame.from_features(nonbuilt, crs=WGS84)

    def tag(row):
        # Return the first non-NaN value among the three candidate OSM keys.
        for key in ('landuse', 'leisure', 'natural'):
            if key in row:
                if not pd.isna(row[key]):
                    return row[key]
        return None
    nonbuilt['tag'] = nonbuilt.apply(tag, axis=1)
    nonbuilt = nonbuilt[nonbuilt.tag.isin(NONBUILT_TAGS)]
    if len(nonbuilt) == 0:
        # `np.bool` was removed in NumPy 1.24; the builtin `bool` is equivalent.
        return np.zeros(shape=(height, width), dtype=bool)
    nonbuilt = nonbuilt.to_crs(crs)
    return rasterize(
        shapes=(mapping(geom) for geom in nonbuilt.geometry),
        out_shape=(height, width),
        transform=transform,
        dtype='uint8').astype(bool)
def remote(osm, aoi, crs, transform, width, height, min_distance=250):
    """Identify remote areas (i.e. distant from any building or road).
    Roads identified as paths or tracks are ignored.

    Parameters
    ----------
    osm : maupp.osm.OSMDatabase
        An initialized OSMDatabase object.
    aoi : shapely polygon
        Area of interest in lat/lon coordinates.
    crs : CRS
        Target CRS (rasterio.crs.CRS object).
    transform : Affine
        Target affine transform.
    width : int
        Target width.
    height : int
        Target height.
    min_distance : int, optional
        Min. distance from roads or buildings (meters).

    Returns
    -------
    out_img : numpy 2d array
        Binary 2d array.
    """
    roads = osm.roads(aoi)
    roads = filter_features(roads, 'highway', exclude=['path', 'track'])
    roads = reproject_features(roads, WGS84, crs)
    # `np.bool` was removed in NumPy 1.24; the builtin `bool` is equivalent.
    roads = rasterize(
        shapes=iter_geoms(roads),
        out_shape=(height, width),
        transform=transform,
        all_touched=True,
        dtype='uint8').astype(bool)
    builtup = osm.buildings(aoi)
    if len(builtup) > 0:
        builtup = reproject_features(builtup, WGS84, crs)
        builtup = rasterize(
            shapes=iter_geoms(builtup),
            out_shape=(height, width),
            transform=transform,
            all_touched=True,
            dtype='uint8').astype(bool)
    else:
        builtup = np.zeros(shape=(height, width), dtype=bool)
    # Calculate distance of each pixel from roads or buildings
    # and multiply by pixel size to output values in meters.
    urban = roads | builtup
    distance = distance_transform_edt(np.logical_not(urban))
    return distance * transform.a >= min_distance
def rescale_transform(src_transform, src_width, src_height, scale):
    """Derive the transform and raster size obtained by multiplying the
    pixel size by *scale*.

    Parameters
    ----------
    src_transform : Affine
        Source affine transform.
    src_width : int
        Source raster width.
    src_height : int
        Source raster height.
    scale : float
        Scale factor (e.g. 0.5 to reduce pixel size by half).

    Returns
    -------
    dst_transform : Affine
        New affine transform.
    dst_width : int
        New raster width.
    dst_height : int
        New raster height.
    """
    t = src_transform
    # Only the pixel-size terms (a and e) are scaled; rotation and
    # offset terms are carried over unchanged.
    dst_transform = Affine(t.a * scale, t.b, t.c, t.d, t.e * scale, t.f)
    dst_width = int(src_width / scale)
    dst_height = int(src_height / scale)
    return dst_transform, dst_width, dst_height
def rescale(src_array, src_transform, crs, scale, resampling_method='average'):
    """Resample *src_array* onto a grid whose pixel size is multiplied by
    *scale*, keeping the CRS unchanged.

    Parameters
    ----------
    src_array : numpy 2d array
        Source raster.
    src_transform : rasterio.Affine
        Source affine transform.
    crs : rasterio.crs.CRS
        Source & destination CRS.
    scale : float
        Scale factor (e.g. 0.5 to reduce pixel size by half).
    resampling_method : str, optional
        Name of a `rasterio.warp.Resampling` member: 'nearest', 'bilinear',
        'cubic', 'cubic_spline', 'lanczos', 'average', 'mode', 'gauss',
        'max', 'min' or 'med'.

    Returns
    -------
    dst_array : numpy 2d array
        Output raster.
    dst_transform : rasterio.Affine
        Output affine transform.
    """
    method = getattr(Resampling, resampling_method)
    src_height, src_width = src_array.shape
    dst_transform, dst_width, dst_height = rescale_transform(
        src_transform, src_width, src_height, scale)
    dst_array = np.empty((dst_height, dst_width), 'float32')
    reproject(
        src_array, dst_array,
        src_transform=src_transform,
        src_crs=crs,
        dst_transform=dst_transform,
        dst_crs=crs,
        resampling=method)
    return dst_array, dst_transform
def training_dataset(buildings, blocks, nonbuilt, remote, water):
    """Build a two-class training dataset from OSM feature masks.

    Legend: 0=NA, 1=Built-up, 2=Non-built-up.

    Parameters
    ----------
    buildings : numpy 2d array
        Binary building footprints raster.
    blocks : numpy 2d array
        Binary urban blocks raster.
    nonbuilt : numpy 2d array
        Binary non-built-up raster.
    remote : numpy 2d array
        Binary remote areas raster.
    water : numpy 2d array
        Binary water mask.

    Returns
    -------
    training_samples : numpy 2d array ('uint8')
        Output training data raster.
    """
    built_mask = buildings | blocks
    empty_mask = nonbuilt | remote
    # Pixels flagged as both built and non-built are contradictory; those
    # and water pixels are excluded from both classes.
    invalid = (built_mask & empty_mask) | water
    training_samples = np.zeros(shape=built_mask.shape, dtype='uint8')
    training_samples[built_mask & ~invalid] = 1
    training_samples[empty_mask & ~invalid] = 2
    return training_samples
| [
"maupp.utils.reproject_features",
"geopandas.GeoDataFrame.from_features",
"rasterio.warp.reproject",
"numpy.empty",
"rasterio.Affine",
"numpy.logical_not",
"numpy.zeros",
"maupp.utils.iter_geoms",
"shapely.geometry.mapping",
"maupp.utils.filter_features",
"rasterio.crs.CRS",
"maupp.osm.urban_b... | [((465, 486), 'rasterio.crs.CRS', 'CRS', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (468, 486), False, 'from rasterio.crs import CRS\n'), ((1544, 1600), 'maupp.utils.reproject_features', 'reproject_features', (['features'], {'src_crs': 'WGS84', 'dst_crs': 'crs'}), '(features, src_crs=WGS84, dst_crs=crs)\n', (1562, 1600), False, 'from maupp.utils import reproject_features, iter_geoms, filter_features\n'), ((3225, 3274), 'maupp.osm.urban_blocks', 'urban_blocks', (["roads.__geo_interface__['features']"], {}), "(roads.__geo_interface__['features'])\n", (3237, 3274), False, 'from maupp.osm import urban_blocks\n'), ((3290, 3341), 'geopandas.GeoDataFrame.from_features', 'gpd.GeoDataFrame.from_features', (['polygons'], {'crs': 'WGS84'}), '(polygons, crs=WGS84)\n', (3320, 3341), True, 'import geopandas as gpd\n'), ((4664, 4715), 'geopandas.GeoDataFrame.from_features', 'gpd.GeoDataFrame.from_features', (['nonbuilt'], {'crs': 'WGS84'}), '(nonbuilt, crs=WGS84)\n', (4694, 4715), True, 'import geopandas as gpd\n'), ((6115, 6175), 'maupp.utils.filter_features', 'filter_features', (['roads', '"""highway"""'], {'exclude': "['path', 'track']"}), "(roads, 'highway', exclude=['path', 'track'])\n", (6130, 6175), False, 'from maupp.utils import reproject_features, iter_geoms, filter_features\n'), ((6188, 6225), 'maupp.utils.reproject_features', 'reproject_features', (['roads', 'WGS84', 'crs'], {}), '(roads, WGS84, crs)\n', (6206, 6225), False, 'from maupp.utils import reproject_features, iter_geoms, filter_features\n'), ((7728, 7856), 'rasterio.Affine', 'Affine', (['(src_transform.a * scale)', 'src_transform.b', 'src_transform.c', 'src_transform.d', '(src_transform.e * scale)', 'src_transform.f'], {}), '(src_transform.a * scale, src_transform.b, src_transform.c,\n src_transform.d, src_transform.e * scale, src_transform.f)\n', (7734, 7856), False, 'from rasterio import Affine\n'), ((9060, 9104), 'numpy.empty', 'np.empty', (['(dst_height, 
dst_width)', '"""float32"""'], {}), "((dst_height, dst_width), 'float32')\n", (9068, 9104), True, 'import numpy as np\n'), ((9109, 9258), 'rasterio.warp.reproject', 'reproject', (['src_array', 'dst_array'], {'src_transform': 'src_transform', 'src_crs': 'crs', 'dst_transform': 'dst_transform', 'dst_crs': 'crs', 'resampling': 'resampling_method'}), '(src_array, dst_array, src_transform=src_transform, src_crs=crs,\n dst_transform=dst_transform, dst_crs=crs, resampling=resampling_method)\n', (9118, 9258), False, 'from rasterio.warp import reproject, Resampling\n'), ((10238, 10283), 'numpy.zeros', 'np.zeros', ([], {'shape': 'positive.shape', 'dtype': '"""uint8"""'}), "(shape=positive.shape, dtype='uint8')\n", (10246, 10283), True, 'import numpy as np\n'), ((1481, 1527), 'numpy.zeros', 'np.zeros', ([], {'shape': '(height, width)', 'dtype': 'np.bool'}), '(shape=(height, width), dtype=np.bool)\n', (1489, 1527), True, 'import numpy as np\n'), ((3059, 3105), 'numpy.zeros', 'np.zeros', ([], {'shape': '(height, width)', 'dtype': 'np.bool'}), '(shape=(height, width), dtype=np.bool)\n', (3067, 3105), True, 'import numpy as np\n'), ((5066, 5112), 'numpy.zeros', 'np.zeros', ([], {'shape': '(height, width)', 'dtype': 'np.bool'}), '(shape=(height, width), dtype=np.bool)\n', (5074, 5112), True, 'import numpy as np\n'), ((6489, 6528), 'maupp.utils.reproject_features', 'reproject_features', (['builtup', 'WGS84', 'crs'], {}), '(builtup, WGS84, crs)\n', (6507, 6528), False, 'from maupp.utils import reproject_features, iter_geoms, filter_features\n'), ((6771, 6817), 'numpy.zeros', 'np.zeros', ([], {'shape': '(height, width)', 'dtype': 'np.bool'}), '(shape=(height, width), dtype=np.bool)\n', (6779, 6817), True, 'import numpy as np\n'), ((7009, 7030), 'numpy.logical_not', 'np.logical_not', (['urban'], {}), '(urban)\n', (7023, 7030), True, 'import numpy as np\n'), ((1644, 1664), 'maupp.utils.iter_geoms', 'iter_geoms', (['features'], {}), '(features)\n', (1654, 1664), False, 'from maupp.utils 
import reproject_features, iter_geoms, filter_features\n'), ((4839, 4856), 'pandas.isna', 'pd.isna', (['row[key]'], {}), '(row[key])\n', (4846, 4856), True, 'import pandas as pd\n'), ((6264, 6281), 'maupp.utils.iter_geoms', 'iter_geoms', (['roads'], {}), '(roads)\n', (6274, 6281), False, 'from maupp.utils import reproject_features, iter_geoms, filter_features\n'), ((3500, 3513), 'shapely.geometry.mapping', 'mapping', (['geom'], {}), '(geom)\n', (3507, 3513), False, 'from shapely.geometry import mapping\n'), ((5188, 5201), 'shapely.geometry.mapping', 'mapping', (['geom'], {}), '(geom)\n', (5195, 5201), False, 'from shapely.geometry import mapping\n'), ((6577, 6596), 'maupp.utils.iter_geoms', 'iter_geoms', (['builtup'], {}), '(builtup)\n', (6587, 6596), False, 'from maupp.utils import reproject_features, iter_geoms, filter_features\n')] |
import json
import numpy as np
import subprocess
from my_utils.squad_eval import get_bleu_moses
import os.path
def pred2words(prediction, vocab):
    """Convert predicted token-id sequences into space-joined word strings.

    Each sequence is truncated at the first EOS token (id 3); the EOS token
    itself is not included in the output.

    :param prediction: iterable of token-id sequences.
    :param vocab: list mapping token ids to word strings.
    :return: list of decoded sentences, one per input sequence.
    """
    EOS_token = 3
    sentences = []
    for sequence in prediction:
        words = []
        for token in sequence:
            if int(token) == EOS_token:
                break
            words.append(vocab[int(token)])
        sentences.append(' '.join(words))
    return sentences
def get_answer(path):
    """Load and return the JSON document stored at *path* (UTF-8)."""
    with open(path, encoding="utf8") as f:
        return json.load(f)
def eval_test_loss(model, dev_batches):
    """Compute the sample-weighted average test loss over *dev_batches*.

    Each batch loss is weighted by its number of answer tokens rows so the
    result is a true per-sample mean regardless of batch sizes.
    """
    dev_batches.reset()
    total_loss = 0
    total_samples = 0
    for batch in dev_batches:
        batch_size = len(batch['answer_token'])
        total_loss += model.eval_test_loss(batch) * batch_size
        total_samples += batch_size
    return total_loss / total_samples
def compute_diversity(hypotheses, output_path):
    """Score hypothesis diversity via the external ``diversity.pl`` script.

    :param hypotheses: list of tokenised hypotheses (lists of strings);
        they are fed to the script on stdin, one per line.
    :param output_path: path argument forwarded to the perl script.
    :return: raw stdout of the script as bytes.
    """
    hypothesis_pipe = '\n'.join([' '.join(hyp) for hyp in hypotheses])
    pipe = subprocess.Popen(
        ["perl", './bleu_eval/diversity.pl.remove_extension', output_path],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE
    )
    # communicate() feeds stdin and drains stdout concurrently, avoiding the
    # deadlock the manual stdin.write/close + stdout.read sequence can hit
    # when the child fills its output pipe before consuming all its input.
    stdout_data, _ = pipe.communicate(hypothesis_pipe.encode())
    return stdout_data
def check(model, dev_batches, vocab, full_path='',
          output_path='', full_output=''):
    """Decode *dev_batches* with *model*, write the hypotheses to disk and
    score them.

    :param model: object exposing ``predict(batch)``.
    :param dev_batches: resettable iterable of batches with 'answer_token'
        and 'doc_tok' tensors.
    :param vocab: list mapping token ids to words.
    :param full_path: input TSV whose last column is replaced by the
        generated hypothesis.
    :param output_path: file receiving one hypothesis per line.
    :param full_output: output TSV with hypotheses spliced in.
    :return: (bleu, bleu_fact, diver_uni, diver_bi) as strings.
    """
    dev_batches.reset()
    predictions = []
    pred_words = []
    dev_toks_tmp = []
    dev_fact_tmp = []
    dev_toks = []
    dev_fact = []
    for batch in dev_batches:
        prediction, prediction_topks = model.predict(batch)
        pred_word = pred2words(prediction, vocab)
        # `np.str` was removed in NumPy 1.24; the builtin `str` is the
        # supported, behaviour-identical spelling.
        prediction = [np.asarray(x, dtype=str).tolist() for x in prediction]
        predictions += prediction
        pred_words += pred_word
        dev_toks_tmp += batch['answer_token'].numpy().tolist()
        dev_fact_tmp += batch['doc_tok'].numpy().tolist()
    for t, f in zip(dev_toks_tmp, dev_fact_tmp):
        # Strip the leading BOS token and truncate at EOS (id 3); facts are
        # truncated at padding (id 0) when present.
        t = t[1:t.index(3)]
        if 0 in f:
            f = f[:f.index(0)]
        dev_toks.append(t)
        dev_fact.append(f)
    dev_toks = pred2words(dev_toks, vocab)
    dev_fact = pred2words(dev_fact, vocab)
    dev_toks_list = [line.strip().split(' ') for line in dev_toks]
    pred_words_list = [line.strip().split(' ') for line in pred_words]
    dev_fact_list = [line.strip().split(' ') for line in dev_fact]
    bleu_result = get_bleu_moses(pred_words_list, dev_toks_list)
    bleu_fact = get_bleu_moses(dev_fact_list, pred_words_list)
    # Moses reports e.g. 'BLEU = 12.3, ...'; extract the number after '='.
    bleu = str(bleu_result).split('=')
    bleu = bleu[1].split(',')[0]
    bleu_fact = str(bleu_fact).split('=')
    bleu_fact = bleu_fact[1].split(',')[0]
    with open(output_path, 'w') as f:
        for hypothesis in pred_words_list:
            f.write(' '.join(hypothesis) + '\n')
    with open(full_path, 'r', encoding='utf8') as fr:
        full_lines = fr.readlines()
    assert (len(full_lines) == len(pred_words_list))
    with open(full_output, 'w', encoding='utf8') as fw:
        for f, g in zip(full_lines, pred_words_list):
            # Replace the last tab-separated column with the hypothesis.
            f = f.strip().split('\t')
            f[-1] = ' '.join(g).strip()
            f = '\t'.join(f)
            fw.write(f + '\n')
            fw.flush()
    diversity = compute_diversity(pred_words_list, output_path)
    diversity = str(diversity).strip().split()
    diver_uni = diversity[0][2:]
    diver_bi = diversity[1][:-3]
    return bleu, bleu_fact, diver_uni, diver_bi
def write_test_metrics(model_name, dstc_dict, path_report):
    """Append one row of DSTC evaluation metrics to a TSV report file,
    writing the header first if the report does not exist yet.

    :param model_name: identifier written in the first column.
    :param dstc_dict: dict with keys 'n_lines', 'nist' (4 values),
        'bleu' (4), 'meteor', 'entropy' (4), 'diversity' (2), 'avg_len'.
    :param path_report: path of the TSV report to create/extend.
    """
    d = dstc_dict
    row = ([d['n_lines']] + d['nist'] + d['bleu'] + [d['meteor']]
           + d['entropy'] + d['diversity'] + [d['avg_len']])
    if not os.path.isfile(path_report):
        header = (['fname', 'n_lines']
                  + ['nist%i' % i for i in range(1, 4 + 1)]
                  + ['bleu%i' % i for i in range(1, 4 + 1)]
                  + ['meteor']
                  + ['entropy%i' % i for i in range(1, 4 + 1)]
                  + ['div1', 'div2', 'avg_len'])
        with open(path_report, 'w') as f:
            f.write('\t'.join(header) + '\n')
    with open(path_report, 'a') as f:
        f.write('\t'.join(map(str, [model_name] + row)) + '\n')
| [
"numpy.asarray",
"subprocess.Popen",
"json.load",
"my_utils.squad_eval.get_bleu_moses"
] | [((1022, 1157), 'subprocess.Popen', 'subprocess.Popen', (["['perl', './bleu_eval/diversity.pl.remove_extension', output_path]"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['perl', './bleu_eval/diversity.pl.remove_extension',\n output_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n", (1038, 1157), False, 'import subprocess\n'), ((2424, 2470), 'my_utils.squad_eval.get_bleu_moses', 'get_bleu_moses', (['pred_words_list', 'dev_toks_list'], {}), '(pred_words_list, dev_toks_list)\n', (2438, 2470), False, 'from my_utils.squad_eval import get_bleu_moses\n'), ((2487, 2533), 'my_utils.squad_eval.get_bleu_moses', 'get_bleu_moses', (['dev_fact_list', 'pred_words_list'], {}), '(dev_fact_list, pred_words_list)\n', (2501, 2533), False, 'from my_utils.squad_eval import get_bleu_moses\n'), ((538, 550), 'json.load', 'json.load', (['f'], {}), '(f)\n', (547, 550), False, 'import json\n'), ((1687, 1714), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'np.str'}), '(x, dtype=np.str)\n', (1697, 1714), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python3.8
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import read_input
import numpy as np
import os
import codecs
from gurobipy import *
# NOTE(review): assigning PYTHONIOENCODING here only creates a module-level
# variable; it does NOT set the environment variable of the same name, so it
# has no effect on the interpreter's I/O encoding -- confirm intent.
PYTHONIOENCODING = "utf-8"
# Maps each special lowercase character to its capital form. Used by
# normalize_empirically() to force a character and its capital onto the same
# key (capital on the Shift level).
capitals = {u'à': u'À', u'â': u'Â', u'ç': u'Ç', u'é': u'É', u'è': u'È', u'ê': u'Ê',
            u'ë': u'Ë', u'î': u'Î', u'ï': u'Ï', u'ô': u'Ô',
            u'ù': u'Ù', u'û': u'Û', u'ü': u'Ü', u'ÿ': u'Ÿ', u'æ': u'Æ', u'œ': u'Œ',
            u'ß': u'ẞ', u'þ': u'Þ', u'ð': u'Ð', u'ŋ': u'Ŋ', u'ij': u'IJ',
            u'ə': u'Ə', u'ʒ': u'Ʒ', u'ı': u'İ'}
def get_objectives(mapping, w_p, w_a, w_f, w_e, corpus_weights, quadratic=0):
    """
    Compute the objective values of *mapping* under the given weights.

    Loads all required input data for the given corpus weights and
    delegates the actual computation to accu_get_objectives().
    Returns (objective, P, A, F, E).
    """
    (azerty,
     characters,
     keyslots,
     letters,
     p_single, p_bigram,
     performance,
     similarity_c_c, similarity_c_l,
     distance_level_0, distance_level_1,
     ergonomics) = read_input.get_all_input_values(corpus_weights)
    return accu_get_objectives(
        mapping, w_p, w_a, w_f, w_e,
        azerty,
        characters,
        keyslots,
        letters,
        p_single, p_bigram,
        performance,
        similarity_c_c, similarity_c_l,
        distance_level_0, distance_level_1,
        ergonomics, quadratic=quadratic,
    )
def accu_get_objectives(mapping, w_p, w_a, w_f, w_e,
                        azerty,
                        characters,
                        keyslots,
                        letters,
                        p_single, p_bigram,
                        performance,
                        similarity_c_c, similarity_c_l,
                        distance_level_0, distance_level_1,
                        ergonomics, quadratic=1):
    """
    For a given mapping, return the objective value for the given weights,
    and the individual objective values for P, A, F, E.

    Parameters
    ----------
    mapping : dict
        Maps characters to keyslots. Entries whose character or keyslot is
        unknown are dropped (with a warning printed).
    w_p, w_a, w_f, w_e : float
        Weights for Performance, Association, Familiarity and Ergonomics.
    quadratic : int
        If truthy, also add the quadratic character-to-character
        association cost to A.

    Returns
    -------
    (objective, P, A, F, E)
    """
    # Compute linear cost matrices: one value per (character, slot) pair.
    linear_cost, x_P, x_A, x_F, x_E = get_linear_costs(w_p, w_a, w_f, w_e,
                                                      azerty,
                                                      characters,
                                                      keyslots,
                                                      letters,
                                                      p_single, p_bigram,
                                                      performance,
                                                      similarity_c_c, similarity_c_l,
                                                      distance_level_0, distance_level_1,
                                                      ergonomics)
    # NOTE(review): the fixed mapping is loaded but never used in this
    # function -- kept in case get_fixed_mapping() has needed side effects.
    fixed = read_input.get_fixed_mapping()
    # Drop mapping entries whose character or keyslot has no cost data.
    remove_keys = []
    for c, s in mapping.items():
        if not c in characters:
            remove_keys.append(c)
            print("%s not in the to-be-mapped character set" % c)
        elif not s in keyslots:
            remove_keys.append(c)
            print("%s not mapped to a keyslot for which we have values" % s)
    mapping = {c: s for c, s in mapping.items() if c not in remove_keys}
    # Sum the linear objective terms over the mapping.
    P = 0
    A = 0
    F = 0
    E = 0
    for c, s in mapping.items():
        P += x_P[c, s]
        A += x_A[c, s]
        F += x_F[c, s]
        E += x_E[c, s]
    if quadratic:
        # Add the pairwise character-similarity cost to the association term.
        prob_sim, distance_level_0_norm = get_quadratic_costs(characters,
                                                            keyslots,
                                                            p_single,
                                                            similarity_c_c)
        for (c1, c2) in similarity_c_c:
            if c1 in mapping and c2 in mapping:
                s1 = mapping[c1]
                s2 = mapping[c2]
                A += prob_sim[c1, c2] * distance_level_0_norm[s1, s2]
    # The normalized scores can dip slightly below 0 numerically; clamp.
    if P < 0:
        print("Performance negative, rounded to 0: %f" % P)
        P = np.max([0, P])
    if A < 0:
        print("Association negative, rounded to 0: %f" % A)
        A = np.max([0, A])
    if F < 0:
        print("Familiarity negative, rounded to 0: %f" % F)
        F = np.max([0, F])
    if E < 0:
        print("Ergonomics negative, rounded to 0: %f" % E)
        E = np.max([0, E])
    objective = w_p * P + w_a * A + w_f * F + w_e * E
    print("objective: ", objective)
    return objective, P, A, F, E
def get_linear_costs(w_p, w_a, w_f, w_e,
                     azerty,
                     characters,
                     keyslots,
                     letters,
                     p_single, p_bigram,
                     performance,
                     similarity_c_c, similarity_c_l,
                     distance_level_0, distance_level_1,
                     ergonomics):
    """ computes the linear cost: for each linear variable x[c,s] compute the P, A, F and E term (as if it is chosen)
        Returns the weighted sum (linear_cost) and the normalized per-objective
        matrices x_P, x_A, x_F, x_E, each a dict keyed by (character, keyslot).
        Results are cached on disk under input/normalized/ per scenario and
        character set; the cache is reused when the files already exist.
    """
    scenario, char_set = read_input.get_scenario_and_char_set()
    # Fast path: reuse previously normalized matrices from disk.
    if os.path.isfile("input/normalized/" + "x_P_" + scenario + "_" + char_set + ".txt"):
        x_P = _read_tuple_list_to_dict("input/normalized/" + "x_P_" + scenario + "_" + char_set + ".txt")
        x_A = _read_tuple_list_to_dict("input/normalized/" + "x_A_" + scenario + "_" + char_set + ".txt")
        x_F = _read_tuple_list_to_dict("input/normalized/" + "x_F_" + scenario + "_" + char_set + ".txt")
        x_E = _read_tuple_list_to_dict("input/normalized/" + "x_E_" + scenario + "_" + char_set + ".txt")
    else:
        print("Getting linear cost")
        x_P = {}
        x_A = {}
        x_F = {}
        x_E = {}
        for c in characters:
            for s in keyslots:
                P = 0
                A = 0
                # if that character was previously not on azerty, distance is 0.
                F = p_single[c] * distance_level_1.get((s, azerty.get(c, "NaN")), 0)
                E = 0
                # Accumulate bigram-based costs against every fixed letter.
                # NOTE(review): assumes every l in letters has an entry in
                # azerty (azerty[l] is indexed without a default).
                for l in letters:
                    # update performance
                    if (c, l) in p_bigram:
                        P += (p_bigram[(c, l)] * performance[(s, azerty[l])])
                    if (l, c) in p_bigram:
                        P += (p_bigram[(l, c)] * performance[(azerty[l], s)])
                    # update association. This is symmetric, so we add it twice to make it comparable with the other scores
                    if (c, l) in similarity_c_l or (l, c) in similarity_c_l:
                        try:
                            A += 2 * (similarity_c_l[(c, l)] * distance_level_0[s, azerty[l]])
                        except KeyError:
                            A += 2 * (similarity_c_l[(l, c)] * distance_level_0[s, azerty[l]])
                    # update ergonomics
                    if (c, l) in p_bigram:
                        E += (p_bigram[(c, l)] * ergonomics[(s, azerty[l])])
                    if (l, c) in p_bigram:
                        E += (p_bigram[(l, c)] * ergonomics[(azerty[l], s)])
                x_P[c, s] = P
                x_A[c, s] = A
                x_F[c, s] = F
                x_E[c, s] = E
        # now normalize these terms by minimizing/maximizing each individually such that they are all between 0 and 1
        print("========= Normalize Performance =========")
        x_P = normalize_empirically(x_P, characters, keyslots, capitalization_constraints=1)
        print("========= Normalize Association =========")
        x_A = normalize_empirically(x_A, characters, keyslots, capitalization_constraints=1)
        print("========= Normalize Familiarity =========")
        x_F = normalize_empirically(x_F, characters, keyslots, capitalization_constraints=1)
        print("========= Normalize Ergonomics =========")
        x_E = normalize_empirically(x_E, characters, keyslots, capitalization_constraints=1)
        # write into file for later use
        write_tuplelist(x_P, "input/normalized/" + "x_P_" + scenario + "_" + char_set + ".txt")
        write_tuplelist(x_A, "input/normalized/" + "x_A_" + scenario + "_" + char_set + ".txt")
        write_tuplelist(x_F, "input/normalized/" + "x_F_" + scenario + "_" + char_set + ".txt")
        write_tuplelist(x_E, "input/normalized/" + "x_E_" + scenario + "_" + char_set + ".txt")
    # weighted sum of linear terms
    linear_cost = {}
    for c in characters:
        for s in keyslots:
            linear_cost[c, s] = (w_p * x_P[c, s]) + (w_a * x_A[c, s]) + (w_f * x_F[c, s]) + (w_e * x_E[c, s])
    return linear_cost, x_P, x_A, x_F, x_E
def get_quadratic_costs(characters, \
                        keyslots, \
                        p_single, \
                        similarity_c_c):
    """
    Return (prob_sim, distance_level_0_norm) for the quadratic association
    term: prob_sim[(c1, c2)] is the character-pair similarity (0 where a
    pair is covered by the capitalization constraints), and
    distance_level_0_norm is the key-distance matrix rescaled so the full
    quadratic objective lies in [0, 1]. Results are cached on disk per
    scenario and character set.
    """
    scenario, char_set = read_input.get_scenario_and_char_set()
    # Fast path: reuse previously normalized matrices from disk.
    if os.path.isfile("input/normalized/" + "prob_sim_" + scenario + "_" + char_set + ".txt"):
        prob_sim = _read_tuple_list_to_dict("input/normalized/" + "prob_sim_" + scenario + "_" + char_set + ".txt")
        distance_level_0_norm = _read_tuple_list_to_dict(
            "input/normalized/" + "distance_" + scenario + "_" + char_set + ".txt")
    else:
        distance_level_0 = read_input.get_distance_consistency()
        print("Getting quadratic cost")
        prob_sim = {}
        for c1 in characters:
            for c2 in characters:
                if (c1, c2) in similarity_c_c.keys():
                    # do not add association cost if both lowercase and capital letter are
                    # in character set, will be accounted for by cap.constr.
                    if not (c1 in capitals and c2 == capitals[c1]):
                        p = similarity_c_c[c1, c2]
                        prob_sim[(c1, c2)] = p
                    else:
                        prob_sim[(c1, c2)] = 0
                else:
                    prob_sim[(c1, c2)] = 0
        # normalize with normalization factor of full objective (later multiplied with distance)
        max_sum = 0
        min_sum = 0
        for c1 in characters:
            # for each character determine the maximum association cost for assigning that character to a slot and sum up
            costs_per_slot_min = []
            costs_per_slot_max = []
            for s1 in keyslots:
                tmp_sum_min = 0  # sum up the association cost for all other characters
                tmp_sum_max = 0
                for c2 in characters:
                    if c1 != c2:
                        # add maximum association cost if that character was assigned to a key
                        tmp_sum_max += np.max(
                            [prob_sim[c1, c2] * distance_level_0[s1, s2] for s2 in keyslots if s2 != s1])
                        tmp_sum_min += np.min(
                            [prob_sim[c1, c2] * distance_level_0[s1, s2] for s2 in keyslots if s2 != s1])
                costs_per_slot_min.append(tmp_sum_min)
                costs_per_slot_max.append(tmp_sum_max)
            max_sum += np.max(costs_per_slot_max)  #
            min_sum += np.min(costs_per_slot_min)  #
        # normalization factor is included in the distance because there all values are > 0. Otherwise there are some problems
        distance_level_0_norm = distance_level_0.copy()
        n = len(characters)
        for (s1, s2), v in distance_level_0.items():
            if v > 0:
                distance_level_0_norm[(s1, s2)] = ((v - (min_sum / float(n))) / (float(max_sum) - float(min_sum)))
        # write into file for later use
        write_tuplelist(prob_sim, "input/normalized/" + "prob_sim_" + scenario + "_" + char_set + ".txt")
        write_tuplelist(distance_level_0_norm, "input/normalized/" + "distance_" + scenario + "_" + char_set + ".txt")
    return prob_sim, distance_level_0_norm
def normalize_dict_values(d):
    """
    Min-max normalize the values of *d* in place so each lies in [0, 1].

    The minimum value maps to 0 and the maximum to 1. (The previous
    docstring claimed the values "maximally sum up to 1", which is not
    what the code does.) If the dictionary is empty or all values are
    equal, all values are set to 0.0 instead of raising
    ZeroDivisionError / ValueError.

    Returns the (mutated) dictionary.
    """
    if not d:
        return d
    maximum = np.max(list(d.values()))
    minimum = np.min(list(d.values()))
    value_range = float(maximum - minimum)
    for k, v in d.items():
        # Guard against a degenerate (constant) value distribution.
        d[k] = (v - minimum) / value_range if value_range else 0.0
    return d
def normalize_empirically(X_O, characters, keyslots, capitalization_constraints=1):
    """
    Normalize empirically for the result to be between 0 and 1.

    First minimizes and maximizes the keyboard assignment problem for the
    given cost matrix (X_O) with Gurobi, then rescales all values in X_O so
    that a full assignment sums to 0 (best case) up to 1 (worst case).
    Can only be used for the linear terms (Performance, Association,
    Ergonomics, Familiarity). Mutates and returns X_O.
    """
    if len(characters) > len(keyslots):
        # Infeasible: each character needs its own slot.
        # (Fixed typo in the error message: "sthan" -> "than".)
        print("Error: more characters than keyslots")
        return
    m = Model("keyboard_layout")
    # add decision variables: x[c, s] == 1 iff character c goes to slot s
    x = {}
    for c in characters:
        for s in keyslots:
            n = u"" + c + u"_to_" + s
            # NOTE(review): encoding makes the Gurobi variable name a bytes
            # object under Python 3 -- confirm this is intended.
            n = n.encode("utf-8")
            x[c, s] = m.addVar(vtype=GRB.BINARY, name=n)
    m.update()
    m._vars = m.getVars()
    # Define the objective terms
    O = quicksum(
        X_O[c, s] * x[c, s] \
        for c in characters for s in keyslots
    )
    m._O = O
    # add the constraints. One for each character, one for each keyslot
    for c in characters:
        m.addConstr(quicksum(x[c, s] for s in keyslots) == 1, c + "_mapped_once")
    for s in keyslots:
        m.addConstr(quicksum(x[c, s] for c in characters) <= 1, s + "_assigned_at_most_once")
    if capitalization_constraints:
        print("Adding capitalization constraints")
        for c, s_c in capitals.items():
            if c in characters and s_c in characters:
                for k in keyslots:
                    if "Shift" in k:
                        # never place the lowercase character on a shifted slot
                        m.addConstr(x[c, k] == 0, c + "_not_mapped_to_shifted_key_" + k)
                    else:
                        if k + "_Shift" in keyslots:
                            # if character is assigned to this key, its capital version must be assigned to shifted version of the key
                            m.addConstr(x[c, k] - x[s_c, k + "_Shift"] == 0,
                                        c + "_and_" + s_c + "mapped_to_shifted_" + k)
                        else:
                            # unshifted version should not be assigned to key where shifted version is not available
                            m.addConstr(x[c, k] == 0, c + "_no_shift_available_" + k)
    m.setParam("OutputFlag", 0)
    m.update()
    # Minimize: best possible total cost for a full assignment
    m.setObjective(O, GRB.MINIMIZE)
    m.update()
    m.optimize()
    minimum = m._O.getValue()
    print("===> Minimum: %.5f" % minimum)
    # Maximize: worst possible total cost for a full assignment
    m.setObjective(O, GRB.MAXIMIZE)
    m.update()
    m.optimize()
    maximum = m._O.getValue()
    print("===> Maximum: %.5f" % maximum)
    # Rescale so each of the n assigned characters contributes its share;
    # a full assignment then maps onto [0, 1].
    n = len(characters)
    for k, v in X_O.items():
        X_O[k] = (v - (minimum / float(n))) / (float(maximum) - float(minimum))
    return X_O
def _read_tuple_list_to_dict(path):
"""
Reads a file into a dictionary.
The file must have the following format:
key1 key2 value
Then the dictionary is of the form:
{(key1,key2):value}
"""
with codecs.open(path, 'r', encoding="utf-8") as bigram_file:
all_lines = bigram_file.readlines()
lines = [l.rstrip() for l in all_lines]
# create dict
p_bigrams = {}
for l in lines:
parts = l.split(" ")
if len(parts) == 3:
if parts[0] != "" and parts[1] != "":
p_bigrams[(parts[0], parts[1])] = float(parts[2])
return p_bigrams
def write_tuplelist(data, filename):
    """
    Write a ``{(key1, key2): value}`` dictionary to *filename*, one
    ``key1 key2 value`` line per entry (the inverse of
    _read_tuple_list_to_dict).
    """
    data_strings = ["%s %s %f\n" % (s, l, n) for (s, l), n in data.items()]
    # Bug fix: the old code encoded each line to bytes and then wrote them
    # to a text-mode file, which raises TypeError on Python 3 (the file's
    # shebang targets python3.8). Write UTF-8 text directly instead.
    with open(filename, 'w', encoding="utf-8") as data_file:
        data_file.writelines(data_strings)
| [
"read_input.get_all_input_values",
"codecs.open",
"read_input.get_fixed_mapping",
"os.path.isfile",
"numpy.max",
"read_input.get_distance_consistency",
"read_input.get_scenario_and_char_set",
"numpy.min"
] | [((1996, 2043), 'read_input.get_all_input_values', 'read_input.get_all_input_values', (['corpus_weights'], {}), '(corpus_weights)\n', (2027, 2043), False, 'import read_input\n'), ((3976, 4006), 'read_input.get_fixed_mapping', 'read_input.get_fixed_mapping', ([], {}), '()\n', (4004, 4006), False, 'import read_input\n'), ((6356, 6394), 'read_input.get_scenario_and_char_set', 'read_input.get_scenario_and_char_set', ([], {}), '()\n', (6392, 6394), False, 'import read_input\n'), ((6403, 6488), 'os.path.isfile', 'os.path.isfile', (["('input/normalized/' + 'x_P_' + scenario + '_' + char_set + '.txt')"], {}), "('input/normalized/' + 'x_P_' + scenario + '_' + char_set +\n '.txt')\n", (6417, 6488), False, 'import os\n'), ((10103, 10141), 'read_input.get_scenario_and_char_set', 'read_input.get_scenario_and_char_set', ([], {}), '()\n', (10139, 10141), False, 'import read_input\n'), ((10149, 10239), 'os.path.isfile', 'os.path.isfile', (["('input/normalized/' + 'prob_sim_' + scenario + '_' + char_set + '.txt')"], {}), "('input/normalized/' + 'prob_sim_' + scenario + '_' +\n char_set + '.txt')\n", (10163, 10239), False, 'import os\n'), ((5304, 5318), 'numpy.max', 'np.max', (['[0, P]'], {}), '([0, P])\n', (5310, 5318), True, 'import numpy as np\n'), ((5405, 5419), 'numpy.max', 'np.max', (['[0, A]'], {}), '([0, A])\n', (5411, 5419), True, 'import numpy as np\n'), ((5506, 5520), 'numpy.max', 'np.max', (['[0, F]'], {}), '([0, F])\n', (5512, 5520), True, 'import numpy as np\n'), ((5606, 5620), 'numpy.max', 'np.max', (['[0, E]'], {}), '([0, E])\n', (5612, 5620), True, 'import numpy as np\n'), ((10532, 10569), 'read_input.get_distance_consistency', 'read_input.get_distance_consistency', ([], {}), '()\n', (10567, 10569), False, 'import read_input\n'), ((16531, 16571), 'codecs.open', 'codecs.open', (['path', '"""r"""'], {'encoding': '"""utf-8"""'}), "(path, 'r', encoding='utf-8')\n", (16542, 16571), False, 'import codecs\n'), ((12341, 12367), 'numpy.max', 'np.max', 
(['costs_per_slot_max'], {}), '(costs_per_slot_max)\n', (12347, 12367), True, 'import numpy as np\n'), ((12394, 12420), 'numpy.min', 'np.min', (['costs_per_slot_min'], {}), '(costs_per_slot_min)\n', (12400, 12420), True, 'import numpy as np\n'), ((11941, 12031), 'numpy.max', 'np.max', (['[(prob_sim[c1, c2] * distance_level_0[s1, s2]) for s2 in keyslots if s2 != s1]'], {}), '([(prob_sim[c1, c2] * distance_level_0[s1, s2]) for s2 in keyslots if\n s2 != s1])\n', (11947, 12031), True, 'import numpy as np\n'), ((12094, 12184), 'numpy.min', 'np.min', (['[(prob_sim[c1, c2] * distance_level_0[s1, s2]) for s2 in keyslots if s2 != s1]'], {}), '([(prob_sim[c1, c2] * distance_level_0[s1, s2]) for s2 in keyslots if\n s2 != s1])\n', (12100, 12184), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import re, sys
from math import hypot, atan2, degrees, pi as PI
import numpy as np
# Python 2-only idiom: builtin reload() and sys.setdefaultencoding() do not
# exist in Python 3. NOTE(review): this module will not import under Python 3
# as written.
reload(sys)
sys.setdefaultencoding('utf-8')
class DistMatrix(object):
    """Precomputed all-pairs distance matrix over a sequence of points."""

    def __init__(self, vec):
        # vec: sequence of Point-like objects exposing distance(other)
        self.vec = vec
        self._dist_products()

    def _dist_products(self):
        # Build the full (n x n) matrix of pairwise distances.
        size = len(self.vec)
        self.matrix = np.zeros((size, size))
        for i, p in enumerate(self.vec):
            for j, q in enumerate(self.vec):
                self.matrix[i, j] = p.distance(q)

    def select_in(self, index, max_dist):
        """Indices of all points within max_dist (inclusive) of the point at *index*."""
        return np.where(self.matrix[index] <= max_dist)[0]

    def select_out(self, index, min_dist):
        """Indices of all points strictly farther than min_dist from the point at *index*."""
        return np.where(self.matrix[index] > min_dist)[0]

    def distance(self, from_index, to_index):
        """Precomputed distance between two points, looked up by index."""
        return self.matrix[from_index, to_index]

    def __str__(self):
        return str(self.matrix)
class Point(object):
    """An integer 2-D point with parsing and geometry helpers."""

    POINT_PROG = re.compile(r'\[(-?[0-9]+),(-?[0-9]+)\]')

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def angle(self, other):
        """Angle of the vector from *other* to *self*, in units of pi radians (0.0 <= angle < 2)."""
        dx = self.x - other.x
        dy = self.y - other.y
        return (atan2(dy, dx) % (2.0 * PI)) / PI

    @property
    def width(self):
        return self.x

    @property
    def height(self):
        return self.y

    def equals(self, point):
        """True when both coordinates match exactly."""
        return self.x == point.x and self.y == point.y

    def distance(self, point):
        """Euclidean distance to *point*."""
        return hypot(point.x - self.x, point.y - self.y)

    def __str__(self):
        return "[%d,%d]" % (self.x, self.y)

    @staticmethod
    def to_point(str_point):
        """Parse a "[x,y]" string into a Point; return Point(0, 0) on any failure."""
        result = Point(0, 0)
        try:
            m = Point.POINT_PROG.match(str_point)
            if m is not None:
                result = Point(int(m.group(1)), int(m.group(2)))
        except:
            pass
        return result
class Bounds(object):
    """An axis-aligned rectangle spanned by a top-left point p1 and a bottom-right point p2."""
    BOUNDS_PROG = re.compile(r'\[(-?[0-9]+),(-?[0-9]+)\]\[(-?[0-9]+),(-?[0-9]+)\]')
    # Margin kept from the rectangle edge when deriving scroll/swipe gestures
    # -- presumably pixels; confirm units against the caller.
    SCROLL_SWIPE_MARGIN = 2
    # Codes returned by relative_pos().
    RELATIVE_POS_OVERLAP = 5
    RELATIVE_POS_LEFT = 4
    RELATIVE_POS_TOP = 2
    RELATIVE_POS_RIGHT = 3
    RELATIVE_POS_BOTTOM = 1
    RELATIVE_POS_UNKNOWN = -1
    def __init__(self, p1, p2):
        # p1: top-left corner, p2: bottom-right corner (Point instances).
        self.p1 = p1
        self.p2 = p2
    @property
    def left(self):
        return self.p1.x
    @property
    def top(self):
        return self.p1.y
    @property
    def right(self):
        return self.p2.x
    @property
    def bottom(self):
        return self.p2.y
    @property
    def center(self):
        # Integer midpoint of the rectangle.
        x = self.left + int(self.width*0.5)
        y = self.top + int(self.height*0.5)
        return Point(x, y)
    @property
    def width(self):
        # Clamped to 0 for degenerate (inverted) rectangles.
        w = self.right - self.left
        if w < 0:
            w = 0
        return w
    @property
    def height(self):
        # Clamped to 0 for degenerate (inverted) rectangles.
        h = self.bottom - self.top
        if h < 0:
            h = 0
        return h
    @property
    def area(self):
        a = self.width * self.height
        if a < 0:
            a = 0
        return a
    def _overlap_pos(self, bounds):
        """Score where the contained rectangle's center falls in a 3x3 grid of the container (see table below)."""
        # p is the containing rectangle, c the contained one.
        p = self
        c = bounds
        if bounds.contains(self):
            p = bounds
            c = self
        # NOTE(review): integer division under Python 2 (this module calls
        # sys.setdefaultencoding at import time, so it targets Python 2).
        h_width = p.width / 3
        h_height = p.height / 3
        # pos_01(8) | pos_02(9) | pos_03(7)
        # pos_04(6) | pos_05(5) | pos_06(4)
        # pos_07(3) | pos_08(2) | pos_09(1)
        # Each pos_NN is anchored at p's top-left corner and grows to cover
        # 1, 2 or 3 grid cells in each direction; the first matching region
        # in the elif-chain below determines alpha.
        pos_01 = Bounds(p.p1, Point(p.left+(h_width*1), p.top+(h_height*1)))
        pos_02 = Bounds(p.p1, Point(p.left+(h_width*2), p.top+(h_height*1)))
        pos_03 = Bounds(p.p1, Point(p.left+(h_width*3), p.top+(h_height*1)))
        pos_04 = Bounds(p.p1, Point(p.left+(h_width*1), p.top+(h_height*2)))
        pos_05 = Bounds(p.p1, Point(p.left+(h_width*2), p.top+(h_height*2)))
        pos_06 = Bounds(p.p1, Point(p.left+(h_width*3), p.top+(h_height*2)))
        pos_07 = Bounds(p.p1, Point(p.left+(h_width*1), p.top+(h_height*3)))
        pos_08 = Bounds(p.p1, Point(p.left+(h_width*2), p.top+(h_height*3)))
        pos_09 = Bounds(p.p1, Point(p.left+(h_width*3), p.top+(h_height*3)))
        b_center = c.center
        alpha = 0
        if pos_01.contains_point(b_center.x, b_center.y):
            alpha = 0.8
        elif pos_02.contains_point(b_center.x, b_center.y):
            alpha = 0.9
        elif pos_03.contains_point(b_center.x, b_center.y):
            alpha = 0.7
        elif pos_04.contains_point(b_center.x, b_center.y):
            alpha = 0.6
        elif pos_05.contains_point(b_center.x, b_center.y):
            alpha = 0.5
        elif pos_06.contains_point(b_center.x, b_center.y):
            alpha = 0.4
        elif pos_07.contains_point(b_center.x, b_center.y):
            alpha = 0.3
        elif pos_08.contains_point(b_center.x, b_center.y):
            alpha = 0.2
        elif pos_09.contains_point(b_center.x, b_center.y):
            alpha = 0.1
        return alpha
    #                 top(2)
    # left(4)       overlap(5)      right(3)
    #               bottom(1)
    def relative_pos(self, bounds):
        """Return a RELATIVE_POS_* code describing where *bounds* lies relative to self."""
        # overlap
        if self.intersect(bounds).area > 0:
            alpha = 0
            # if self.contains(bounds) or bounds.contains(self):
            #     alpha = self._overlap_pos(bounds)
            # NOTE(review): the sub-position refinement above is disabled,
            # so alpha is always 0 here and the code returned is exactly
            # RELATIVE_POS_OVERLAP.
            return self.RELATIVE_POS_OVERLAP + alpha
        # The angle-based classification below was replaced by the simpler
        # edge comparisons that follow; kept for reference.
        # # 0.0 <= angle < 2.8*PI
        # lt_angle = self.center.angle(self.p1)
        # lb_angle = self.center.angle(Point(self.left, self.bottom))
        # rt_angle = self.center.angle(Point(self.right, self.top))
        # rb_angle = self.center.angle(self.p2)
        #
        # c2c_angle = self.center.angle(bounds.center)
        #
        # #left(e.g., degree => c2c_angle > 315 && c2c_angle <= 45)
        # if c2c_angle > lb_angle and c2c_angle <= lt_angle:
        #     return self.RELATIVE_POS_LEFT
        #
        # #top(e.g., degree => c2c_angle > 45 && c2c_angle <= 135)
        # if c2c_angle > lt_angle and c2c_angle <= rt_angle:
        #     return self.RELATIVE_POS_TOP
        #
        # #right(e.g., degree => c2c_angle > 135 && c2c_angle <= 225)
        # if c2c_angle > rt_angle and c2c_angle <= rb_angle:
        #     return self.RELATIVE_POS_RIGHT
        #
        # #bottom(e.g., degree => c2c_angle > 225 && c2c_angle <= 315)
        # if c2c_angle > rb_angle and c2c_angle <= lb_angle:
        #     return self.RELATIVE_POS_BOTTOM
        # left
        if self.left > bounds.left and self.left >= bounds.right and self.top < bounds.bottom and self.bottom > bounds.top:
            return self.RELATIVE_POS_LEFT
        # top
        if self.top >= bounds.bottom:
            return self.RELATIVE_POS_TOP
        # right
        if self.right <= bounds.left and self.right < bounds.right and self.top < bounds.bottom and self.bottom > bounds.top:
            return self.RELATIVE_POS_RIGHT
        # bottom
        if self.top <= bounds.bottom:
            return self.RELATIVE_POS_BOTTOM
        return self.RELATIVE_POS_UNKNOWN
    def size_up(self, width, height):
        """Return a new Bounds grown by *width*/*height* on every side, with the top-left clipped at the origin."""
        p1_x = self.left-width
        p1_y = self.top-height
        if p1_x <= 0: p1_x = 0
        if p1_y <= 0: p1_y = 0
        p2_x = self.right+width
        p2_y = self.bottom+height
        return Bounds(Point(p1_x, p1_y), Point(p2_x, p2_y))
    def contains_point(self, x, y):
        """True when (x, y) lies inside this rectangle (borders inclusive)."""
        # check for empty first
        if self.area == 0:
            return False
        return (x >= self.left and y >= self.top and x <= self.right and y <= self.bottom)
    def contains(self, bounds):
        """True when *bounds* (non-empty) lies entirely inside this rectangle."""
        if bounds.area == 0:
            return False
        return self.contains_point(bounds.left, bounds.top) and self.contains_point(bounds.right, bounds.bottom)
    def intersect(self, bounds):
        """Return the intersection rectangle; degenerate (area 0) when there is no overlap."""
        p1 = Point(max(self.left, bounds.left), max(self.top, bounds.top))
        p2 = Point(min(self.right, bounds.right), min(self.bottom, bounds.bottom))
        return Bounds(p1, p2)
    def __str__(self):
        return "%s%s" % (self.p1, self.p2)
    def equals(self, bounds):
        return self.p1.equals(bounds.p1) and self.p2.equals(bounds.p2)
    @staticmethod
    def to_bounds(str_bounds):
        """Parse a "[x1,y1][x2,y2]" string; return an empty Bounds on any failure."""
        p1 = Point(0,0)
        p2 = Point(0,0)
        try:
            m = re.match(Bounds.BOUNDS_PROG, str_bounds)
            if(m is not None):
                p1 = Point(int(m.group(1)), int(m.group(2)))
                p2 = Point(int(m.group(3)), int(m.group(4)))
        except:
            pass
        return Bounds(p1, p2)
    ###################################### Optional
    def to_swipe_val(self):
        """Horizontal swipe gesture: from near the right edge to near the left edge, through the vertical center."""
        center = self.center
        x = self.p2.x-self.SCROLL_SWIPE_MARGIN
        if x < 0:
            x = self.SCROLL_SWIPE_MARGIN
        afrom = Point(x, center.y)
        ato = Point(self.p1.x+self.SCROLL_SWIPE_MARGIN, center.y)
        return Bounds(afrom, ato)
    def to_scroll_val(self):
        """Vertical scroll gesture: from near the bottom edge to near the top edge, through the horizontal center."""
        center = self.center
        y = self.p2.y-self.SCROLL_SWIPE_MARGIN
        if y < 0:
            y = self.SCROLL_SWIPE_MARGIN
        afrom = Point(center.x, y)
        ato = Point(center.x, self.p1.y+self.SCROLL_SWIPE_MARGIN)
        return Bounds(afrom, ato)
    def to_touch_val(self):
        """Touch target: the rectangle's center point."""
        return self.center
| [
"math.hypot",
"math.atan2",
"numpy.zeros",
"re.match",
"numpy.where",
"sys.setdefaultencoding",
"re.compile"
] | [((120, 151), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (142, 151), False, 'import re, sys\n'), ((1018, 1059), 're.compile', 're.compile', (['"""\\\\[(-?[0-9]+),(-?[0-9]+)\\\\]"""'], {}), "('\\\\[(-?[0-9]+),(-?[0-9]+)\\\\]')\n", (1028, 1059), False, 'import re, sys\n'), ((2001, 2069), 're.compile', 're.compile', (['"""\\\\[(-?[0-9]+),(-?[0-9]+)\\\\]\\\\[(-?[0-9]+),(-?[0-9]+)\\\\]"""'], {}), "('\\\\[(-?[0-9]+),(-?[0-9]+)\\\\]\\\\[(-?[0-9]+),(-?[0-9]+)\\\\]')\n", (2011, 2069), False, 'import re, sys\n'), ((390, 401), 'numpy.zeros', 'np.zeros', (['r'], {}), '(r)\n', (398, 401), True, 'import numpy as np\n'), ((1563, 1604), 'math.hypot', 'hypot', (['(point.x - self.x)', '(point.y - self.y)'], {}), '(point.x - self.x, point.y - self.y)\n', (1568, 1604), False, 'from math import hypot, atan2, degrees, pi as PI\n'), ((653, 693), 'numpy.where', 'np.where', (['(self.matrix[index] <= max_dist)'], {}), '(self.matrix[index] <= max_dist)\n', (661, 693), True, 'import numpy as np\n'), ((766, 805), 'numpy.where', 'np.where', (['(self.matrix[index] > min_dist)'], {}), '(self.matrix[index] > min_dist)\n', (774, 805), True, 'import numpy as np\n'), ((1778, 1815), 're.match', 're.match', (['Point.POINT_PROG', 'str_point'], {}), '(Point.POINT_PROG, str_point)\n', (1786, 1815), False, 'import re, sys\n'), ((8518, 8558), 're.match', 're.match', (['Bounds.BOUNDS_PROG', 'str_bounds'], {}), '(Bounds.BOUNDS_PROG, str_bounds)\n', (8526, 8558), False, 'import re, sys\n'), ((1263, 1276), 'math.atan2', 'atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (1268, 1276), False, 'from math import hypot, atan2, degrees, pi as PI\n')] |
r"""
This module provides some tests of the integrator to known integrals.
Note, these are not the transformations, just the plain integrals, :math:`\int_0^\infty f(x) J_\nu(x) dx`
There is a corresponding notebook in devel/ that runs each of these functions through a grid of N and h,
showing the pattern of accuracy. This could be useful for finding the correct numbers to choose for these
for other unknown functions.
"""
import pytest
import numpy as np
from scipy.special import gamma, gammainc, gammaincc, k0
from hankel import HankelTransform
def gammainc_(a, x):
    """Non-regularised lower incomplete gamma function: gamma(a) * P(a, x)."""
    return gammainc(a, x) * gamma(a)
def gammaincc_(a, x):
    """Non-regularised upper incomplete gamma function: gamma(a) * Q(a, x)."""
    return gammaincc(a, x) * gamma(a)
def test_nu0_f_unity():
    """
    Integrate f(x) = 1 against J_0; the analytic answer is 1.
    This test is done in the Ogata (2005) paper, section 5.
    """
    transform = HankelTransform(nu=0, N=50, h=0.1)
    result = transform.integrate(lambda x: 1, False, False)
    print("Numerical Result: ", result, " (required %s)" % 1)
    assert np.isclose(result, 1, rtol=1e-3)
def test_nu0_f_x_on_x2():
    """
    Integrate f(x) = x/(x**2 + 1) against J_0; the analytic answer is K_0(1).
    This test is done in the Ogata (2005) paper, section 5.
    """
    transform = HankelTransform(nu=0, N=50, h=10 ** -1.5)

    def integrand(x):
        return x / (x ** 2 + 1)

    result = transform.integrate(integrand, False, False)
    expected = k0(1)
    print("Numerical Result: ", result, " (required %s)" % expected)
    assert np.isclose(result, expected, rtol=1e-3)
def test_nu0_f_x2():
    """
    Integrate f(x) = x**2 against J_0; the analytic answer is -1
    (result on wikipedia).
    """
    transform = HankelTransform(nu=0, N=100, h=10 ** -1.5)
    result = transform.integrate(lambda x: x ** 2, False, False)
    print("Numerical Result: ", result, " (required -1)")
    assert np.isclose(result, -1, rtol=1e-3)
def test_nu0_x4():
    """
    Integrate f(x) = x**4 against J_0; the analytic answer is 9
    (result on wikipedia).
    """
    transform = HankelTransform(nu=0, N=150, h=10 ** -1.5)
    result = transform.integrate(lambda x: x ** 4, False, False)
    print("Numerical Result: ", result, " (required 9)")
    assert np.isclose(result, 9, rtol=1e-3)
def test_nu0_1_on_sqrt_x():
    """
    Integrate f(x) = 1/sqrt(x) against J_0 (result on wikipedia).

    NOTE: the choice of N and h here is REALLY finnicky (check devel/).
    """
    transform = HankelTransform(nu=0, N=160, h=10 ** -3.5)

    def integrand(x):
        return 1.0 / np.sqrt(x)

    result = transform.integrate(integrand, False, False)
    m = -1.5
    expected = 2 ** (m + 1) * gamma(m / 2 + 1) / gamma(-m / 2)
    print("Numerical Result: ", result, " (required %s)" % expected)
    assert np.isclose(result, expected, rtol=1e-3)
def test_nu0_x_on_sqrt_x2_pz2():
    """
    Integrate f(x) = x/sqrt(x**2 + z**2) against J_0; the analytic answer
    is exp(-z) (result on wikipedia).
    """
    # The N required is highly dependent on z; smaller z is harder.
    transform = HankelTransform(nu=0, N=50, h=10 ** -1.3)
    z = 1

    def integrand(x):
        return x / np.sqrt(x ** 2 + z ** 2)

    result = transform.integrate(integrand, False, False)
    expected = np.exp(-z)
    print("Numerical Result: ", result, " (required %s)" % expected)
    assert np.isclose(result, expected, rtol=1e-3)
def test_nu0_f_gauss():
    """
    Integrate the Gaussian f(x) = x * exp(-0.5 * z**2 * x**2) against J_0
    (result on wikipedia).
    """
    z = 2
    transform = HankelTransform(nu=0, N=50, h=0.01)

    def integrand(x):
        return x * np.exp(-0.5 * z ** 2 * x ** 2)

    result = transform.integrate(integrand, False, False)
    expected = 1.0 / z ** 2 * np.exp(-0.5 / z ** 2)
    print("Numerical Result: ", result, " (required %s)" % expected)
    assert np.isclose(result, expected, rtol=1e-3)
@pytest.mark.parametrize(
    "s, nu, N, h",
    [
        [0, 1, 50, 0.05],
        [0, 2, 50, 0.05],
        [0.5, 1, 50, 0.05],
        [-2, 2, 600, 10 ** -2.6],  # This is pretty finnicky
        [-0.783, 1, 50, 0.05],
    ],
)
def test_nu_varying_powerlaw(s, nu, N, h):
    """Integrate f(x) = x**(s+1) against J_nu and compare to the closed form."""
    transform = HankelTransform(nu=nu, N=N, h=h)
    result = transform.integrate(lambda x: x ** (s + 1), False, False)
    expected = 2 ** (s + 1) * gamma(0.5 * (2 + nu + s)) / gamma(0.5 * (nu - s))
    print("Numerical Result: ", result, " (required %s)" % expected)
    assert np.isclose(result, expected, rtol=1e-3)
@pytest.mark.parametrize(
    "s, nu, N, h",
    [[0.5, 1, 50, 0.05], [0.783, 1, 50, 0.05], [1.0, 0.5, 500, 0.01]],
)
def test_nu_varying_gamma_mod(s, nu, N, h):
    """Integrate an incomplete-gamma-modulated power law against J_nu."""
    transform = HankelTransform(nu=nu, N=N, h=h)

    def integrand(x):
        return x ** (nu - 2 * s + 1) * gammainc_(s, x ** 2)

    result = transform.integrate(integrand, False, False)
    expected = 0.5 ** (2 * s - nu - 1) * gammaincc_(1 - s + nu, 0.25)
    print("Numerical Result: ", result, " (required %s)" % expected)
    assert np.isclose(result, expected, rtol=1e-3)
| [
"hankel.HankelTransform",
"scipy.special.gammaincc",
"scipy.special.gammainc",
"numpy.isclose",
"scipy.special.k0",
"numpy.exp",
"pytest.mark.parametrize",
"scipy.special.gamma",
"numpy.sqrt"
] | [((3124, 3273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s, nu, N, h"""', '[[0, 1, 50, 0.05], [0, 2, 50, 0.05], [0.5, 1, 50, 0.05], [-2, 2, 600, 10 **\n -2.6], [-0.783, 1, 50, 0.05]]'], {}), "('s, nu, N, h', [[0, 1, 50, 0.05], [0, 2, 50, 0.05],\n [0.5, 1, 50, 0.05], [-2, 2, 600, 10 ** -2.6], [-0.783, 1, 50, 0.05]])\n", (3147, 3273), False, 'import pytest\n'), ((3685, 3795), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""s, nu, N, h"""', '[[0.5, 1, 50, 0.05], [0.783, 1, 50, 0.05], [1.0, 0.5, 500, 0.01]]'], {}), "('s, nu, N, h', [[0.5, 1, 50, 0.05], [0.783, 1, 50, \n 0.05], [1.0, 0.5, 500, 0.01]])\n", (3708, 3795), False, 'import pytest\n'), ((815, 849), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': '(0)', 'N': '(50)', 'h': '(0.1)'}), '(nu=0, N=50, h=0.1)\n', (830, 849), False, 'from hankel import HankelTransform\n'), ((970, 1000), 'numpy.isclose', 'np.isclose', (['ans', '(1)'], {'rtol': '(0.001)'}), '(ans, 1, rtol=0.001)\n', (980, 1000), True, 'import numpy as np\n'), ((1149, 1190), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': '(0)', 'N': '(50)', 'h': '(10 ** -1.5)'}), '(nu=0, N=50, h=10 ** -1.5)\n', (1164, 1190), False, 'from hankel import HankelTransform\n'), ((1464, 1506), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': '(0)', 'N': '(100)', 'h': '(10 ** -1.5)'}), '(nu=0, N=100, h=10 ** -1.5)\n', (1479, 1506), False, 'from hankel import HankelTransform\n'), ((1629, 1660), 'numpy.isclose', 'np.isclose', (['ans', '(-1)'], {'rtol': '(0.001)'}), '(ans, -1, rtol=0.001)\n', (1639, 1660), True, 'import numpy as np\n'), ((1730, 1772), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': '(0)', 'N': '(150)', 'h': '(10 ** -1.5)'}), '(nu=0, N=150, h=10 ** -1.5)\n', (1745, 1772), False, 'from hankel import HankelTransform\n'), ((1893, 1923), 'numpy.isclose', 'np.isclose', (['ans', '(9)'], {'rtol': '(0.001)'}), '(ans, 9, rtol=0.001)\n', (1903, 1923), True, 'import numpy as np\n'), ((2055, 2097), 
'hankel.HankelTransform', 'HankelTransform', ([], {'nu': '(0)', 'N': '(160)', 'h': '(10 ** -3.5)'}), '(nu=0, N=160, h=10 ** -3.5)\n', (2070, 2097), False, 'from hankel import HankelTransform\n'), ((2307, 2339), 'numpy.isclose', 'np.isclose', (['ans', 'anl'], {'rtol': '(0.001)'}), '(ans, anl, rtol=0.001)\n', (2317, 2339), True, 'import numpy as np\n'), ((2510, 2551), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': '(0)', 'N': '(50)', 'h': '(10 ** -1.3)'}), '(nu=0, N=50, h=10 ** -1.3)\n', (2525, 2551), False, 'from hankel import HankelTransform\n'), ((2650, 2660), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (2656, 2660), True, 'import numpy as np\n'), ((2733, 2765), 'numpy.isclose', 'np.isclose', (['ans', 'anl'], {'rtol': '(0.001)'}), '(ans, anl, rtol=0.001)\n', (2743, 2765), True, 'import numpy as np\n'), ((2850, 2885), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': '(0)', 'N': '(50)', 'h': '(0.01)'}), '(nu=0, N=50, h=0.01)\n', (2865, 2885), False, 'from hankel import HankelTransform\n'), ((3089, 3121), 'numpy.isclose', 'np.isclose', (['ans', 'anl'], {'rtol': '(0.001)'}), '(ans, anl, rtol=0.001)\n', (3099, 3121), True, 'import numpy as np\n'), ((3407, 3439), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': 'nu', 'N': 'N', 'h': 'h'}), '(nu=nu, N=N, h=h)\n', (3422, 3439), False, 'from hankel import HankelTransform\n'), ((3650, 3682), 'numpy.isclose', 'np.isclose', (['ans', 'anl'], {'rtol': '(0.001)'}), '(ans, anl, rtol=0.001)\n', (3660, 3682), True, 'import numpy as np\n'), ((3855, 3887), 'hankel.HankelTransform', 'HankelTransform', ([], {'nu': 'nu', 'N': 'N', 'h': 'h'}), '(nu=nu, N=N, h=h)\n', (3870, 3887), False, 'from hankel import HankelTransform\n'), ((4134, 4166), 'numpy.isclose', 'np.isclose', (['ans', 'anl'], {'rtol': '(0.001)'}), '(ans, anl, rtol=0.001)\n', (4144, 4166), True, 'import numpy as np\n'), ((590, 598), 'scipy.special.gamma', 'gamma', (['a'], {}), '(a)\n', (595, 598), False, 'from scipy.special import gamma, 
gammainc, gammaincc, k0\n'), ((601, 615), 'scipy.special.gammainc', 'gammainc', (['a', 'x'], {}), '(a, x)\n', (609, 615), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((651, 659), 'scipy.special.gamma', 'gamma', (['a'], {}), '(a)\n', (656, 659), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((662, 677), 'scipy.special.gammaincc', 'gammaincc', (['a', 'x'], {}), '(a, x)\n', (671, 677), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((1347, 1352), 'scipy.special.k0', 'k0', (['(1)'], {}), '(1)\n', (1349, 1352), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((2220, 2233), 'scipy.special.gamma', 'gamma', (['(-m / 2)'], {}), '(-m / 2)\n', (2225, 2233), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((2995, 3016), 'numpy.exp', 'np.exp', (['(-0.5 / z ** 2)'], {}), '(-0.5 / z ** 2)\n', (3001, 3016), True, 'import numpy as np\n'), ((3555, 3576), 'scipy.special.gamma', 'gamma', (['(0.5 * (nu - s))'], {}), '(0.5 * (nu - s))\n', (3560, 3576), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((1313, 1318), 'scipy.special.k0', 'k0', (['(1)'], {}), '(1)\n', (1315, 1318), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((2201, 2217), 'scipy.special.gamma', 'gamma', (['(m / 2 + 1)'], {}), '(m / 2 + 1)\n', (2206, 2217), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((3527, 3552), 'scipy.special.gamma', 'gamma', (['(0.5 * (2 + nu + s))'], {}), '(0.5 * (2 + nu + s))\n', (3532, 3552), False, 'from scipy.special import gamma, gammainc, gammaincc, k0\n'), ((2137, 2147), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (2144, 2147), True, 'import numpy as np\n'), ((2600, 2624), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + z ** 2)'], {}), '(x ** 2 + z ** 2)\n', (2607, 2624), True, 'import numpy as np\n'), ((2924, 2954), 'numpy.exp', 'np.exp', (['(-0.5 * z ** 2 * x ** 2)'], {}), '(-0.5 * z ** 2 * x ** 2)\n', (2930, 
2954), True, 'import numpy as np\n')] |
import time
import numpy as np
from scipy import sparse
import argparse
# Module-level CLI parser; the actual arguments are registered in the
# `if __name__ == "__main__"` block below before parse_known_args() is called.
parser = argparse.ArgumentParser()
from multiprocessing import Pool
from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly
from src.models.cf_utils import *
class Model():
    """Thin wrapper around a `surprise` algorithm.

    Bundles an algorithm with a display name and the list of cutoffs `ks`
    used for MAP@k / MAR@k evaluation, and stores all evaluation results
    as attributes so the model objects can be collected after a
    multiprocessing run.
    """
    def __init__(self, name, algo, ks):
        self.name = name  # human-readable name used in log output
        self.algo = algo  # a surprise-style algorithm with .fit()/.test()
        self.ks = ks      # cutoffs for precision/recall at k
    def train(self, trainset):
        """Fit the wrapped algorithm on `trainset`, logging wall-clock time."""
        print('training ', self.name, '... ', end='')
        start = time.time()
        self.algo.fit(trainset)
        end = time.time()
        print('done in ', round(end-start), 'seconds')
    def predict(self, testset, cold_testset):
        """Score both test sets and keep the prediction lists on the instance."""
        self.predictions = self.algo.test(testset)
        # BUGFIX: previously this re-scored `testset`, so cold-user metrics
        # silently duplicated the all-user metrics.
        self.cold_predictions = self.algo.test(cold_testset)
    def evaluate_all_users(self):
        """Compute MAE/RMSE and MAP/MAR@k over all users' predictions."""
        print('evaluating all users', self.name, '... ', end='')
        start = time.time()
        self.mae = accuracy.mae(self.predictions, verbose=False)
        self.rmse = accuracy.rmse(self.predictions, verbose=False)
        precisions_and_recalls = [precision_recall_at_k(self.predictions, k) for k in self.ks]
        self.MAPs, self.MARs = zip(*precisions_and_recalls)
        end = time.time()
        print('done in ', round(end-start), 'seconds')
    def evaluate_cold_users(self):
        """Compute MAE/RMSE and MAP/MAR@k over cold-start users' predictions."""
        print('evaluating cold users', self.name, '... ', end='')
        start = time.time()
        self.cold_mae = accuracy.mae(self.cold_predictions, verbose=False)
        self.cold_rmse = accuracy.rmse(self.cold_predictions, verbose=False)
        precisions_and_recalls = [precision_recall_at_k(self.cold_predictions, k) for k in self.ks]
        self.cold_MAPs, self.cold_MARs = zip(*precisions_and_recalls)
        end = time.time()
        print('done in ', round(end-start), 'seconds')
def run_model(model, trainset, testset, cold_testset):
    """Fit one model, score both test sets, run both evaluations.

    Returns the same `model` instance so results survive a Pool.starmap.
    """
    model.train(trainset)
    model.predict(testset, cold_testset)
    # Evaluate the full population first, then the cold-start subset.
    for evaluate in (model.evaluate_all_users, model.evaluate_cold_users):
        evaluate()
    return model
def run(masked_R_coo, unmasked_vals_coo, unmasked_cold_coo, mask_coo, mask_csr, ks, aug):
    """Build the benchmark models, run them in parallel, and save the results."""
    trainset, testset, cold_testset = setup(masked_R_coo, unmasked_vals_coo, unmasked_cold_coo)
    # One (name, algorithm) pair per benchmark; KNN is intentionally disabled.
    algos = [
        ('random', NormalPredictor()),
        ('bias only', BaselineOnly(verbose=False, bsl_options={'method': 'sgd', 'learning_rate': 5e-05})),
        ('SVD', SVD(verbose=False)),
        # ('KNN', KNNBasic(verbose=False)),
    ]
    models = [Model(name=label, algo=algorithm, ks=ks) for label, algorithm in algos]
    # Each model run is independent, so fan them out over worker processes.
    with Pool() as pool:
        models = pool.starmap(run_model, [(m, trainset, testset, cold_testset) for m in models])
    show_and_save(models, aug)
if __name__ == "__main__":
    # BUGFIX: the default path previously contained literal single quotes
    # ("'/mnt/...npy'"), which np.load can never open. The quotes are removed.
    parser.add_argument("--augmented_file_path",
                        default="/mnt/nfs/scratch1/rbialik/adversarial-recommendation-systems/model_params/generated_100_user_neighbors.npy",
                        type=str, required=False,
                        help="Generated data file path")
    parser.add_argument("--use_augmentation", default='no',
                        type=str, required=False,
                        help="whether to use augmentation `yes` otherwise `no`")
    args, unknown = parser.parse_known_args()
    generated_users_file = args.augmented_file_path
    aug = args.use_augmentation
    print("augmentation use or not {}".format(aug))
    print("file path for augmented data {}".format(generated_users_file))
    # masked_R_coo, unmasked_R_coo = toy_example()
    masked_R_coo, unmasked_R_coo = get_data_from_dataloader()
    if aug == 'yes':
        # Generated users are stored as {id: (neighbor_per_id, dim) array};
        # flatten them into one (num_ids * neighbor_per_id, dim) sparse matrix
        # and stack it below both the masked and unmasked rating matrices.
        generated_users = np.load(generated_users_file, allow_pickle=True).item()
        num_ids = len(generated_users.keys())
        neighbor_per_id, neighbor_dim = generated_users[list(generated_users.keys())[0]].shape
        generated_users_coo = sparse.coo_matrix(np.array([v for v in generated_users.values()]).reshape(num_ids * neighbor_per_id, neighbor_dim))
        masked_R_coo = sparse.vstack([masked_R_coo, generated_users_coo])
        unmasked_R_coo = sparse.vstack([unmasked_R_coo, generated_users_coo])
        aug = True
    else:
        aug = False
    # The mask marks entries present in the unmasked matrix but held out
    # from the masked (training) matrix.
    mask_coo = sparse.coo_matrix(logical_xor(masked_R_coo, unmasked_R_coo))
    mask_csr = mask_coo.tocsr()
    unmasked_vals_csr = unmasked_R_coo.multiply(mask_coo)
    unmasked_vals_coo = sparse.coo_matrix(unmasked_vals_csr)
    unmasked_cold_coo = only_cold_start(masked_R_coo, unmasked_vals_coo)
    ks = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15]
    run(masked_R_coo, unmasked_vals_coo, unmasked_cold_coo, mask_coo, mask_csr, ks, aug)
| [
"surprise.BaselineOnly",
"numpy.load",
"argparse.ArgumentParser",
"scipy.sparse.vstack",
"time.time",
"scipy.sparse.coo_matrix",
"surprise.NormalPredictor",
"surprise.accuracy.mae",
"multiprocessing.Pool",
"surprise.accuracy.rmse",
"surprise.SVD"
] | [((82, 107), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (105, 107), False, 'import argparse\n'), ((4359, 4395), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['unmasked_vals_csr'], {}), '(unmasked_vals_csr)\n', (4376, 4395), False, 'from scipy import sparse\n'), ((480, 491), 'time.time', 'time.time', ([], {}), '()\n', (489, 491), False, 'import time\n'), ((538, 549), 'time.time', 'time.time', ([], {}), '()\n', (547, 549), False, 'import time\n'), ((879, 890), 'time.time', 'time.time', ([], {}), '()\n', (888, 890), False, 'import time\n'), ((910, 955), 'surprise.accuracy.mae', 'accuracy.mae', (['self.predictions'], {'verbose': '(False)'}), '(self.predictions, verbose=False)\n', (922, 955), False, 'from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly\n'), ((976, 1022), 'surprise.accuracy.rmse', 'accuracy.rmse', (['self.predictions'], {'verbose': '(False)'}), '(self.predictions, verbose=False)\n', (989, 1022), False, 'from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly\n'), ((1192, 1203), 'time.time', 'time.time', ([], {}), '()\n', (1201, 1203), False, 'import time\n'), ((1377, 1388), 'time.time', 'time.time', ([], {}), '()\n', (1386, 1388), False, 'import time\n'), ((1413, 1463), 'surprise.accuracy.mae', 'accuracy.mae', (['self.cold_predictions'], {'verbose': '(False)'}), '(self.cold_predictions, verbose=False)\n', (1425, 1463), False, 'from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly\n'), ((1489, 1540), 'surprise.accuracy.rmse', 'accuracy.rmse', (['self.cold_predictions'], {'verbose': '(False)'}), '(self.cold_predictions, verbose=False)\n', (1502, 1540), False, 'from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly\n'), ((1725, 1736), 'time.time', 'time.time', ([], {}), '()\n', (1734, 1736), False, 'import time\n'), ((2609, 2615), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (2613, 2615), False, 'from multiprocessing import 
Pool\n'), ((3989, 4039), 'scipy.sparse.vstack', 'sparse.vstack', (['[masked_R_coo, generated_users_coo]'], {}), '([masked_R_coo, generated_users_coo])\n', (4002, 4039), False, 'from scipy import sparse\n'), ((4065, 4117), 'scipy.sparse.vstack', 'sparse.vstack', (['[unmasked_R_coo, generated_users_coo]'], {}), '([unmasked_R_coo, generated_users_coo])\n', (4078, 4117), False, 'from scipy import sparse\n'), ((2231, 2248), 'surprise.NormalPredictor', 'NormalPredictor', ([], {}), '()\n', (2246, 2248), False, 'from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly\n'), ((2295, 2382), 'surprise.BaselineOnly', 'BaselineOnly', ([], {'verbose': '(False)', 'bsl_options': "{'method': 'sgd', 'learning_rate': 5e-05}"}), "(verbose=False, bsl_options={'method': 'sgd', 'learning_rate': \n 5e-05})\n", (2307, 2382), False, 'from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly\n'), ((2421, 2439), 'surprise.SVD', 'SVD', ([], {'verbose': '(False)'}), '(verbose=False)\n', (2424, 2439), False, 'from surprise import accuracy, SVD, NormalPredictor, KNNBasic, BaselineOnly\n'), ((3623, 3671), 'numpy.load', 'np.load', (['generated_users_file'], {'allow_pickle': '(True)'}), '(generated_users_file, allow_pickle=True)\n', (3630, 3671), True, 'import numpy as np\n')] |
import array
import functools
import gzip
import io
import logging
import operator
import struct
import urllib.request
from typing import List, BinaryIO
import matplotlib.pyplot as plt
import numpy as np
import alkymi as alk
# Print alkymi logging to stderr
# (a StreamHandler with no argument writes to sys.stderr by default).
alk.log.addHandler(logging.StreamHandler())
alk.log.setLevel(logging.DEBUG)  # emit everything down to DEBUG level
def parse_idx(fd: BinaryIO) -> np.ndarray:
    """
    Parse an IDX data file
    See: https://github.com/datapythonista/mnist/blob/208174c19a36d6325ea4140ff0182ec591273b67/mnist/__init__.py#L64
    """
    # IDX type byte -> array.array type code.
    idx_type_codes = {0x08: 'B',   # unsigned byte
                      0x09: 'b',   # signed byte
                      0x0b: 'h',   # short (2 bytes)
                      0x0c: 'i',   # int (4 bytes)
                      0x0d: 'f',   # float (4 bytes)
                      0x0e: 'd'}   # double (8 bytes)
    header = fd.read(4)
    if len(header) != 4:
        raise RuntimeError('Invalid IDX file, file empty or does not contain a full header.')
    zeros, data_type, num_dimensions = struct.unpack('>HBB', header)
    if zeros != 0:
        raise RuntimeError('Invalid IDX file, file must start with two zero bytes. Found 0x%02x' % zeros)
    type_code = idx_type_codes.get(data_type)
    if type_code is None:
        raise RuntimeError('Unknown data type 0x%02x in IDX file' % data_type)
    # One big-endian uint32 per dimension follows the header.
    dimension_sizes = struct.unpack('>' + 'I' * num_dimensions,
                                    fd.read(4 * num_dimensions))
    data = array.array(type_code, fd.read())
    # IDX payloads are big endian; array.array read them as host order
    # (assumed little endian here, matching the upstream mnist parser).
    data.byteswap()
    expected_items = functools.reduce(operator.mul, dimension_sizes)
    if len(data) != expected_items:
        raise RuntimeError('IDX file has wrong number of items. Expected: %d. Found: %d' % (expected_items, len(data)))
    return np.array(data).reshape(dimension_sizes)
@alk.recipe()
def urls() -> List[str]:
    """Return the download URLs for the four MNIST archives.

    Order matters downstream: train images, train labels, test images,
    test labels.
    """
    base = "http://yann.lecun.com/exdb/mnist/"
    archives = ["train-images-idx3-ubyte.gz",
                "train-labels-idx1-ubyte.gz",
                "t10k-images-idx3-ubyte.gz",
                "t10k-labels-idx1-ubyte.gz"]
    return [base + name for name in archives]
@alk.foreach(urls)
def download_gzips(url: str) -> bytes:
    """Download one gzip archive and return its raw bytes.

    Uses the response as a context manager so the underlying HTTP
    connection is closed instead of leaked (the original left it open).
    """
    with urllib.request.urlopen(url) as response:
        return response.read()
@alk.foreach(download_gzips)
def parse_gzip_to_arrays(data: bytes) -> np.ndarray:
    """Decompress one downloaded archive in memory and parse its IDX payload."""
    with io.BytesIO(data) as raw, gzip.open(raw) as gzip_file:
        return parse_idx(gzip_file)  # type: ignore
def main():
    """Brew the pipeline and display the first training digit with its label."""
    train_images, train_labels, _test_images, _test_labels = parse_gzip_to_arrays.brew()
    plt.imshow(train_images[0], cmap="gray")
    plt.title(f"Digit: {train_labels[0]}")
    plt.show()
# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| [
"alkymi.recipe",
"io.BytesIO",
"matplotlib.pyplot.show",
"gzip.open",
"alkymi.log.setLevel",
"matplotlib.pyplot.imshow",
"logging.StreamHandler",
"struct.unpack",
"functools.reduce",
"numpy.array",
"alkymi.foreach"
] | [((305, 336), 'alkymi.log.setLevel', 'alk.log.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (321, 336), True, 'import alkymi as alk\n'), ((2052, 2064), 'alkymi.recipe', 'alk.recipe', ([], {}), '()\n', (2062, 2064), True, 'import alkymi as alk\n'), ((2511, 2528), 'alkymi.foreach', 'alk.foreach', (['urls'], {}), '(urls)\n', (2522, 2528), True, 'import alkymi as alk\n'), ((2617, 2644), 'alkymi.foreach', 'alk.foreach', (['download_gzips'], {}), '(download_gzips)\n', (2628, 2644), True, 'import alkymi as alk\n'), ((280, 303), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (301, 303), False, 'import logging\n'), ((1037, 1066), 'struct.unpack', 'struct.unpack', (['""">HBB"""', 'header'], {}), "('>HBB', header)\n", (1050, 1066), False, 'import struct\n'), ((1707, 1754), 'functools.reduce', 'functools.reduce', (['operator.mul', 'dimension_sizes'], {}), '(operator.mul, dimension_sizes)\n', (1723, 1754), False, 'import functools\n'), ((2931, 2971), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_images[0]'], {'cmap': '"""gray"""'}), "(train_images[0], cmap='gray')\n", (2941, 2971), True, 'import matplotlib.pyplot as plt\n'), ((3027, 3037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3035, 3037), True, 'import matplotlib.pyplot as plt\n'), ((2707, 2723), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (2717, 2723), False, 'import io\n'), ((2009, 2023), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2017, 2023), True, 'import numpy as np\n'), ((2743, 2755), 'gzip.open', 'gzip.open', (['f'], {}), '(f)\n', (2752, 2755), False, 'import gzip\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import sqlite3
import datetime
import numpy as np
from . import sql_table_utils as utils
DEFAULT_DATABASE_DIR_NAME = "Crystal_data"
def get_valid_time_stamp():
    """
    Get a valid time stamp without illegal characters.
    Adds time_ to make the time stamp a valid table name in sql.
    :return: String, extracted timestamp
    """
    raw = str(datetime.datetime.now())
    # Replace every character SQL identifiers can't contain with '_'.
    sanitized = raw
    for illegal in ("-", ":", " ", "."):
        sanitized = sanitized.replace(illegal, "_")
    return "time_" + sanitized
class Crystal:
    """
    Provides methods to store various types of data onto the database.
    docs:
    * Creates a new project using the script name if no project name has been provided.
    * Creates a new run table for every class instantiation.
    """
    def __init__(self, project_name=None, database_dir=None):
        """
        Create a crystal instance that could be used to write data onto the database.
        :param project_name: str, default -> None, uses the script name the instance is being used from as the
                                              project name.
                             -> str, uses this name instead.
        :param database_dir: str or None; when None a "Crystal_data" directory under the
                             user's home is used, otherwise `utils.dd` is pointed at this directory.
        """
        if project_name is None:
            # Derive the project name from the invoking script's file name.
            self.called_from = os.path.realpath(sys.argv[0])
            self.project_name = os.path.basename(self.called_from)[:-3]  # Remove .py
            self.project_name = self.project_name.split(".")[0]
        else:
            # Spaces not allowed for project name
            assert len(project_name.split(" ")) < 2, \
                "Ensure that you don't have spaces in your variable name, use '_' instead."
            self.project_name = project_name
        # One run table row (and one variable table) per instantiation.
        self.time_stamp = get_valid_time_stamp()
        # Tracks variable names already registered in the variable table,
        # so each variable is only inserted once.
        self.previous = [None]
        if database_dir is None:
            # Create a new database on the home directory if not present
            home_dir = os.path.expanduser("~")
            main_data_dir = os.path.join(home_dir, DEFAULT_DATABASE_DIR_NAME)
            if not os.path.exists(main_data_dir):
                print("Crystal_data directory not found. Creating a new one...")
                os.mkdir(main_data_dir)
        else:
            utils.dd.set_database_dir(new_database_dir=database_dir)
        # Create new project and run tables if not already found
        # NOTE(review): table/value interpolation below uses str.format, which is
        # SQL-injection-prone if names ever come from untrusted input — verify callers.
        self.conn, self.c = utils.open_data_base_connection(skip_dir_check=True)
        self.run_table_name = self.project_name + '_' + 'run_table'
        self.c.execute("""CREATE TABLE IF NOT EXISTS main_table (
                        project_name VARCHAR
                        )""")
        self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
                        run_name VARCHAR
                        )""".format(self.run_table_name))
        # Add current project and run to the main table and run_table if not already present
        # main_table
        self.c.execute("""SELECT project_name FROM main_table""")
        project_names = np.array(self.c.fetchall()).squeeze()
        if self.project_name not in project_names:
            self.c.execute("""INSERT INTO main_table (
                            project_name) VALUES ('{}'
                            )""".format(self.project_name))
        # run_table
        self.c.execute("""SELECT run_name FROM {run_table}""".format(run_table=self.run_table_name))
        run_names = np.array(self.c.fetchall()).squeeze()
        if self.time_stamp not in run_names:
            self.c.execute("""INSERT INTO {} (
                            run_name) VALUES ('{}'
                            )""".format(self.run_table_name, self.time_stamp))
        # variable_table -> time_stamp_table
        self.c.execute("""CREATE TABLE IF NOT EXISTS {} (
                        variable_name VARCHAR, variable_type VARCHAR
                        )""".format(self.time_stamp))
        self.conn.commit()
    def scalar(self, value, step, name):
        """
        Plot a scalar value.
        :param value: int or float, the value on the y-axis
        :param step: int or float, the value on the x-axis
        :param name: String, the name of the variable to be used during visualization
        """
        # Spaces not allowed for scalar variable name
        assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
        name = "scalar_" + name
        # Register the variable name once; on repeat calls pop the duplicate.
        self.previous.append(name)
        if self.previous[-1] not in self.previous[:-1]:
            self.c.execute("""INSERT INTO {time_stamp_table} (
                            variable_name, variable_type
                            ) VALUES ('{variable}', '{type}')"""
                           .format(time_stamp_table=self.time_stamp, variable=name, type="scalar"))
        else:
            self.previous.pop()
        self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
                        X_value FLOAT, Y_value FLOAT, time VARCHAR
                        )""".format(variable_table_name=self.time_stamp + '_' + name))
        self.c.execute("""INSERT INTO {variable_table_name} (
                        X_value, Y_value, time) VALUES ('{x}', '{y}', '{time}'
                        )""".format(variable_table_name=self.time_stamp + '_' + name,
                                    x=step, y=value, time=datetime.datetime.now()))
        self.conn.commit()
    # TODO: Test this
    def image(self, image, name):
        """
        Show image on the Crystal server.
        :param image: array-like pixel data; stored as raw bytes via np.array(...).tobytes()
                      (shape/dtype are not persisted — presumably the reader knows them; verify)
        :param name: String, the name of the variable to be used during visualization
        :return: None
        """
        assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
        name = "image_" + name
        self.previous.append(name)
        if self.previous[-1] not in self.previous[:-1]:
            self.c.execute("""INSERT INTO {time_stamp_table} (
                            variable_name, variable_type
                            ) VALUES ('{variable}', '{type}')"""
                           .format(time_stamp_table=self.time_stamp, variable=name, type="image"))
        else:
            self.previous.pop()
        self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
                        images BLOB, time VARCHAR
                        )""".format(variable_table_name=self.time_stamp + '_' + name))
        self.c.execute("""INSERT INTO {variable_table_name} (
                        images, time) VALUES ('{img}', '{time}'
                        )""".format(variable_table_name=self.time_stamp + '_' + name,
                                    img=sqlite3.Binary(np.array(image).tobytes()), time=datetime.datetime.now()))
        self.conn.commit()
    def heatmap(self, value, step, name, value_names=None):
        """
        Store one row of heatmap data.
        :param value: array of values for this step (stored via the ARRAY column adapter)
        :param step: int or float, the value on the x-axis
        :param name: String, the name of the variable to be used during visualization
        :param value_names: optional array of labels for the values; when given an extra
                            V_names column is stored alongside each row
        :return: None
        """
        assert len(name.split(" ")) < 2, "Ensure that you don't have spaces in your variable name, use '_' instead."
        name = "heatmap_" + name
        self.previous.append(name)
        if self.previous[-1] not in self.previous[:-1]:
            self.c.execute("""INSERT INTO {time_stamp_table} (
                            variable_name, variable_type
                            ) VALUES ('{variable}', '{type}')"""
                           .format(time_stamp_table=self.time_stamp, variable=name, type="heatmap"))
        else:
            self.previous.pop()
        # Table layout differs depending on whether value labels are supplied.
        if value_names is None:
            self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
                            X_value FLOAT, Y_value ARRAY, time VARCHAR
                            )""".format(variable_table_name=self.time_stamp + '_' + name))
            self.c.execute("""INSERT INTO {variable_table_name} (
                            X_value, Y_value, time) VALUES (?, ?, ?
                            )""".format(variable_table_name=self.time_stamp + '_' + name),
                           (step, value, datetime.datetime.now()))
        else:
            self.c.execute("""CREATE TABLE IF NOT EXISTS {variable_table_name} (
                            X_value FLOAT, Y_value ARRAY, V_names ARRAY, time VARCHAR
                            )""".format(variable_table_name=self.time_stamp + '_' + name))
            self.c.execute("""INSERT INTO {variable_table_name} (
                            X_value, Y_value, V_names, time) VALUES (?, ?, ?, ?
                            )""".format(variable_table_name=self.time_stamp + '_' + name),
                           (step, value, value_names, datetime.datetime.now()))
        self.conn.commit()
    def fft(self):
        # Placeholder: FFT logging is not implemented yet.
        pass
| [
"os.mkdir",
"os.path.join",
"os.path.basename",
"os.path.realpath",
"os.path.exists",
"datetime.datetime.now",
"numpy.array",
"os.path.expanduser"
] | [((494, 517), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (515, 517), False, 'import datetime\n'), ((1398, 1427), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1414, 1427), False, 'import os\n'), ((2045, 2068), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2063, 2068), False, 'import os\n'), ((2097, 2146), 'os.path.join', 'os.path.join', (['home_dir', 'DEFAULT_DATABASE_DIR_NAME'], {}), '(home_dir, DEFAULT_DATABASE_DIR_NAME)\n', (2109, 2146), False, 'import os\n'), ((1460, 1494), 'os.path.basename', 'os.path.basename', (['self.called_from'], {}), '(self.called_from)\n', (1476, 1494), False, 'import os\n'), ((2166, 2195), 'os.path.exists', 'os.path.exists', (['main_data_dir'], {}), '(main_data_dir)\n', (2180, 2195), False, 'import os\n'), ((2294, 2317), 'os.mkdir', 'os.mkdir', (['main_data_dir'], {}), '(main_data_dir)\n', (2302, 2317), False, 'import os\n'), ((5513, 5536), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5534, 5536), False, 'import datetime\n'), ((6848, 6871), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6869, 6871), False, 'import datetime\n'), ((8224, 8247), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8245, 8247), False, 'import datetime\n'), ((8837, 8860), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8858, 8860), False, 'import datetime\n'), ((6815, 6830), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (6823, 6830), True, 'import numpy as np\n')] |
import lorm
from lorm.manif import EuclideanSpace
from lorm.funcs import ManifoldObjectiveFunction
from nfft import nfft
import numpy as np
import copy as cp
class plan(ManifoldObjectiveFunction):
    def __init__(self, M, N):
        '''
        plan for computing the (polynomial) L^2 discrepancy for points measures on the E3 (3d-Torus)
        M - number of points
        N - polynomial degree
        '''
        self._M = M
        self._N = N
        self._nfft_plan = nfft.NFFT3D(M, N, N, N)
        # Sobolev-type weights lambda_hat(k) = 1 / (|k - N/2|^2 + 1)^2,
        # built with broadcasting instead of the original O(N^3) Python loop.
        idx = np.arange(N) - N / 2
        norm_squared = (idx[:, None, None] ** 2
                        + idx[None, :, None] ** 2
                        + idx[None, None, :] ** 2)
        self._lambda_hat = 1. / np.power(norm_squared + 1, 2)
        # BUGFIX: np.complex / np.float are deprecated aliases removed from
        # modern NumPy (>=1.24); use the builtin types instead.
        self._mu_hat = np.zeros([N, N, N], dtype=complex)
        self._mu_hat[int(N/2), int(N/2), int(N/2)] = 1
        self._weights = np.ones(M, dtype=float) / M

        def f(point_array_coords):
            # Weighted squared norm of the Fourier-coefficient error.
            err_vec = self._eval_error_vector(point_array_coords)
            return np.real(np.sum(err_vec * err_vec.conjugate() * self._lambda_hat))

        def grad(point_array_coords):
            return np.real(self._eval_grad_error_vector(point_array_coords))

        # def hess_mult(base_point_array_coords, tangent_array_coords):
        #     # approximation
        #     norm = np.linalg.norm(tangent_array_coords)
        #     h = 1e-10
        #     return norm*(grad(base_point_array_coords + h*tangent_array_coords/norm) - grad(base_point_array_coords))/h

        ManifoldObjectiveFunction.__init__(self, lorm.manif.EuclideanSpace(3), f, grad=grad, hess_mult=None, parameterized=False)

    def hess_mult(self, tangent_vector_array):
        """Approximate Hessian-vector product via a finite difference of the gradient."""
        hess_mult_vector_array = cp.deepcopy(tangent_vector_array)
        base_point_array_coords = hess_mult_vector_array.base_point_array.coords
        tangent_array_coords = tangent_vector_array.coords
        norm = np.linalg.norm(tangent_array_coords)
        h = 1e-7  # finite-difference step along the (normalized) tangent
        hess_mult_vector_array.coords[:] = norm * np.real(
            self._eval_grad_error_vector(base_point_array_coords + h * tangent_array_coords / norm)
            - self._eval_grad_error_vector(base_point_array_coords)) / h
        return hess_mult_vector_array

    def _eval_error_vector(self, point_array_coords):
        """Fourier-coefficient error mu_hat(points) - mu_hat(uniform) via an adjoint NFFT."""
        self._nfft_plan.x = np.mod(point_array_coords + 0.5, 1) - 0.5
        self._nfft_plan.precompute_x()
        self._nfft_plan.f[:] = self._weights
        self._nfft_plan.adjoint()
        err_vec = np.zeros([self._N, self._N, self._N], dtype=complex)
        err_vec[:] = self._nfft_plan.f_hat[:] - self._mu_hat[:]
        return err_vec

    def _eval_grad_error_vector(self, point_array_coords):
        """Gradient of the weighted error w.r.t. the point coordinates (one NFFT per axis)."""
        grad = np.zeros([self._M, 3], dtype=complex)
        err_vec = self._eval_error_vector(point_array_coords) * self._lambda_hat[:]
        # Each partial derivative multiplies the coefficients by -2*pi*i*k
        # along the corresponding axis before the forward transform.
        # dx
        self._nfft_plan.f_hat[:] = err_vec[:]
        for i in range(self._N):
            self._nfft_plan.f_hat[i, :, :] *= -2*np.pi*1j*(i - self._N/2)
        self._nfft_plan.trafo()
        grad[:, 0] = 2 * self._weights * self._nfft_plan.f[:]
        # dy
        self._nfft_plan.f_hat[:] = err_vec[:]
        for i in range(self._N):
            self._nfft_plan.f_hat[:, i, :] *= -2*np.pi*1j*(i - self._N/2)
        self._nfft_plan.trafo()
        grad[:, 1] = 2 * self._weights * self._nfft_plan.f[:]
        # dz
        self._nfft_plan.f_hat[:] = err_vec[:]
        for i in range(self._N):
            self._nfft_plan.f_hat[:, :, i] *= -2*np.pi*1j*(i - self._N/2)
        self._nfft_plan.trafo()
        grad[:, 2] = 2 * self._weights * self._nfft_plan.f[:]
        return grad
| [
"copy.deepcopy",
"nfft.nfft.NFFT3D",
"lorm.manif.EuclideanSpace",
"numpy.power",
"numpy.zeros",
"numpy.ones",
"numpy.mod",
"numpy.linalg.norm"
] | [((478, 501), 'nfft.nfft.NFFT3D', 'nfft.NFFT3D', (['M', 'N', 'N', 'N'], {}), '(M, N, N, N)\n', (489, 501), False, 'from nfft import nfft\n'), ((526, 544), 'numpy.ones', 'np.ones', (['[N, N, N]'], {}), '([N, N, N])\n', (533, 544), True, 'import numpy as np\n'), ((803, 840), 'numpy.zeros', 'np.zeros', (['[N, N, N]'], {'dtype': 'np.complex'}), '([N, N, N], dtype=np.complex)\n', (811, 840), True, 'import numpy as np\n'), ((1751, 1784), 'copy.deepcopy', 'cp.deepcopy', (['tangent_vector_array'], {}), '(tangent_vector_array)\n', (1762, 1784), True, 'import copy as cp\n'), ((1940, 1976), 'numpy.linalg.norm', 'np.linalg.norm', (['tangent_array_coords'], {}), '(tangent_array_coords)\n', (1954, 1976), True, 'import numpy as np\n'), ((2487, 2542), 'numpy.zeros', 'np.zeros', (['[self._N, self._N, self._N]'], {'dtype': 'np.complex'}), '([self._N, self._N, self._N], dtype=np.complex)\n', (2495, 2542), True, 'import numpy as np\n'), ((2806, 2846), 'numpy.zeros', 'np.zeros', (['[self._M, 3]'], {'dtype': 'np.complex'}), '([self._M, 3], dtype=np.complex)\n', (2814, 2846), True, 'import numpy as np\n'), ((915, 941), 'numpy.ones', 'np.ones', (['M'], {'dtype': 'np.float'}), '(M, dtype=np.float)\n', (922, 941), True, 'import numpy as np\n'), ((1593, 1621), 'lorm.manif.EuclideanSpace', 'lorm.manif.EuclideanSpace', (['(3)'], {}), '(3)\n', (1618, 1621), False, 'import lorm\n'), ((2314, 2349), 'numpy.mod', 'np.mod', (['(point_array_coords + 0.5)', '(1)'], {}), '(point_array_coords + 0.5, 1)\n', (2320, 2349), True, 'import numpy as np\n'), ((753, 782), 'numpy.power', 'np.power', (['(norm_squared + 1)', '(2)'], {}), '(norm_squared + 1, 2)\n', (761, 782), True, 'import numpy as np\n')] |
import sys,argparse,operator
import pandas as pd
import matplotlib.cm as cm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.text as mtext
from matplotlib import rc
from matplotlib.legend_handler import HandlerPathCollection
from matplotlib.legend import Legend
import functools
from collections import defaultdict
def subtitle_decorator(handler):
    """Wrap a legend handler so fully transparent handles act as subtitles.

    Patches with alpha == 0 mark "subtitle" legend rows (bin names); for those
    the handle box is hidden so only the label text shows.

    BUGFIX: the wrapper now returns the artist produced by the wrapped
    handler — matplotlib's Legend consumes `legend_artist`'s return value,
    and the original wrapper silently dropped it (returned None).
    """
    @functools.wraps(handler)
    def wrapper(legend, orig_handle, fontsize, handlebox):
        handle_marker = handler(legend, orig_handle, fontsize, handlebox)
        if handle_marker.get_alpha() == 0:
            handlebox.set_visible(False)
        return handle_marker
    return wrapper
def plot_clustered_stacked(dfall, binNames, preBinStats, postBinStats, str2col, labels=None, title="Assembly size and classification before/after Strainberry separation"):
    """Draw grouped, stacked bar charts (one bar per DataFrame per bin).

    :param dfall: list of DataFrames with identical index (bins) and columns (species)
    :param binNames: dict binid -> list of name tokens for the bin
    :param preBinStats: dict binid -> stats of the pre-separation bins (coverage used for x labels)
    :param postBinStats: dict binid -> stats of the post-separation bins (completeness read, currently unused in labels)
    :param str2col: dict binid -> dict species -> colormap index for Pastel1
    :param labels: unused (kept for backward compatibility with the commented-out second legend)
    :param title: figure suptitle
    :return: (fig, axe) of the assembled matplotlib figure
    """
    #rc('text', usetex=True)
    #Adds our decorator to all legend handler functions
    for handler in Legend.get_default_handler_map().values():
        handler.legend_artist = subtitle_decorator(handler.legend_artist)
    n_df = len(dfall)
    n_col = len(dfall[0].columns)
    n_ind = len(dfall[0].index)
    bw=0.4  # width of one bar within a bin's group
    fig,axe = plt.subplots()
    # Draw each DataFrame as a stacked bar plot on the same axes; bars are
    # repositioned afterwards so the frames sit side by side per bin.
    for df in dfall : # for each data frame
        axe = df.plot(kind="bar",
                      width=bw,
                      linewidth=1,
                      edgecolor="black",
                      stacked=True,
                      ax=axe,
                      legend=False,
                      grid=False,
                      colormap="Set3") # make bar plots
    h,l = axe.get_legend_handles_labels() # get the handles we want to modify
    # Recolor every rectangle via str2col and shift it so the i-th DataFrame's
    # bar sits at its own slot inside each bin group.
    for i in range(0, n_df * n_col, n_col): # len(h) = n_col * n_df
        df=dfall[i//n_col]
        for j, pa in enumerate(h[i:i+n_col]):
            for bin_i, rect in enumerate(pa.patches):
                if df.iloc[bin_i][j] == 0:
                    # Empty segment: make it fully transparent (also hides its legend row).
                    rect.set_alpha(0)
                    continue
                c=str2col[df.index[bin_i]][df.columns[j]]
                rect.set_facecolor(plt.cm.Pastel1(c))
                rect.set_x(bin_i-(bw*n_df/2)+(bw*i/n_col))
    plt.suptitle(title,fontsize=32)
    #axe.set_title(title)
    # Build x tick labels "Genus species (NNX)" from the bin name and coverage.
    xlabels=[]
    for binid in df.index:
        binCompl=postBinStats[binid]['completeness']
        binCoverage=preBinStats[binid]['coverage']
        bin_xlab=r'{name} ({coverage}X)'.format(name=' '.join(binNames[binid][0:2]),coverage=int(binCoverage))
        #binLabel=r'{name} ({coverage}X)'.format(name=' '.join(binNames[binid][0:2]),coverage=int(binCoverage))
        #bin_xlab=r'\textbf{{{name}}}'.format(name=binLabel) if binCompl>=70 else binLabel
        #bin_xlab=r'\color{{red}} {name}'.format(name=bin_xlab) if binCoverage<35 else bin_xlab
        xlabels.append(bin_xlab)
    #xlabels=[ binNames[binid] for binid in df.index ]
    #axe.set_xticklabels(xlabels, rotation = 45)
    plt.xticks(np.arange(n_ind),xlabels,rotation='vertical')
    plt.ylabel('Classified bases (Mbp)',labelpad=50, fontsize=28)
    # Major ticks every 1 Mbp, rendered as whole megabases.
    axe.yaxis.set_major_locator(plt.MultipleLocator(1000000))
    #axe.yaxis.set_minor_locator(plt.MultipleLocator(500000))
    axe.yaxis.set_major_formatter(plt.FuncFormatter(lambda x,y:f'{int(x/1000000)}'))
    #axe.grid(which='minor', color='black', alpha=0.2, axis='x')
    axe.grid(which='major', color='black', alpha=0.4, axis='y', linestyle="dotted")
    # Add invisible data to add another legend
    #n=[]
    #for i in range(n_df):
    #    n.append(axe.bar(0,0,color="gray",hatch=H*i))
    # Custom legend: one invisible "subtitle" patch per bin (hidden by
    # subtitle_decorator), followed by one colored patch per species present.
    subtitles=[]
    for binid in df.index:
        #binLabel=r"\textbf{{ {spName} ({binCov}$\times$) }}".format(spName=binNames[binid],binCov=int(binStats[binid]['coverage']))
        binLabel=r"{spName}".format(spName=' '.join(binNames[binid][0:2]))
        subtitles.append( mpatches.Patch(label=binLabel, alpha=0) )
        for species in df.columns:
            if any( dfall[i].loc[binid][species] for i in range(n_df) ):
                c=str2col[binid][species]
                speciesTokens=species.split('_')
                # Shorten the label to the strain suffix when the species
                # matches the bin's own genus+species.
                strainLabel='str. '+' '.join(speciesTokens[2:]) if len(speciesTokens[2:]) > 0 else 'str. representative'
                speciesLabel=' '.join(speciesTokens) if binNames[binid][0:2] != speciesTokens[0:2] else strainLabel
                subtitles.append( mpatches.Patch(facecolor=plt.cm.Pastel1(c), edgecolor='black', label=r"{}".format(speciesLabel) ) )
    lgnd=axe.legend(handles=subtitles,loc=[1.03,-0.37],ncol=1)
    axe.add_artist(lgnd)
    #red_patch = mpatches.Patch(color='red', label='The red data', alpha=0)
    #l1 = axe.legend(h[:n_col]+[red_patch], l[:n_col]+['pippo'], loc=[1.02,0])
    #if labels is not None:
    #    l2 = plt.legend(n, labels, loc=[1.01, 0.1])
    #axe.add_artist(l1)
    return fig,axe
def load_tsv(fname, has_header=False):
    """Parse a per-bin stats TSV file.

    Expected columns (0-based): 0 bin id, 2 bin size, 3 species, 5 classified
    size for that species, 7 completeness, 10 coverage.

    Returns a tuple (bin2sp, species_set, bin_stats) where bin2sp maps
    binid -> {species: bases, 'others': unclassified bases}.
    """
    bin2sp = defaultdict(lambda: defaultdict(int))
    bin_stats = defaultdict(lambda: defaultdict(float))
    species_seen = set()
    with open(fname, 'r') as handle:
        rows = iter(handle)
        if has_header:
            next(rows, None)  # drop the header line
        for row in rows:
            fields = row.rstrip().split('\t')
            bin_id, species = fields[0], fields[3]
            bin2sp[bin_id][species] = int(fields[5])
            # Temporarily store the full bin size; classified bases are
            # subtracted below to leave only the unclassified remainder.
            bin2sp[bin_id]['others'] = int(fields[2])
            bin_stats[bin_id]['completeness'] = float(fields[7])
            bin_stats[bin_id]['coverage'] = float(fields[10])
            species_seen.add(species)
    for per_species in bin2sp.values():
        classified = sum(v for k, v in per_species.items() if k != 'others')
        per_species['others'] -= classified
    return bin2sp, species_seen, bin_stats
def main( argv = None ):
    """CLI entry point: load pre/post separation stats and render the figure.

    Writes <prefix>.pdf and <prefix>.svg; returns 0 on success.
    """
    # GET PARAMETERS
    parser = argparse.ArgumentParser()
    parser.add_argument('--pre', dest='preFile', required=True, help='TSV file of stats for the pre-separation bins')
    parser.add_argument('--sep', dest='sepFile', required=True, help='TSV file of stats for the post-separation bins')
    parser.add_argument('--prefix', dest='prefix', required=True, help='output file prefix')
    opt = parser.parse_args()
    preStats,preSpSet,preBinStats=load_tsv(opt.preFile,has_header=True)
    sepStats,sepSpSet,sepBinStats=load_tsv(opt.sepFile,has_header=True)
    binList=list(preStats.keys())
    # Name each bin after its dominant (largest) classified species,
    # split into underscore-separated tokens.
    binNames=dict( (binid, max( (x for x in preStats[binid].items() if x[0]!='others'), key=operator.itemgetter(1))[0].split('_')) for binid in preStats.keys() )
    for binid in binNames:
        #binCoverage=preBinStats[binid]['coverage']
        # Drop a literal "str."/"str"/"strain" token at position 2, if present.
        binNames[binid]=[ x for i,x in enumerate(binNames[binid]) if i!=2 or x not in ['str.','str','strain'] ]
    spList=sorted(list(preSpSet|sepSpSet))
    spList.append('others')
    # Assign each (bin, species) pair a Pastel1 colormap index;
    # 'others' always maps to index 11.
    strain2color=defaultdict(lambda:defaultdict(int))
    for bid in binList:
        blst=[ species for species in spList if preStats[bid][species] > 0 or sepStats[bid][species] > 0 ]
        for i,sp in enumerate(blst):
            strain2color[bid][sp]=i if sp!='others' else 11
    # Bins as rows, species as columns, classified bases as values.
    preDF = pd.DataFrame( np.array([ [ preStats[binId][species] for species in spList ] for binId in binList ]), index=binList, columns=spList )
    sepDF = pd.DataFrame( np.array([ [ sepStats[binId][species] for species in spList ] for binId in binList ]), index=binList, columns=spList )
    #plt.style.use('seaborn')
    tex_fonts = {
        'figure.figsize' : [23,13],
        'font.family' : 'sans-serif',
        'font.size' : 18,
        'legend.fontsize': 12,
    }
    plt.rcParams.update(tex_fonts)
    fig,ax=plot_clustered_stacked([preDF,sepDF],binNames,preBinStats,sepBinStats,strain2color)
    ax.xaxis.set_tick_params(which=u'both',length=0)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.subplots_adjust(bottom=0.27,left=0.1,right=0.8,top=0.93)
    plt.xticks(rotation=50, ha='right')
    plt.savefig(f'{opt.prefix}.pdf')
    plt.savefig(f'{opt.prefix}.svg')
    #plt.show()
    return 0
if __name__ == "__main__":
sys.exit(main())
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.cm.Pastel1",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.MultipleLocator",
"collections.defaultdict",
"matplotlib.pyplot.rcParams.update",
"numpy.arange",
"numpy.array",
"functools.wraps",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.... | [((415, 439), 'functools.wraps', 'functools.wraps', (['handler'], {}), '(handler)\n', (430, 439), False, 'import functools\n'), ((1193, 1207), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1205, 1207), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2188), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': '(32)'}), '(title, fontsize=32)\n', (2168, 2188), True, 'import matplotlib.pyplot as plt\n'), ((2974, 3036), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Classified bases (Mbp)"""'], {'labelpad': '(50)', 'fontsize': '(28)'}), "('Classified bases (Mbp)', labelpad=50, fontsize=28)\n", (2984, 3036), True, 'import matplotlib.pyplot as plt\n'), ((5725, 5750), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5748, 5750), False, 'import sys, argparse, operator\n'), ((7480, 7510), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['tex_fonts'], {}), '(tex_fonts)\n', (7499, 7510), True, 'import matplotlib.pyplot as plt\n'), ((7752, 7815), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.27)', 'left': '(0.1)', 'right': '(0.8)', 'top': '(0.93)'}), '(bottom=0.27, left=0.1, right=0.8, top=0.93)\n', (7771, 7815), True, 'import matplotlib.pyplot as plt\n'), ((7817, 7852), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(50)', 'ha': '"""right"""'}), "(rotation=50, ha='right')\n", (7827, 7852), True, 'import matplotlib.pyplot as plt\n'), ((7857, 7889), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{opt.prefix}.pdf"""'], {}), "(f'{opt.prefix}.pdf')\n", (7868, 7889), True, 'import matplotlib.pyplot as plt\n'), ((7894, 7926), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{opt.prefix}.svg"""'], {}), "(f'{opt.prefix}.svg')\n", (7905, 7926), True, 'import matplotlib.pyplot as plt\n'), ((2923, 2939), 'numpy.arange', 'np.arange', (['n_ind'], {}), '(n_ind)\n', (2932, 2939), True, 'import numpy as np\n'), ((3068, 3096), 
'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(1000000)'], {}), '(1000000)\n', (3087, 3096), True, 'import matplotlib.pyplot as plt\n'), ((7026, 7111), 'numpy.array', 'np.array', (['[[preStats[binId][species] for species in spList] for binId in binList]'], {}), '([[preStats[binId][species] for species in spList] for binId in\n binList])\n', (7034, 7111), True, 'import numpy as np\n'), ((7171, 7256), 'numpy.array', 'np.array', (['[[sepStats[binId][species] for species in spList] for binId in binList]'], {}), '([[sepStats[binId][species] for species in spList] for binId in\n binList])\n', (7179, 7256), True, 'import numpy as np\n'), ((956, 988), 'matplotlib.legend.Legend.get_default_handler_map', 'Legend.get_default_handler_map', ([], {}), '()\n', (986, 988), False, 'from matplotlib.legend import Legend\n'), ((3814, 3853), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'binLabel', 'alpha': '(0)'}), '(label=binLabel, alpha=0)\n', (3828, 3853), True, 'import matplotlib.patches as mpatches\n'), ((4865, 4881), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4876, 4881), False, 'from collections import defaultdict\n'), ((4915, 4933), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (4926, 4933), False, 'from collections import defaultdict\n'), ((6753, 6769), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6764, 6769), False, 'from collections import defaultdict\n'), ((2069, 2086), 'matplotlib.pyplot.cm.Pastel1', 'plt.cm.Pastel1', (['c'], {}), '(c)\n', (2083, 2086), True, 'import matplotlib.pyplot as plt\n'), ((4351, 4368), 'matplotlib.pyplot.cm.Pastel1', 'plt.cm.Pastel1', (['c'], {}), '(c)\n', (4365, 4368), True, 'import matplotlib.pyplot as plt\n'), ((6383, 6405), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (6402, 6405), False, 'import sys, argparse, operator\n')] |
# height from 9 to 0
# four adjacent locations (up, down, left, and right)
# 21---43210
# 3-878-4-21
# -85678-8-2
# 87678-678-
# -8---65678
import numpy as np
from scipy import ndimage

# Read the heightmap: one row of single digits per line.
# read().splitlines() avoids chopping the final digit when the file
# lacks a trailing newline (line[:-1] did), and the context manager
# closes the file handle (the original leaked it).
with open("input.txt", "r") as fh:
    lines = fh.read().splitlines()
lines = [[int(item) for item in line] for line in lines]

# part 1: a low point is strictly lower than all 4 adjacent cells.
grid = np.pad(lines, 1, mode="constant", constant_values=10)  # sentinel border of 10s
totalsum = 0
for (x, y), item in np.ndenumerate(grid):
    # skip the sentinel border cells
    if x != 0 and x != len(grid) - 1 and y != 0 and y != len(grid[x]) - 1:
        if item < grid[x-1][y] and item < grid[x+1][y] and item < grid[x][y-1] and item < grid[x][y+1]:
            totalsum += item + 1  # risk level = height + 1
print("What is the sum of the risk levels of all low points on your heightmap?", totalsum)

# part 2: basins are 4-connected regions of height < 9.
grid = np.where(np.array(lines) < 9, 1, 0)
labeled_array, num_features = ndimage.label(grid)  # default structure is 4-connectivity
# basin[i] counts the cells carrying label i+1
basin = np.zeros(num_features)
for (x, y), item in np.ndenumerate(labeled_array):
    if item != 0:
        basin[item - 1] += 1
basin = sorted(basin)
prod = np.prod(basin[-3:])  # product of the three largest basin sizes
print("What do you get if you multiply together the sizes of the three largest basins?", int(prod))
| [
"numpy.pad",
"numpy.ndenumerate",
"numpy.zeros",
"scipy.ndimage.label",
"numpy.array",
"numpy.prod"
] | [((353, 406), 'numpy.pad', 'np.pad', (['lines', '(1)'], {'mode': '"""constant"""', 'constant_values': '(10)'}), "(lines, 1, mode='constant', constant_values=10)\n", (359, 406), True, 'import numpy as np\n'), ((491, 511), 'numpy.ndenumerate', 'np.ndenumerate', (['grid'], {}), '(grid)\n', (505, 511), True, 'import numpy as np\n'), ((926, 945), 'scipy.ndimage.label', 'ndimage.label', (['grid'], {}), '(grid)\n', (939, 945), False, 'from scipy import ndimage\n'), ((1028, 1050), 'numpy.zeros', 'np.zeros', (['num_features'], {}), '(num_features)\n', (1036, 1050), True, 'import numpy as np\n'), ((1073, 1102), 'numpy.ndenumerate', 'np.ndenumerate', (['labeled_array'], {}), '(labeled_array)\n', (1087, 1102), True, 'import numpy as np\n'), ((1183, 1202), 'numpy.prod', 'np.prod', (['basin[-3:]'], {}), '(basin[-3:])\n', (1190, 1202), True, 'import numpy as np\n'), ((869, 884), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (877, 884), True, 'import numpy as np\n')] |
import array
import struct
import numpy as np
from PIL import Image
class MNIST:
    """Loader for the MNIST handwritten-digit dataset (28x28 images + labels)."""

    def __init__(self, data_dir):
        """Parse the four IDX files found under *data_dir*."""
        self.train_data = self.parse_images(data_dir + '/train-images-idx3-ubyte')
        self.train_labels = self.parse_labels(data_dir + '/train-labels-idx1-ubyte')
        self.test_data = self.parse_images(data_dir + '/t10k-images-idx3-ubyte')
        self.test_labels = self.parse_labels(data_dir + '/t10k-labels-idx1-ubyte')

    @staticmethod
    def parse_images(filename):
        """Read an IDX3 image file; return an (items, 28, 28) int8 array."""
        with open(filename, 'rb') as f:
            # Big-endian header: magic, item count, rows, cols
            magic, items, rows, cols = struct.unpack('>IIII', f.read(16))
            assert magic == 2051 and rows == 28 and cols == 28
            pixels = array.array('B', f.read())
        assert items * rows * cols == len(pixels)
        return np.array(pixels, dtype=np.int8).reshape((items, cols, rows), order='C')

    @staticmethod
    def parse_labels(filename):
        """Read an IDX1 label file; return an (items, 1) int8 array."""
        with open(filename, 'rb') as f:
            # Big-endian header: magic, item count
            magic, items = struct.unpack('>II', f.read(8))
            assert magic == 2049
            raw = array.array('B', f.read())
        assert len(raw) == items
        return np.array(raw, dtype=np.int8).reshape((items, 1))

    @staticmethod
    def display(array):
        """Show *array* as an image, upscaled 8x for visibility."""
        image = Image.fromarray(array)
        image = image.resize(tuple(8 * i for i in array.shape))
        image.show()
# NOTE(review): instantiating here eagerly parses all four IDX files at import
# time — importing this module without ./data present raises FileNotFoundError.
mnist = MNIST('./data')

if __name__ == '__main__':
    # Quick visual sanity check: print the shape of and display one training digit.
    example = mnist.train_data[41, :, :]
    print(example.shape)
    mnist.display(example)
| [
"PIL.Image.fromarray",
"numpy.array"
] | [((1433, 1455), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (1448, 1455), False, 'from PIL import Image\n'), ((960, 991), 'numpy.array', 'np.array', (['images'], {'dtype': 'np.int8'}), '(images, dtype=np.int8)\n', (968, 991), True, 'import numpy as np\n'), ((1322, 1353), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int8'}), '(labels, dtype=np.int8)\n', (1330, 1353), True, 'import numpy as np\n')] |
import numpy as np
import time
from random import sample
from some_bandits.utilities import save_to_pickle, load_from_pickle, truncate, convert_conf, calculate_utility
from some_bandits.bandit_options import bandit_args
from some_bandits.bandits.Bandit import Bandit
#formula = "ao"
FORMULA_FUNC = None
# Indices into each arm's stats list: [cumulative reward, cumulative squared
# reward, number of pulls].
CUM_REWARD = 0
CUM_SQ_REWARD = 1
N_K = 2
trace_len = 15000 #the total time of chosen trace in SWIM in seconds
horizon = round(trace_len / 60)  # horizon in minutes of the trace
class UCBImproved(Bandit):
    """UCB-Improved: round-based arm-elimination bandit.

    Arms are explored in rounds; once every surviving arm has been pulled
    n_m times, arms whose optimistic upper bound falls below the best
    pessimistic lower bound are eliminated and the confidence width
    ``delta_m`` is halved, until a single arm remains.
    """

    def __init__(self, formula=None):
        super().__init__("UCB-Improved")
        # Arms still candidate for selection; shrinks as arms are eliminated.
        self.removable_arms = [arm for arm in self.arms]
        # Per arm: [cumulative reward, cumulative squared reward, pull count].
        self.arm_reward_pairs = {arm: [0.0, 0.0, 0.0] for arm in self.arms}
        self.last_action = bandit_args["initial_configuration"]
        self.delta_m = 1.0  # confidence width, halved after each elimination round

    def start_strategy(self, reward):
        """Credit *reward* to the last pulled arm and return the next arm.

        Bug fix: the previous implementation ended with
        ``return self.start_strategy(reward)``, so after every elimination
        round the same reward was credited to ``last_action`` again,
        double- (or triple-) counting observations.  The reward is now
        recorded exactly once and the elimination phase runs in a loop.
        """
        if len(self.removable_arms) == 1:
            # Converged: only one candidate arm left.
            return self.removable_arms[0]
        # Record the observation exactly once.
        stats = self.arm_reward_pairs[self.last_action]
        stats[CUM_REWARD] += reward
        stats[CUM_SQ_REWARD] += np.square(reward)
        stats[N_K] += 1
        # Run elimination rounds until an under-explored arm is found
        # (exploration phase) or only one arm survives.
        while len(self.removable_arms) > 1:
            delta_sq = np.square(self.delta_m)
            n_m = np.ceil((2 * np.log(horizon * delta_sq)) / delta_sq)
            for arm in self.removable_arms:
                if self.arm_reward_pairs[arm][N_K] < n_m:
                    # Exploration phase: this arm still needs pulls this round.
                    self.last_action = arm
                    return arm
            # All arms fully explored this round -> eliminate weak arms.
            fac = np.sqrt(np.log(horizon * delta_sq) / (2 * n_m))
            pair_avgs = [self.arm_reward_pairs[arm][CUM_REWARD]
                         / self.arm_reward_pairs[arm][N_K]
                         for arm in self.removable_arms]
            del_boundary = max(avg - fac for avg in pair_avgs)
            # Keep only arms whose optimistic bound reaches the boundary.
            self.removable_arms = [arm for arm, avg
                                   in zip(self.removable_arms, pair_avgs)
                                   if avg + fac >= del_boundary]
            self.delta_m = self.delta_m / 2
        return self.removable_arms[0]
"numpy.square",
"numpy.log"
] | [((1147, 1164), 'numpy.square', 'np.square', (['reward'], {}), '(reward)\n', (1156, 1164), True, 'import numpy as np\n'), ((1249, 1272), 'numpy.square', 'np.square', (['self.delta_m'], {}), '(self.delta_m)\n', (1258, 1272), True, 'import numpy as np\n'), ((1567, 1593), 'numpy.log', 'np.log', (['(horizon * delta_sq)'], {}), '(horizon * delta_sq)\n', (1573, 1593), True, 'import numpy as np\n'), ((1302, 1328), 'numpy.log', 'np.log', (['(horizon * delta_sq)'], {}), '(horizon * delta_sq)\n', (1308, 1328), True, 'import numpy as np\n')] |
"""
Created on Friday March 15 16:22 2019
tools to work with tiffs from the GeoTek RXCT scanner
@author: <NAME>
"""
import os
import sys
import glob
import tkinter
from tkinter import filedialog
import numpy as np
import xml.etree.ElementTree
import tifffile
from skimage.transform import downscale_local_mean
from skimage import img_as_ubyte
import matplotlib as matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import warnings
from corescan_plotting import ct
###############################################################################
def linescan_xml(filename=''):
    """
    Read GeoTek linescan XML metadata into a dictionary.

    Parameters
    ----------
    filename : str, optional
        Path to the .xml file.  If empty, a file-open dialog is shown;
        cancelling the dialog exits the program.

    Returns
    -------
    dict
        Maps each XML element tag to its text, converted to int or float
        when the text parses as a number.
    """
    ## Get file via dialog if not specified in function call
    if not filename:
        tk_root = tkinter.Tk()
        tk_root.wm_withdraw()
        filename = filedialog.askopenfilename()
        tk_root.destroy()
        if not filename:
            sys.exit()
    ## Parse the xml file
    tree = xml.etree.ElementTree.parse(filename)
    root = tree.getroot()
    ## Add element tags and text to a dictionary
    dic = {}
    for elem in root.iter():
        try:
            if isinteger(elem.text):
                dic[elem.tag] = int(elem.text)
            elif isfloat(elem.text):
                dic[elem.tag] = float(elem.text)
            else:
                dic[elem.tag] = elem.text
        except TypeError:
            # elem.text is None for container elements; skip them.
            # (Was `except: TypeError` — a bare except with a no-op body.)
            pass
    return dic
###############################################################################
def linescan_in(filename='', xml_fname=''):
    """
    Read linescan image data from a tif file.

    Parameters
    ----------
    filename : str, optional
        Path to the .tif image.  If empty, a file-open dialog is shown;
        cancelling the dialog exits the program.
    xml_fname : str, optional
        Path to the matching GeoTek .xml metadata file.  When empty, the
        first '*.xml' file sharing the tif's basename is used.

    Returns
    -------
    (ndarray, dict)
        The image (multi-band data normalized to 0.0-1.0 rgb) and the
        parsed XML metadata dictionary.
    """
    ## Get filename if not specified in function call
    if not filename:
        tk_root = tkinter.Tk()
        tk_root.wm_withdraw()
        filename = filedialog.askopenfilename()
        tk_root.destroy()
        if not filename:
            sys.exit()
    im = tifffile.imread(filename)
    if np.size(np.shape(im)) > 2:  # multi-band image -> normalize to rgb
        im = bands_to_rgb(im)
    ## Read xml file (fall back to the sibling .xml next to the tif)
    if not xml_fname:
        xml_fname = glob.glob(os.path.splitext(filename)[0]+'*.xml')[0]
    xml_dic = linescan_xml(xml_fname)
    return im, xml_dic
###############################################################################
def linescan_plot(ls_data, ls_xml):
    """
    Plot a linescan image, using the GeoTek xml metadata to generate the
    physical (cm) scale of the axes.

    Parameters
    ----------
    ls_data : ndarray
        Linescan image as returned by linescan_in.
    ls_xml : dict
        Metadata dictionary as returned by linescan_xml; must contain
        'physical-width', 'physical-height', 'physical-top' and 'coreID'.

    Returns
    -------
    matplotlib figure sized to fill the screen height.
    """
    ## Downscale from 16 bit tif to 8 bit (ignore precision-loss warnings)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        im = img_as_ubyte(ls_data)
    ## Get screen size for figure (uses a throwaway Tk root for DPI queries)
    root = tkinter.Tk()
    pix2in = root.winfo_fpixels('1i')
    # NOTE(review): screen_width is computed but never used.
    screen_width = root.winfo_screenwidth()/pix2in
    screen_height = root.winfo_screenheight()/pix2in
    # Height-to-width ratio of the physical core image, used to keep aspect
    image_h2w = round(ls_xml['physical-height']/ls_xml['physical-width'])
    fig = plt.figure(figsize=(screen_height/image_h2w, screen_height))
    ## Plot image with axes in physical units (top of core at the top)
    aspect = 'equal'
    ax = plt.subplot(1, 1, 1)
    plt.imshow(im, aspect=aspect, extent=(0,ls_xml['physical-width'],\
            ls_xml['physical-top']+ls_xml['physical-height'],\
            ls_xml['physical-top']))
    ax.set_title(ls_xml['coreID'])
    # Depth axis: major ticks every 10 cm, minor every 1 cm; width every 1 cm
    ax.yaxis.set_major_locator(MultipleLocator(10))
    ax.yaxis.set_minor_locator(MultipleLocator(1))
    ax.xaxis.set_major_locator(MultipleLocator(1))
    return fig
###############################################################################
def dpi_ls_plot(fig, ls_xml):
    """
    Compute a figure dpi that preserves the source image resolution.

    The returned dpi is the rounded mean of the horizontal and vertical
    pixels-per-inch required to map the original scan ('pixel-width' x
    'scan-lines' pixels) onto the first axes of *fig*.
    """
    # Size of the first axes in inches
    bbox = fig.axes[0].get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    dpi_w = ls_xml['pixel-width'] / bbox.width
    dpi_h = ls_xml['scan-lines'] / bbox.height
    return np.round(np.average([dpi_w, dpi_h]))
###############################################################################
def linescan_histogram(ls_data):
    """
    Plot a 100-bin histogram of the linescan image intensities.
    """
    # Downscale from 16 bit tif to 8 bit, silencing precision warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        im = img_as_ubyte(ls_data)
    counts, edges = np.histogram(im, bins=100)
    bar_width = 0.7 * (edges[1] - edges[0])
    centers = (edges[:-1] + edges[1:]) / 2
    # Plot histogram
    fig = plt.figure(figsize=(10, 10))
    plt.bar(centers, counts, align='center', width=bar_width)
    plt.xlabel('Intensity')
    plt.ylabel('Count')
    return fig
###############################################################################
def isfloat(x):
    """
    Return True if *x* can be converted to float, else False.

    Raises TypeError (uncaught) for non-string/non-numeric inputs such as
    None, matching the original behaviour relied on by linescan_xml.
    """
    try:
        float(x)  # value discarded; only convertibility matters
    except ValueError:
        return False
    return True
###############################################################################
def isinteger(x):
    """
    Return True if *x* can be converted to int, else False.

    Raises TypeError (uncaught) for non-string/non-numeric inputs such as
    None, matching the original behaviour relied on by linescan_xml.
    """
    try:
        int(x)  # value discarded; only convertibility matters
    except ValueError:
        return False
    return True
###############################################################################
def cm2inch(*tupl):
    """
    Convert centimeters to inches.

    Accepts either separate values ``cm2inch(w, h)`` or a single tuple
    ``cm2inch((w, h))`` and returns a tuple of inch values.
    """
    CM_PER_INCH = 2.54
    values = tupl[0] if isinstance(tupl[0], tuple) else tupl
    return tuple(v / CM_PER_INCH for v in values)
###############################################################################
def get_ax_size(ax):
    """
    Return the (width, height) of a matplotlib axes in inches.

    Bug fix: the original referenced an undefined global ``fig`` (NameError
    at runtime); the figure is now obtained from the axes itself.
    """
    fig = ax.get_figure()
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width, height = bbox.width, bbox.height
    return width, height
###############################################################################
def normalize(array):
    """
    Linearly rescale a numpy array into the range 0.0 - 1.0.
    """
    lo = array.min()
    hi = array.max()
    return (array - lo) / (hi - lo)
###############################################################################
def bands_to_rgb(rgb_array):
    """
    Stack the three bands of an m x n x 3 array (assumed R, G, B order)
    into an RGB image, each band independently rescaled to 0.0 - 1.0,
    ready for plotting with imshow.
    """
    channels = [normalize(rgb_array[:, :, band]) for band in range(3)]
    return np.dstack(tuple(channels))
################################################################################
def crop_custom(ls_data,ls_xml,units='cm',bbox=None,plot=False):
    """
    Extract a subset of the input image using a bounding box defined by:
    [x0,x1,y0,y1] where x0,y0 are top left and x1,y1 are bottom right.
    By default, coordinates are in centimeters, but can be defined in
    pixels with units='pixels'.

    Returns (ls_crop, xml): the cropped image and a copy of ls_xml with
    its physical dimensions/top updated and a depth range appended to
    'coreID'.  Returns None when bbox is missing.
    """
    if bbox == None:
        print('need to define bbox, see help')
        return
    else:
        x0,x1,y0,y1 = bbox[0],bbox[1],bbox[2],bbox[3]
    # convert from cm to pixels and visa versa if necessary
    # NOTE(review): units other than 'cm'/'pixels' leave xp0..yp1 unset
    # and raise NameError below.
    cm2pix = ls_xml['pixels-per-CM']
    top = ls_xml['physical-top']
    if units == 'cm':
        # y coordinates are depths relative to the physical top of the scan
        xp0,xp1 = int(x0*cm2pix),int(x1*cm2pix)
        yp0 = int(np.abs(top-y0)*cm2pix)
        yp1 = int(np.abs(top-y1)*cm2pix)
    elif units == 'pixels':
        xp0,xp1,yp0,yp1 = x0,x1,y0,y1
        # back-compute cm coordinates for plotting and metadata update
        x0,x1 = xp0/cm2pix,xp1/cm2pix
        y0,y1 = top+yp0/cm2pix,top+yp1/cm2pix
    ## Extract
    # NOTE(review): the -1 offsets look like a 1-based -> 0-based index
    # shift — confirm against the scanner's pixel origin convention.
    ls_crop = ls_data[yp0-1:yp1-1,xp0-1:xp1-1]
    ## Plot original next to the crop, with the bbox drawn on the original
    if plot == True:
        fig, (ax1,ax2) = plt.subplots(1,2)
        ax1.imshow(ls_data, aspect='equal', extent=(0,ls_xml['physical-width'],\
                ls_xml['physical-top']+ls_xml['physical-height'],\
                ls_xml['physical-top']))
        ax1.plot([x0,x0,x1,x1,x0],[y0,y1,y1,y0,y0],'ro-')
        ax1.set_title('original')
        ax2.imshow(ls_crop, aspect='equal',extent=(x0,x1,y1,y0))
        ax2.set_ylim(y1,y0)
        ax2.set_title('cropped')
    ## Update a copy of the xml so that new images scale correctly
    xml = ls_xml.copy()
    xml['physical-width'] = x1-x0
    xml['physical-height'] = y1-y0
    xml['pixel-width'] = ls_crop.shape[1]
    xml['scan-lines'] = ls_crop.shape[0]
    xml['physical-top'] = y0
    # Append the cropped depth range to the core identifier
    xml['coreID'] = str("%s %d-%d cm"
                        %(xml['coreID'],
                        xml['physical-top'],
                        xml['physical-top']+xml['physical-height']))
    return ls_crop,xml
| [
"numpy.abs",
"matplotlib.pyplot.bar",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.histogram",
"warnings.simplefilter",
"matplotlib.pyplot.imshow",
"os.path.dirname",
"tkinter.filedialog.askopenfilename",
"warnings.catch_warnings",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.s... | [((1050, 1072), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (1065, 1072), False, 'import os\n'), ((2020, 2045), 'tifffile.imread', 'tifffile.imread', (['filename'], {}), '(filename)\n', (2035, 2045), False, 'import tifffile\n'), ((2204, 2229), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (2219, 2229), False, 'import os\n'), ((2797, 2809), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (2807, 2809), False, 'import tkinter\n'), ((3036, 3098), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(screen_height / image_h2w, screen_height)'}), '(figsize=(screen_height / image_h2w, screen_height))\n', (3046, 3098), True, 'import matplotlib.pyplot as plt\n'), ((3146, 3166), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (3157, 3166), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3319), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {'aspect': 'aspect', 'extent': "(0, ls_xml['physical-width'], ls_xml['physical-top'] + ls_xml[\n 'physical-height'], ls_xml['physical-top'])"}), "(im, aspect=aspect, extent=(0, ls_xml['physical-width'], ls_xml[\n 'physical-top'] + ls_xml['physical-height'], ls_xml['physical-top']))\n", (3181, 3319), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4659), 'numpy.histogram', 'np.histogram', (['im'], {'bins': '(100)'}), '(im, bins=100)\n', (4645, 4659), True, 'import numpy as np\n'), ((4767, 4795), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4777, 4795), True, 'import matplotlib.pyplot as plt\n'), ((4799, 4849), 'matplotlib.pyplot.bar', 'plt.bar', (['center', 'hist'], {'align': '"""center"""', 'width': 'width'}), "(center, hist, align='center', width=width)\n", (4806, 4849), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4876), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Intensity"""'], {}), "('Intensity')\n", (4863, 4876), True, 'import 
matplotlib.pyplot as plt\n'), ((4881, 4900), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (4891, 4900), True, 'import matplotlib.pyplot as plt\n'), ((6743, 6763), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (6752, 6763), True, 'import numpy as np\n'), ((852, 864), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (862, 864), False, 'import tkinter\n'), ((914, 942), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (940, 942), False, 'from tkinter import filedialog\n'), ((1846, 1858), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (1856, 1858), False, 'import tkinter\n'), ((1908, 1936), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (1934, 1936), False, 'from tkinter import filedialog\n'), ((2650, 2675), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2673, 2675), False, 'import warnings\n'), ((2685, 2716), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2706, 2716), False, 'import warnings\n'), ((2730, 2751), 'skimage.img_as_ubyte', 'img_as_ubyte', (['ls_data'], {}), '(ls_data)\n', (2742, 2751), False, 'from skimage import img_as_ubyte\n'), ((3428, 3447), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (3443, 3447), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((3480, 3498), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (3495, 3498), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((3531, 3549), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (3546, 3549), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((4108, 4172), 'numpy.average', 'np.average', (['[orig_w_pixels / new_w_in, orig_h_pixels / new_h_in]'], {}), '([orig_w_pixels 
/ new_w_in, orig_h_pixels / new_h_in])\n', (4118, 4172), True, 'import numpy as np\n'), ((4472, 4497), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4495, 4497), False, 'import warnings\n'), ((4507, 4538), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4528, 4538), False, 'import warnings\n'), ((4552, 4573), 'skimage.img_as_ubyte', 'img_as_ubyte', (['ls_data'], {}), '(ls_data)\n', (4564, 4573), False, 'from skimage import img_as_ubyte\n'), ((7878, 7896), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (7890, 7896), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1016), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1014, 1016), False, 'import sys\n'), ((2000, 2010), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2008, 2010), False, 'import sys\n'), ((2061, 2073), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (2069, 2073), True, 'import numpy as np\n'), ((7533, 7549), 'numpy.abs', 'np.abs', (['(top - y0)'], {}), '(top - y0)\n', (7539, 7549), True, 'import numpy as np\n'), ((7574, 7590), 'numpy.abs', 'np.abs', (['(top - y1)'], {}), '(top - y1)\n', (7580, 7590), True, 'import numpy as np\n'), ((2303, 2329), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2319, 2329), False, 'import os\n')] |
#!/usr/bin/env python
import sys, os
import numpy as np
from scipy.signal import find_peaks
from math import ceil
from pylab import detrend,fft,savefig
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from scipy.signal import blackman as blk
from glob import glob
import pandas as pd
def fig_dir(f):
    """Return the output directory for figures (currently a fixed path)."""
    return 'fig/'
def long_basename(f):
    """Return the final '/'-separated component of *f*."""
    return f.rsplit('/', 1)[-1]
def ts_basename(f):
    """Strip everything from the first '_NtsT' token onward."""
    return f.partition('_NtsT')[0]
def fft_basename(f):
    """Swap every 'ts_' token for 'fft_' in the file basename."""
    return 'fft_'.join(f.split('ts_'))
def orbit_basename(f):
    """Swap every 'ts_' token for 'orbit_' in the file basename."""
    return 'orbit_'.join(f.split('ts_'))
def parse_token(bn, token):
    """Return the value that follows *token* in an underscore-delimited name."""
    tail = bn.split(token)[-1]
    return tail.split('_')[0]
def pktopkAmp(S, M=0, F=0.9):
    """
    Estimate the peak-to-peak amplitude of an oscillating signal.

    Peaks (valleys) are detected over the last *M* samples of *S* above
    (below) a threshold placed a fraction *F* of the way from the signal
    mean toward its max (min).  M == 0 means use the whole signal.

    Returns
    -------
    (float, float)
        Mean peak-to-peak amplitude and its relative error.
    """
    if M == 0:
        M = len(S)
    tail = S[-M:]
    mean_all = np.mean(S)
    upper = mean_all + F * (np.max(S) - mean_all)
    lower = mean_all - F * (mean_all - np.min(S))
    peaks, _ = find_peaks(tail, height=upper)
    valleys, _ = find_peaks(-tail, height=-lower)
    amp = np.mean(tail[peaks]) - np.mean(tail[valleys])
    rel_err = (np.std(tail[peaks]) ** 2 + np.std(tail[valleys]) ** 2) ** 0.5 / amp
    return (amp, rel_err)
def findIndex(df, Bo, Re, alpha, wf):
    """Return the index labels of rows matching all four run parameters."""
    mask = ((df['Re'] == Re) & (df['Bo'] == Bo)
            & (df['alpha'] == alpha) & (df['w_f'] == wf))
    return df.index[mask].tolist()
def addRow(df, runs, Bo, Re, alpha, wf, NtsT, NT, wFFT, AEk, AvgEk):
    """
    Return *df* with one new result row appended.

    Uses pd.concat because DataFrame.append was deprecated in pandas 1.4
    and removed in pandas 2.0.
    """
    new_row = pd.DataFrame([{'runs_#': runs, 'Bo': Bo, 'Re': Re, 'alpha': alpha,
                             'w_f': wf, 'NtsT': NtsT, 'NT': NT, 'w*': wFFT,
                             'stdEk': AEk, 'AvgEk': AvgEk}])
    return pd.concat([df, new_row], ignore_index=True)
def replaceRow(df, index, runs, Bo, Re, alpha, wf, NtsT, NT, wFFT, stdEk, AvgEk):
    """Overwrite in place the result columns of the row(s) at *index*."""
    columns = ['runs_#', 'Bo', 'Re', 'alpha', 'w_f',
               'NtsT', 'NT', 'w*', 'stdEk', 'AvgEk']
    values = [runs, Bo, Re, alpha, wf, NtsT, NT, wFFT, stdEk, AvgEk]
    df.loc[index, columns] = values
    return None
def collectData(DAT_DIR,infiles,outfile):
    """
    Merge per-run stats files matching DAT_DIR+infiles into one TSV.

    Loads the existing output table if present, then for every input file
    reads the second line as a whitespace-separated record, replaces the
    matching (Bo, Re, alpha, w_f) row when the new run number is >= the
    stored one, or appends a new row.  Processed input files are DELETED.
    The merged table is written to DAT_DIR+outfile (space-separated).
    """
    df = pd.DataFrame(columns=['runs_#','Bo','Re','alpha','w_f','NtsT',
        'NT','w*','stdEk','AvgEk'])
    # Resume from an existing table; dtype=object keeps everything as strings
    if os.path.exists(DAT_DIR+outfile):
        df = pd.read_csv(DAT_DIR+outfile, sep=' ', dtype=object)
    for infile in glob(DAT_DIR+infiles):
        with open(infile,'r') as f:
            f.readline()  # skip header line
            try:
                params = f.readline().strip('\n').split()
                runs = params[0]
                Bo = params[1]
                Re = params[2]
                alpha = params[3]
                wf = params[4]
                NtsT = params[5]
                NT = params[6]
                wFFT = params[7]
                stdEk = params[8]
                AvgEk = params[9]
            except Exception as ex:
                # NOTE(review): on failure the variables keep values from the
                # previous file (or are undefined on the first one) — the code
                # below still runs with stale/missing data.
                print('Exception reading line: ', ex)
            filterIndex = findIndex(df,Bo,Re,alpha,wf)
            # NOTE(review): runs is a string, so >= compares lexicographically
            # (e.g. '9' >= '10') — confirm run numbers are zero-padded.
            if filterIndex and runs >= df.loc[filterIndex,'runs_#'].values:
                replaceRow(df,filterIndex,runs,Bo,Re,alpha,wf,NtsT,NT,wFFT,stdEk,AvgEk)
            elif not filterIndex:
                df = addRow(df,runs,Bo,Re,alpha,wf,NtsT,NT,wFFT,stdEk,AvgEk)
            f.close()  # redundant inside the with-block
        os.remove(infile)  # input files are consumed
    with open(DAT_DIR+outfile,'w') as outfile:
        df.to_csv(outfile,header=True,index=False,sep=' ')
    outfile.close()
    return None
def main():
f = sys.argv[1]
res_dir = sys.argv[2]
dtinput = sys.argv[3]
FIG_DIR = fig_dir(res_dir)
DAT_DIR = f'dat/'
longbn = long_basename(f)
tsbn = ts_basename(longbn)
fftbn = fft_basename(tsbn)
orbitbn = orbit_basename(tsbn)
tokens = ['Re','Bo','alpha','wf','NtsT','NT']
try:
values = [ parse_token(longbn,token) for token in tokens ]
Re = values[0]
Bo = values[1]
alpha= values[2]
wf = values[3] # Forcing Angular Freq
NtsT = int(values[4])
NT = int(values[5])
runs = f.split('runs_')[-1].split('/')[0]
except Exception as ex:
print('Exception in parse token: ', ex)
if float(wf)>0:
Period = 2*np.pi/float(wf)
dt = Period/NtsT
Nsteps = float(NT*NtsT)
else:
print('wf not greater than 0. wf = ', wf) # COMPLETE THIS!
# if TU<0:
# Nsteps = -TU
# if wf!=0:
# dt = float(1/(wf*Nsteps))
# elif wf==0:
# dt = float(dtinput)
# else:
# dt = float(dtinput)
# Nsteps = float(TU/dt)
title_string = longbn.replace('_',' ')
t,Ek,Eg,Ew,ur,uw,uz = np.loadtxt(f).T
os.makedirs(FIG_DIR,exist_ok=True)
#############
# Plot ffts #
#############
P = 100
M = int(len(Ek)*P/100)
T = M*dt # Period ??
w0 = 2*np.pi/T # Natural Frequency??
AEk = Ek[-M:].std() # Amplitud of Oscillation
fftEk = abs(fft(detrend(Ek[-M:])*blk(M))[:M//2]) # FFT with Blackman filter [array]
wMEk = w0*fftEk.argmax() # Compute dominant frequency
AEg = Eg[-M:].std() # Amplitud of Oscillation
fftEg = abs(fft(detrend(Eg[-M:])*blk(M))[:M//2]) # FFT with Blackman filter [array]
wMEg = w0*fftEg.argmax() # Compute dominant frequency
AEw = Ew[-M:].std() # Amplitud of Oscillation
fftEw = abs(fft(detrend(Ew[-M:])*blk(M))[:M//2]) # FFT with Blackman filter [array]
wMEw = w0*fftEw.argmax() # Compute dominant frequency
Aur = ur[-M:].std() # Amplitud of Oscillation
fftur = abs(fft(detrend(ur[-M:])*blk(M))[:M//2]) # FFT with Blackman filter [array]
wMur = w0*fftur.argmax() # Compute dominant frequency
Auw = uw[-M:].std() # Amplitud of Oscillation
fftuw = abs(fft(detrend(uw[-M:])*blk(M))[:M//2]) # FFT with Blackman filter [array]
wMuw = w0*fftuw.argmax() # Compute dominant frequency
Auz = uz[-M:].std() # Amplitud of Oscillation
fftuz = abs(fft(detrend(uz[-M:])*blk(M))[:M//2]) # FFT with Blackman filter [array]
wMuz = w0*fftuz.argmax() # Compute dominant frequency
wFFT = min([wMEk,wMEg,wMEw,wMur,wMuw,wMuz])
wLim = 2
AnotationSize = 15
xPosText = 0.25
yPosText = 0.92
ticksize = 12
labelsize = 18
labelpadx = 3
labelpady = 16
fig, axes = plt.subplots(nrows=2,ncols=3,figsize=(14,9)) # Create canvas & axes
## Global Kinetic Energy FFT
axes[0,0].semilogy(w0*np.arange(len(fftEk)),fftEk,'k-')
axes[0,0].annotate('$\omega^*$ = {:f}'.format(wMEk), xy=(wMEk, fftEk.max()),
xycoords='data', xytext=(xPosText,yPosText), textcoords='axes fraction',
size=AnotationSize, arrowprops=dict(arrowstyle="->"))
axes[0,0].set_xlabel('$\omega$',fontsize=labelsize,labelpad=labelpadx)
axes[0,0].set_ylabel('$|\hat{E}_k|$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[0,0].set_xlim(0,wLim)
axes[0,0].tick_params(labelsize=ticksize)
## Global Angular Momentum FFT
axes[0,1].semilogy(w0*np.arange(len(fftEw)),fftEw,'k-')
axes[0,1].annotate('$\omega^*$ = {:f}'.format(wMEw), xy=(wMEw, fftEw.max()),
xycoords='data', xytext=(xPosText,yPosText), textcoords='axes fraction',
size=AnotationSize, arrowprops=dict(arrowstyle="->"))
axes[0,1].set_xlabel('$\omega$',fontsize=labelsize,labelpad=labelpadx)
axes[0,1].set_ylabel('$|\hat{E}_w|$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[0,1].set_xlim(0,wLim)
axes[0,1].tick_params(labelsize=ticksize)
## Global Enstrophy FFT
axes[0,2].semilogy(w0*np.arange(len(fftEg)),fftEg,'k-')
axes[0,2].annotate('$\omega^*$ = {:f}'.format(wMEg), xy=(wMEg, fftEk.max()),
xycoords='data', xytext=(xPosText,yPosText), textcoords='axes fraction',
size=AnotationSize, arrowprops=dict(arrowstyle="->"))
axes[0,2].set_xlabel('$\omega$',fontsize=labelsize,labelpad=labelpadx)
axes[0,2].set_ylabel('$|\hat{E}_{\gamma}|$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[0,2].set_xlim(0,wLim)
axes[0,2].tick_params(labelsize=ticksize)
## Local Radial Velocity FFT
axes[1,0].semilogy(w0*np.arange(len(fftur)),fftur,'k-')
axes[1,0].annotate('$\omega^*$ = {:f}'.format(wMur), xy=(wMur, fftur.max()),
xycoords='data', xytext=(xPosText,yPosText), textcoords='axes fraction',
size=AnotationSize, arrowprops=dict(arrowstyle="->"))
axes[1,0].set_xlabel('$\omega$',fontsize=labelsize,labelpad=labelpadx)
axes[1,0].set_ylabel('$|\hat{u}_r|$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[1,0].set_xlim(0,wLim)
axes[1,0].tick_params(labelsize=ticksize)
## Local Azimuthal Velocity FFT
axes[1,1].semilogy(w0*np.arange(len(fftuw)),fftuw,'k-')
axes[1,1].annotate('$\omega^*$ = {:f}'.format(wMuw), xy=(wMuw, fftuw.max()),
xycoords='data', xytext=(xPosText,yPosText), textcoords='axes fraction',
size=AnotationSize, arrowprops=dict(arrowstyle="->"))
axes[1,1].set_xlabel('$\omega$',fontsize=labelsize,labelpad=labelpadx)
axes[1,1].set_ylabel(r'$|\hat{u}_{\theta}|$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[1,1].set_xlim(0,wLim)
axes[1,1].tick_params(labelsize=ticksize)
## Local Axial Velocity FFT
axes[1,2].semilogy(w0*np.arange(len(fftuz)),fftuz,'k-')
axes[1,2].annotate('$\omega^*$ = {:f}'.format(wMuz), xy=(wMuz, fftuz.max()),
xycoords='data', xytext=(xPosText,yPosText), textcoords='axes fraction',
size=AnotationSize, arrowprops=dict(arrowstyle="->"))
axes[1,2].set_xlabel('$\omega$',fontsize=labelsize,labelpad=labelpadx)
axes[1,2].set_ylabel('$|\hat{u}_z|$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[1,2].set_xlim(0,wLim)
axes[1,2].tick_params(labelsize=ticksize)
fig.tight_layout()
savefig(f'{FIG_DIR:s}{fftbn:s}.png')
plt.close()
####################
# Plot time series #
####################
P = 5 # last P% of the time series
Nperiods = 4
if float(wf)>0:
M = int(Nperiods*NtsT)
# else: COMPLETE THIS PART
# if wFFT == 0:
# M = int(len(t)*P/100)
# else:
# TUmin = Nperiods*2*np.pi/wFFT
# M = ceil(TUmin/dt)
ticksize = 12
labelsize = 18
labelpadx = 3
labelpady = 10
w = 1+float(alpha)*np.cos(float(wf)*t[-M:])
fig, axes = plt.subplots(nrows=2,ncols=3,figsize=(14,9)) # Create canvas & axes
## Global Kinetic Energy Time Series
axes[0,0].plot(t[-M:]/Period,Ek[-M:],'r-')
axes[0,0].set_xlabel('$t/T_f$',fontsize=labelsize,labelpad=labelpadx)
axes[0,0].set_ylabel('$E_k$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[0,0].tick_params(labelsize=ticksize)
ax2 = axes[0,0].twinx()
ax2.plot(t[-M:]/Period,w, color='tab:blue')
ax2.set_ylabel('$\omega$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax2.tick_params(labelsize=ticksize)
## Global Angular Momentum Time Series
axes[0,1].plot(t[-M:]/Period,Ew[-M:],'r-')
axes[0,1].set_xlabel('$t/T_f$',fontsize=labelsize,labelpad=labelpadx)
axes[0,1].set_ylabel('$E_{\omega}$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[0,1].tick_params(labelsize=ticksize)
ax2 = axes[0,1].twinx()
ax2.plot(t[-M:]/Period,w, color='tab:blue')
ax2.set_ylabel('$\omega$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax2.tick_params(labelsize=ticksize)
## Global Enstrophy Time Series
axes[0,2].plot(t[-M:]/Period,Eg[-M:],'r-')
axes[0,2].set_xlabel('$t/T_f$',fontsize=labelsize,labelpad=labelpadx)
axes[0,2].set_ylabel('$E_{\gamma}$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[0,2].tick_params(labelsize=ticksize)
ax2 = axes[0,2].twinx()
ax2.plot(t[-M:]/Period,w, color='tab:blue')
ax2.set_ylabel('$\omega$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax2.tick_params(labelsize=ticksize)
## Local Radial Velocity Time Series
axes[1,0].plot(t[-M:]/Period,ur[-M:],'r-')
axes[1,0].set_xlabel('$t/T_f$',fontsize=labelsize,labelpad=labelpadx)
axes[1,0].set_ylabel('$u_r$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[1,0].tick_params(labelsize=ticksize)
ax2 = axes[1,0].twinx()
ax2.plot(t[-M:]/Period,w, color='tab:blue')
ax2.set_ylabel('$\omega$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax2.tick_params(labelsize=ticksize)
## Local Azimuthal Velocity Time Series
axes[1,1].plot(t[-M:]/Period,uw[-M:],'r-')
axes[1,1].set_xlabel('$t/T_f$',fontsize=labelsize,labelpad=labelpadx)
axes[1,1].set_ylabel(r'$u_{\theta}$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[1,1].tick_params(labelsize=ticksize)
ax2 = axes[1,1].twinx()
ax2.plot(t[-M:]/Period,w, color='tab:blue')
ax2.set_ylabel('$\omega$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax2.tick_params(labelsize=ticksize)
## Local Axial Velocity Time Series
axes[1,2].plot(t[-M:]/Period,uz[-M:],'r-')
axes[1,2].set_xlabel('$t/T_f$',fontsize=labelsize,labelpad=labelpadx)
axes[1,2].set_ylabel('$u_z$',rotation=0,fontsize=labelsize,labelpad=labelpady)
axes[1,2].tick_params(labelsize=ticksize)
ax2 = axes[1,2].twinx()
ax2.plot(t[-M:]/Period,w, color='tab:blue')
ax2.set_ylabel('$\omega$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax2.tick_params(labelsize=ticksize)
fig.tight_layout()
fig.savefig(f'{FIG_DIR:s}{tsbn:s}.png')
plt.close()
#####################
# Plot phase orbits #
#####################
elevation = 30
theta0 = 10
dtheta = 35
ticksize = 0.1
labelsize = 16
labelpadx = 0
labelpady = 0
labelpadz = 0
fig = plt.figure(figsize=(14,10)) # Create canvas
## Plot Global Orbit
for j in range(1,4):
ax = fig.add_subplot(2,3,j,projection='3d')
ax.xaxis.set_rotate_label(False) # disable automatic rotation
ax.yaxis.set_rotate_label(False) # disable automatic rotation
ax.zaxis.set_rotate_label(False) # disable automatic rotation
ax.plot(Eg,Ew,Ek,'g-')
ax.set_xlabel('$E_{\gamma}$',fontsize=labelsize,labelpad=labelpadx)
ax.set_ylabel('$E_{\omega}$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax.set_zlabel('$E_k$',rotation=0,fontsize=labelsize,labelpad=labelpadz)
ax.tick_params(labelsize=ticksize)
ax.view_init(elevation,theta0+(j-1)*dtheta)
## Plot Local Orbit
for j in range(1,4):
ax = fig.add_subplot(2,3,j+3,projection='3d')
ax.xaxis.set_rotate_label(False) # disable automatic rotation
ax.yaxis.set_rotate_label(False) # disable automatic rotation
ax.zaxis.set_rotate_label(False) # disable automatic rotation
ax.plot(ur,uw,uz,'b-')
ax.set_xlabel('$u_r$',fontsize=labelsize,labelpad=labelpadx)
ax.set_ylabel(r'$u_{\theta}$',rotation=0,fontsize=labelsize,labelpad=labelpady)
ax.set_zlabel('$u_z$',rotation=0,fontsize=labelsize,labelpad=labelpadz)
ax.tick_params(labelsize=ticksize)
ax.view_init(elevation,theta0+(j-1)*dtheta)
fig.tight_layout()
savefig(f'{FIG_DIR:s}{orbitbn:s}.png')
plt.close()
##############
# Write Data #
##############
#(pkpkAmpEk, relErrEk) = pktopkAmp(Ek)
AvgEk = sum(Ek[-NtsT:])*dt/Period
dataFile = longbn+'.txt'
df = pd.DataFrame(columns=['runs_#','Bo','Re','alpha','w_f','NtsT','NT','w*','stdEk','AvgEk'])
df = addRow(df,runs,Bo,Re,alpha,wf,NtsT,NT,wFFT,AEk,AvgEk)
with open(os.path.join(DAT_DIR, dataFile),'w') as outfile:
df.to_csv(outfile,header=True,index=False,sep=' ')
outfile.close()
return None
# Script entry point: run the full post-processing pipeline defined above
# only when executed directly, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"os.remove",
"pandas.read_csv",
"pylab.detrend",
"matplotlib.pyplot.figure",
"scipy.signal.find_peaks",
"numpy.mean",
"glob.glob",
"os.path.join",
"pandas.DataFrame",
"numpy.std",
"matplotlib.pyplot.close",
"os.path.exists",
"numpy.max",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"... | [((874, 908), 'scipy.signal.find_peaks', 'find_peaks', (['S[-M:]'], {'height': 'thresUp'}), '(S[-M:], height=thresUp)\n', (884, 908), False, 'from scipy.signal import find_peaks\n'), ((924, 961), 'scipy.signal.find_peaks', 'find_peaks', (['(-S[-M:])'], {'height': '(-thresDwn)'}), '(-S[-M:], height=-thresDwn)\n', (934, 961), False, 'from scipy.signal import find_peaks\n'), ((1799, 1901), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['runs_#', 'Bo', 'Re', 'alpha', 'w_f', 'NtsT', 'NT', 'w*', 'stdEk', 'AvgEk']"}), "(columns=['runs_#', 'Bo', 'Re', 'alpha', 'w_f', 'NtsT', 'NT',\n 'w*', 'stdEk', 'AvgEk'])\n", (1811, 1901), True, 'import pandas as pd\n'), ((1899, 1932), 'os.path.exists', 'os.path.exists', (['(DAT_DIR + outfile)'], {}), '(DAT_DIR + outfile)\n', (1913, 1932), False, 'import sys, os\n'), ((2009, 2032), 'glob.glob', 'glob', (['(DAT_DIR + infiles)'], {}), '(DAT_DIR + infiles)\n', (2013, 2032), False, 'from glob import glob\n'), ((4090, 4125), 'os.makedirs', 'os.makedirs', (['FIG_DIR'], {'exist_ok': '(True)'}), '(FIG_DIR, exist_ok=True)\n', (4101, 4125), False, 'import sys, os\n'), ((5622, 5669), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': '(14, 9)'}), '(nrows=2, ncols=3, figsize=(14, 9))\n', (5634, 5669), True, 'from matplotlib import pyplot as plt\n'), ((9028, 9064), 'pylab.savefig', 'savefig', (['f"""{FIG_DIR:s}{fftbn:s}.png"""'], {}), "(f'{FIG_DIR:s}{fftbn:s}.png')\n", (9035, 9064), False, 'from pylab import detrend, fft, savefig\n'), ((9067, 9078), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9076, 9078), True, 'from matplotlib import pyplot as plt\n'), ((9548, 9595), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': '(14, 9)'}), '(nrows=2, ncols=3, figsize=(14, 9))\n', (9560, 9595), True, 'from matplotlib import pyplot as plt\n'), ((12517, 12528), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12526, 12528), True, 'from 
matplotlib import pyplot as plt\n'), ((12732, 12760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 10)'}), '(figsize=(14, 10))\n', (12742, 12760), True, 'from matplotlib import pyplot as plt\n'), ((14115, 14153), 'pylab.savefig', 'savefig', (['f"""{FIG_DIR:s}{orbitbn:s}.png"""'], {}), "(f'{FIG_DIR:s}{orbitbn:s}.png')\n", (14122, 14153), False, 'from pylab import detrend, fft, savefig\n'), ((14156, 14167), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14165, 14167), True, 'from matplotlib import pyplot as plt\n'), ((14326, 14428), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['runs_#', 'Bo', 'Re', 'alpha', 'w_f', 'NtsT', 'NT', 'w*', 'stdEk', 'AvgEk']"}), "(columns=['runs_#', 'Bo', 'Re', 'alpha', 'w_f', 'NtsT', 'NT',\n 'w*', 'stdEk', 'AvgEk'])\n", (14338, 14428), True, 'import pandas as pd\n'), ((774, 784), 'numpy.mean', 'np.mean', (['S'], {}), '(S)\n', (781, 784), True, 'import numpy as np\n'), ((823, 833), 'numpy.mean', 'np.mean', (['S'], {}), '(S)\n', (830, 833), True, 'import numpy as np\n'), ((973, 995), 'numpy.mean', 'np.mean', (['S[-M:][peaks]'], {}), '(S[-M:][peaks])\n', (980, 995), True, 'import numpy as np\n'), ((998, 1022), 'numpy.mean', 'np.mean', (['S[-M:][valleys]'], {}), '(S[-M:][valleys])\n', (1005, 1022), True, 'import numpy as np\n'), ((1941, 1994), 'pandas.read_csv', 'pd.read_csv', (['(DAT_DIR + outfile)'], {'sep': '""" """', 'dtype': 'object'}), "(DAT_DIR + outfile, sep=' ', dtype=object)\n", (1952, 1994), True, 'import pandas as pd\n'), ((2836, 2853), 'os.remove', 'os.remove', (['infile'], {}), '(infile)\n', (2845, 2853), False, 'import sys, os\n'), ((4072, 4085), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (4082, 4085), True, 'import numpy as np\n'), ((14490, 14521), 'os.path.join', 'os.path.join', (['DAT_DIR', 'dataFile'], {}), '(DAT_DIR, dataFile)\n', (14502, 14521), False, 'import sys, os\n'), ((788, 797), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (794, 797), True, 'import numpy as np\n'), 
((798, 808), 'numpy.mean', 'np.mean', (['S'], {}), '(S)\n', (805, 808), True, 'import numpy as np\n'), ((837, 847), 'numpy.mean', 'np.mean', (['S'], {}), '(S)\n', (844, 847), True, 'import numpy as np\n'), ((848, 857), 'numpy.min', 'np.min', (['S'], {}), '(S)\n', (854, 857), True, 'import numpy as np\n'), ((1036, 1057), 'numpy.std', 'np.std', (['S[-M:][peaks]'], {}), '(S[-M:][peaks])\n', (1042, 1057), True, 'import numpy as np\n'), ((1063, 1086), 'numpy.std', 'np.std', (['S[-M:][valleys]'], {}), '(S[-M:][valleys])\n', (1069, 1086), True, 'import numpy as np\n'), ((4341, 4357), 'pylab.detrend', 'detrend', (['Ek[-M:]'], {}), '(Ek[-M:])\n', (4348, 4357), False, 'from pylab import detrend, fft, savefig\n'), ((4358, 4364), 'scipy.signal.blackman', 'blk', (['M'], {}), '(M)\n', (4361, 4364), True, 'from scipy.signal import blackman as blk\n'), ((4533, 4549), 'pylab.detrend', 'detrend', (['Eg[-M:]'], {}), '(Eg[-M:])\n', (4540, 4549), False, 'from pylab import detrend, fft, savefig\n'), ((4550, 4556), 'scipy.signal.blackman', 'blk', (['M'], {}), '(M)\n', (4553, 4556), True, 'from scipy.signal import blackman as blk\n'), ((4725, 4741), 'pylab.detrend', 'detrend', (['Ew[-M:]'], {}), '(Ew[-M:])\n', (4732, 4741), False, 'from pylab import detrend, fft, savefig\n'), ((4742, 4748), 'scipy.signal.blackman', 'blk', (['M'], {}), '(M)\n', (4745, 4748), True, 'from scipy.signal import blackman as blk\n'), ((4917, 4933), 'pylab.detrend', 'detrend', (['ur[-M:]'], {}), '(ur[-M:])\n', (4924, 4933), False, 'from pylab import detrend, fft, savefig\n'), ((4934, 4940), 'scipy.signal.blackman', 'blk', (['M'], {}), '(M)\n', (4937, 4940), True, 'from scipy.signal import blackman as blk\n'), ((5109, 5125), 'pylab.detrend', 'detrend', (['uw[-M:]'], {}), '(uw[-M:])\n', (5116, 5125), False, 'from pylab import detrend, fft, savefig\n'), ((5126, 5132), 'scipy.signal.blackman', 'blk', (['M'], {}), '(M)\n', (5129, 5132), True, 'from scipy.signal import blackman as blk\n'), ((5301, 5317), 
'pylab.detrend', 'detrend', (['uz[-M:]'], {}), '(uz[-M:])\n', (5308, 5317), False, 'from pylab import detrend, fft, savefig\n'), ((5318, 5324), 'scipy.signal.blackman', 'blk', (['M'], {}), '(M)\n', (5321, 5324), True, 'from scipy.signal import blackman as blk\n')] |
# -*- coding: utf-8 -*-
"""
Monte-Carlo bias/variance experiment (Spyder scratch script).

Repeatedly draws two points from the target y = sin(pi*x) on [-1, 1],
fits the best line through the origin (y = a*x) to each two-point dataset
by minimising the squared error analytically, then estimates the variance
of the per-dataset hypotheses around the average hypothesis slope.
"""
import numpy as np
sum_a = 0
sum_d = 0
for i in range(1000):
    eps = np.finfo(float).eps
    # Two x-samples drawn uniformly from [-1, 1] (eps presumably nudges the
    # exclusive upper bound so 1.0 itself is reachable — TODO confirm intent).
    data = np.random.uniform(low=-1, high=1.0 + eps, size=(1, 2))
    y = np.sin(data*np.pi)
    # Stack into rows of (x, y) pairs: shape (2, 2).
    rand_points = np.transpose(np.vstack((data, y)))
    # y = mx + c
    x = np.array(rand_points[:,0])
    # A = np.column_stack((np.ones(2), X))
    y = np.array(rand_points[:,1])
    # Squared error of y = a*x over the two samples is the quadratic
    # E(a) = a2*a**2 + a1*a + a0 with the coefficients below.
    a2 = x[0]**2 + x[1]**2
    a1 = -2*(x[0]*y[0] + x[1]*y[1])
    a0 = y[0]**2 + y[1]**2
    p = np.poly1d([a2, a1, a0])
    p1 = np.polyder(p)
    # Root of dE/da = 0: the least-squares slope for this dataset.
    solution1 = np.roots(p1)
    # solution = np.roots(p)
    # m = np.polyfit(X, Y, 1, full=True)[0]
    # sum_c = sum_c + c
    sum_a = sum_a + solution1
# print(sum_c/1000)
# print(sum_m/1000)
# Average hypothesis slope over the 1000 experiments.
print(sum_a/1000)
for j in range(1000):
    eps = np.finfo(float).eps
    # Same sampling and analytic fit as above, on a fresh dataset.
    data = np.random.uniform(low=-1, high=1.0 + eps, size=(1, 2))
    y = np.sin(data*np.pi)
    rand_points = np.transpose(np.vstack((data, y)))
    x = np.array(rand_points[:,0])
    y = np.array(rand_points[:,1])
    a2 = x[0]**2 + x[1]**2
    a1 = -2*(x[0]*y[0] + x[1]*y[1])
    a0 = y[0]**2 + y[1]**2
    p = np.poly1d([a2, a1, a0])
    p1 = np.polyder(p)
    a = np.roots(p1)
    #dif = (sum_a - a)**2
    # Average slope from the first loop vs. this dataset's slope.
    coeff = np.array(([sum_a/1000]))
    a1 = np.array(([a]))
    # y = c + mx
    qty = 10
    x0 = np.arange(-1, 1, 2.0/(qty*1.0), dtype=float)
    # x = np.column_stack((np.ones(qty), x0))
    # Evaluate both lines on a qty-point grid and take the mean squared gap.
    y_g = x0*coeff
    y_g1 = x0*a1
    dif = (y_g - y_g1)**2
    var = np.mean(dif)
    #print(y_g1)
    sum_d = sum_d + var
# Variance estimate: average squared deviation from the mean hypothesis.
print(sum_d/1000)
| [
"numpy.random.uniform",
"numpy.poly1d",
"numpy.roots",
"numpy.polyder",
"numpy.finfo",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.vstack"
] | [((190, 244), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1.0 + eps)', 'size': '(1, 2)'}), '(low=-1, high=1.0 + eps, size=(1, 2))\n', (207, 244), True, 'import numpy as np\n'), ((253, 273), 'numpy.sin', 'np.sin', (['(data * np.pi)'], {}), '(data * np.pi)\n', (259, 273), True, 'import numpy as np\n'), ((360, 387), 'numpy.array', 'np.array', (['rand_points[:, 0]'], {}), '(rand_points[:, 0])\n', (368, 387), True, 'import numpy as np\n'), ((437, 464), 'numpy.array', 'np.array', (['rand_points[:, 1]'], {}), '(rand_points[:, 1])\n', (445, 464), True, 'import numpy as np\n'), ((562, 585), 'numpy.poly1d', 'np.poly1d', (['[a2, a1, a0]'], {}), '([a2, a1, a0])\n', (571, 585), True, 'import numpy as np\n'), ((595, 608), 'numpy.polyder', 'np.polyder', (['p'], {}), '(p)\n', (605, 608), True, 'import numpy as np\n'), ((625, 637), 'numpy.roots', 'np.roots', (['p1'], {}), '(p1)\n', (633, 637), True, 'import numpy as np\n'), ((890, 944), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1.0 + eps)', 'size': '(1, 2)'}), '(low=-1, high=1.0 + eps, size=(1, 2))\n', (907, 944), True, 'import numpy as np\n'), ((953, 973), 'numpy.sin', 'np.sin', (['(data * np.pi)'], {}), '(data * np.pi)\n', (959, 973), True, 'import numpy as np\n'), ((1038, 1065), 'numpy.array', 'np.array', (['rand_points[:, 0]'], {}), '(rand_points[:, 0])\n', (1046, 1065), True, 'import numpy as np\n'), ((1073, 1100), 'numpy.array', 'np.array', (['rand_points[:, 1]'], {}), '(rand_points[:, 1])\n', (1081, 1100), True, 'import numpy as np\n'), ((1198, 1221), 'numpy.poly1d', 'np.poly1d', (['[a2, a1, a0]'], {}), '([a2, a1, a0])\n', (1207, 1221), True, 'import numpy as np\n'), ((1231, 1244), 'numpy.polyder', 'np.polyder', (['p'], {}), '(p)\n', (1241, 1244), True, 'import numpy as np\n'), ((1253, 1265), 'numpy.roots', 'np.roots', (['p1'], {}), '(p1)\n', (1261, 1265), True, 'import numpy as np\n'), ((1305, 1329), 'numpy.array', 'np.array', (['[sum_a / 1000]'], {}), 
'([sum_a / 1000])\n', (1313, 1329), True, 'import numpy as np\n'), ((1339, 1352), 'numpy.array', 'np.array', (['[a]'], {}), '([a])\n', (1347, 1352), True, 'import numpy as np\n'), ((1394, 1442), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(2.0 / (qty * 1.0))'], {'dtype': 'float'}), '(-1, 1, 2.0 / (qty * 1.0), dtype=float)\n', (1403, 1442), True, 'import numpy as np\n'), ((1557, 1569), 'numpy.mean', 'np.mean', (['dif'], {}), '(dif)\n', (1564, 1569), True, 'import numpy as np\n'), ((159, 174), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (167, 174), True, 'import numpy as np\n'), ((308, 328), 'numpy.vstack', 'np.vstack', (['(data, y)'], {}), '((data, y))\n', (317, 328), True, 'import numpy as np\n'), ((859, 874), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (867, 874), True, 'import numpy as np\n'), ((1008, 1028), 'numpy.vstack', 'np.vstack', (['(data, y)'], {}), '((data, y))\n', (1017, 1028), True, 'import numpy as np\n')] |
import numpy as np
import collections
import pygame
import time
import gym
from gym import error, spaces, utils
from gym.utils import seeding
class SnakeEnv(gym.Env):
    """Snake as an OpenAI Gym environment on a 15x15 grid.

    Observation: a flat float array of length 225 where 0. marks an empty
    cell, 1. a snake segment and 2. the food pellet.
    Action: a relative turn, clamped to [-1, 1]: -1 rotates the heading one
    step one way through the direction table, +1 the other, 0 keeps it.
    Reward: +1 for eating food, -1 when the episode ends (wall hit,
    self-collision, or ``max_steps`` exceeded), 0 otherwise.
    """
    metadata = {
        'render.modes': ['human']
    }

    def __init__(self):
        self.grid_shape = (15, 15)
        self.grid = None
        self.topleft, self.bottomright = np.array([0, 0]), np.array(list(self.grid_shape))
        # Precomputed lookup tables between flat indices and (y, x) pairs.
        self.yx_to_index_table = np.array([i*self.grid_shape[1]+np.arange(self.grid_shape[1])
                                           for i in np.arange(self.grid_shape[0])])
        self.index_to_yx_table = np.array([(i // self.grid_shape[1], i % self.grid_shape[1])
                                           for i in np.arange(self.grid_shape[0]*self.grid_shape[1])])
        self.max_steps = 1000
        self.head_position = None
        self.initial_length = 3
        # Headings in (dy, dx) order; direction_pointer indexes this table.
        self.directions = np.array([[-1, 0], [0, -1], [1, 0], [0, 1]])
        self.direction_pointer = 0
        self.snake_heading = None
        self.snake_body = None
        # render
        self.screen = None

    def _drop_tail(self):
        """Remove the tail segment from the body and clear its grid cell."""
        tail_y, tail_x = self.snake_body.pop()
        self.grid[self.yx_to_index(tail_y, tail_x)] = 0.

    def step(self, action):
        """Advance the game one tick.

        :param action: relative turn in [-1, 1]; out-of-range values are clamped
        :returns: (observation, reward, done, info); observation is the live
                  grid array, not a copy
        """
        # Clamp the relative turn (idiomatic form of the old ternary chain).
        action = max(min(action, 1), -1)
        self.direction_pointer = (self.direction_pointer + action) % 4
        self.snake_heading = self.directions[self.direction_pointer]
        head_y, head_x = new_head_yx = self.head_position + self.snake_heading

        # Death by wall: the new head leaves the playing field.
        if head_y < self.topleft[0] or\
                head_y >= self.bottomright[0] or\
                head_x < self.topleft[1] or\
                head_x >= self.bottomright[1]:
            self._drop_tail()
            return self.grid, -1, True, {}

        head_index = self.yx_to_index(head_y, head_x)
        # Death by self-collision or step limit.
        # NOTE(review): the body check runs before the tail is dropped, so moving
        # into the cell the tail is about to vacate still counts as a collision —
        # confirm this matches the intended rules.
        if self.grid[head_index] == 1.\
                or self.current_step >= self.max_steps:
            self._drop_tail()
            return self.grid, -1, True, {}

        if self.grid[head_index] == 2.:
            # Ate the food: grow by keeping the tail, then spawn a new pellet.
            reward = 1
            self.grid[head_index] = 1.
            self.generate_new_food()
        else:
            # Normal move: drop the tail so the body length stays constant.
            self._drop_tail()
            reward = 0
            self.grid[head_index] = 1.
        self.head_position = new_head_yx
        self.snake_body.appendleft(new_head_yx)
        self.current_step += 1
        return self.grid, reward, False, {}

    def reset(self):
        """Start a new episode and return the flattened grid observation."""
        self.grid = np.zeros(self.grid_shape[0]*self.grid_shape[1])
        self.current_step = 0
        self.head_position = np.array([5, 5])
        self.direction_pointer = 0
        self.snake_heading = self.directions[self.direction_pointer]
        self.snake_body = collections.deque([self.head_position])
        self.grid[self.yx_to_index(self.head_position[0], self.head_position[1])] = 1.
        # Lay the initial body out behind the head along the current heading.
        for i in range(1, self.initial_length):
            y, x = yx = self.head_position + i * self.snake_heading
            self.snake_body.append(yx)
            self.grid[self.yx_to_index(y, x)] = 1.
        # Point the heading away from the body so the first move is safe.
        self.direction_pointer = 2
        self.generate_new_food()
        return self.grid.flatten()

    def render(self, mode='human', close=False):
        """Draw the grid with pygame; lazily initialises the window on first call."""
        if self.screen is None:
            pygame.init()
            self.size = self.width, self.height = 600, 600
            self.rect_height, self.rect_width = tuple(self.size / np.array(list(self.grid_shape)))
            self.screen = pygame.display.set_mode(self.size)
        # Background.
        pygame.draw.rect(self.screen, (0, 128, 255), pygame.Rect(0, 0, self.width, self.height))
        # Draw every occupied cell: body in alternating greens, food in orange.
        indexes = np.where(self.grid != 0.)[0]
        yxs = [list(self.index_to_yx(index)) for index in indexes]
        for i, yx in zip(indexes, yxs):
            y, x = yx
            v = self.grid[i]
            if v == 1:
                if (x+y) % 2 == 0:
                    c = (128, 128, 0)
                else:
                    c = (128, 255, 0)
            else:
                c = (255, 128, 0)
            pygame.draw.rect(self.screen, c,
                             pygame.Rect(x*self.rect_width, y*self.rect_height, self.rect_width, self.rect_height))
        # Head drawn last, in red, on top of its body cell.
        y, x = self.head_position
        pygame.draw.rect(self.screen, (255, 0, 0),
                         pygame.Rect(x*self.rect_width, y*self.rect_height, self.rect_width, self.rect_height))
        pygame.display.flip()
        time.sleep(.166)

    def index_to_yx(self, index):
        """Map a flat grid index to its (y, x) pair via the precomputed table."""
        return self.index_to_yx_table[index]

    def yx_to_index(self, y, x):
        """Map a (y, x) pair to its flat grid index via the precomputed table."""
        return self.yx_to_index_table[y][x]

    def generate_new_food(self):
        """Place a food pellet (value 2.) on a uniformly random empty cell.

        Raises ValueError (from np.random.choice) if the grid has no empty cell.
        """
        inds = np.where(self.grid == 0)[0]
        ind = np.random.choice(inds)
        self.grid[ind] = 2.
| [
"pygame.display.set_mode",
"pygame.Rect",
"numpy.zeros",
"pygame.init",
"time.sleep",
"pygame.display.flip",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.random.choice",
"collections.deque"
] | [((914, 958), 'numpy.array', 'np.array', (['[[-1, 0], [0, -1], [1, 0], [0, 1]]'], {}), '([[-1, 0], [0, -1], [1, 0], [0, 1]])\n', (922, 958), True, 'import numpy as np\n'), ((2631, 2680), 'numpy.zeros', 'np.zeros', (['(self.grid_shape[0] * self.grid_shape[1])'], {}), '(self.grid_shape[0] * self.grid_shape[1])\n', (2639, 2680), True, 'import numpy as np\n'), ((2738, 2754), 'numpy.array', 'np.array', (['[5, 5]'], {}), '([5, 5])\n', (2746, 2754), True, 'import numpy as np\n'), ((2887, 2926), 'collections.deque', 'collections.deque', (['[self.head_position]'], {}), '([self.head_position])\n', (2904, 2926), False, 'import collections\n'), ((4528, 4549), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4547, 4549), False, 'import pygame\n'), ((4558, 4575), 'time.sleep', 'time.sleep', (['(0.166)'], {}), '(0.166)\n', (4568, 4575), False, 'import time\n'), ((4938, 4960), 'numpy.random.choice', 'np.random.choice', (['inds'], {}), '(inds)\n', (4954, 4960), True, 'import numpy as np\n'), ((365, 381), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (373, 381), True, 'import numpy as np\n'), ((3419, 3432), 'pygame.init', 'pygame.init', ([], {}), '()\n', (3430, 3432), False, 'import pygame\n'), ((3617, 3651), 'pygame.display.set_mode', 'pygame.display.set_mode', (['self.size'], {}), '(self.size)\n', (3640, 3651), False, 'import pygame\n'), ((3706, 3748), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', 'self.width', 'self.height'], {}), '(0, 0, self.width, self.height)\n', (3717, 3748), False, 'import pygame\n'), ((3769, 3795), 'numpy.where', 'np.where', (['(self.grid != 0.0)'], {}), '(self.grid != 0.0)\n', (3777, 3795), True, 'import numpy as np\n'), ((4432, 4525), 'pygame.Rect', 'pygame.Rect', (['(x * self.rect_width)', '(y * self.rect_height)', 'self.rect_width', 'self.rect_height'], {}), '(x * self.rect_width, y * self.rect_height, self.rect_width,\n self.rect_height)\n', (4443, 4525), False, 'import pygame\n'), ((4896, 4920), 'numpy.where', 
'np.where', (['(self.grid == 0)'], {}), '(self.grid == 0)\n', (4904, 4920), True, 'import numpy as np\n'), ((4234, 4327), 'pygame.Rect', 'pygame.Rect', (['(x * self.rect_width)', '(y * self.rect_height)', 'self.rect_width', 'self.rect_height'], {}), '(x * self.rect_width, y * self.rect_height, self.rect_width,\n self.rect_height)\n', (4245, 4327), False, 'import pygame\n'), ((479, 508), 'numpy.arange', 'np.arange', (['self.grid_shape[1]'], {}), '(self.grid_shape[1])\n', (488, 508), True, 'import numpy as np\n'), ((561, 590), 'numpy.arange', 'np.arange', (['self.grid_shape[0]'], {}), '(self.grid_shape[0])\n', (570, 590), True, 'import numpy as np\n'), ((738, 788), 'numpy.arange', 'np.arange', (['(self.grid_shape[0] * self.grid_shape[1])'], {}), '(self.grid_shape[0] * self.grid_shape[1])\n', (747, 788), True, 'import numpy as np\n')] |
# Copyright 2019 PIQuIL - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils import parameters_to_vector
from qucumber.utils import cplx, auto_unsqueeze_args
from qucumber import _warn_on_missing_gpu
class PurificationRBM(nn.Module):
r"""An RBM with a hidden and "auxiliary" layer, each separately connected to the visible units
:param num_visible: The number of visible units, i.e. the size of the system
:type num_visible: int
:param num_hidden: The number of units in the hidden layer
:type num_hidden: int
:param num_aux: The number of units in the auxiliary purification layer
:type num_aux: int
:param zero_weights: Whether or not to initialize the weights to zero
:type zero_weights: bool
:param gpu: Whether to perform computations on the default gpu.
:type gpu: bool
"""
def __init__(
self, num_visible, num_hidden=None, num_aux=None, zero_weights=False, gpu=False
):
super().__init__()
self.num_visible = int(num_visible)
self.num_hidden = (
int(num_hidden) if num_hidden is not None else self.num_visible
)
self.num_aux = int(num_aux) if num_aux is not None else self.num_visible
# Parameters are:
# W: The weights of the visible-hidden edges
# U: The weights of the visible-auxiliary edges
# b: The biases of the visible nodes
# c: The biases of the hidden nobdes
# d: The biases of the auxiliary nodes
# The auxiliary bias of the phase RBM is always zero
self.num_pars = (
(self.num_visible * self.num_hidden)
+ (self.num_visible * self.num_aux)
+ self.num_visible
+ self.num_hidden
+ self.num_aux
)
_warn_on_missing_gpu(gpu)
self.gpu = gpu and torch.cuda.is_available()
self.device = torch.device("cuda") if self.gpu else torch.device("cpu")
self.initialize_parameters(zero_weights=zero_weights)
def __repr__(self):
return (
f"PurificationBinaryRBM(num_visible={self.num_visible}, "
f"num_hidden={self.num_hidden}, num_aux={self.num_aux}, gpu={self.gpu})"
)
def initialize_parameters(self, zero_weights=False):
r"""Initialize the parameters of the RBM
:param zero_weights: Whether or not to initialize the weights to zero
:type zero_weights: bool
"""
gen_tensor = torch.zeros if zero_weights else torch.randn
self.weights_W = nn.Parameter(
(
gen_tensor(
self.num_hidden,
self.num_visible,
dtype=torch.double,
device=self.device,
)
/ np.sqrt(self.num_visible)
),
requires_grad=False,
)
self.weights_U = nn.Parameter(
(
gen_tensor(
self.num_aux,
self.num_visible,
dtype=torch.double,
device=self.device,
)
/ np.sqrt(self.num_visible)
),
requires_grad=False,
)
self.visible_bias = nn.Parameter(
torch.zeros(self.num_visible, dtype=torch.double, device=self.device),
requires_grad=False,
)
self.hidden_bias = nn.Parameter(
torch.zeros(self.num_hidden, dtype=torch.double, device=self.device),
requires_grad=False,
)
self.aux_bias = nn.Parameter(
torch.zeros(self.num_aux, dtype=torch.double, device=self.device),
requires_grad=False,
)
@auto_unsqueeze_args()
def effective_energy(self, v, a=None):
r"""Computes the equivalent of the "effective energy" for the RBM. If
`a` is `None`, will analytically trace out the auxiliary units.
:param v: The current state of the visible units. Shape (b, n_v) or (n_v,).
:type v: torch.Tensor
:param a: The current state of the auxiliary units. Shape (b, n_a) or (n_a,).
:type a: torch.Tensor or None
:returns: The "effective energy" of the RBM. Shape (b,) or (1,).
:rtype: torch.Tensor
"""
v = v.to(self.weights_W)
vis_term = torch.matmul(v, self.visible_bias) + F.softplus(
F.linear(v, self.weights_W, self.hidden_bias)
).sum(-1)
if a is not None:
a = (a.unsqueeze(0) if a.dim() < 2 else a).to(self.weights_W)
aux_term = torch.matmul(a, self.aux_bias)
mix_term = torch.einsum("...v,av,...a->...", v, self.weights_U.data, a)
return -(vis_term + aux_term + mix_term)
else:
aux_term = F.softplus(F.linear(v, self.weights_U, self.aux_bias)).sum(-1)
return -(vis_term + aux_term)
def effective_energy_gradient(self, v, reduce=True):
"""The gradients of the effective energies for the given visible states.
:param v: The visible states.
:type v: torch.Tensor
:param reduce: If `True`, will sum over the gradients resulting from
each visible state. Otherwise will return a batch of
gradient vectors.
:type reduce: bool
:returns: Will return a vector (or matrix if `reduce=False` and multiple
visible states were given as a matrix) containing the gradients
for all parameters (computed on the given visible states v).
:rtype: torch.Tensor
"""
v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.weights_W)
ph = self.prob_h_given_v(v)
pa = self.prob_a_given_v(v)
if reduce:
W_grad = -torch.matmul(ph.transpose(0, -1), v)
U_grad = -torch.matmul(pa.transpose(0, -1), v)
vb_grad = -torch.sum(v, 0)
hb_grad = -torch.sum(ph, 0)
ab_grad = -torch.sum(pa, 0)
return parameters_to_vector([W_grad, U_grad, vb_grad, hb_grad, ab_grad])
else:
W_grad = -torch.einsum("...j,...k->...jk", ph, v).view(*v.shape[:-1], -1)
U_grad = -torch.einsum("...j,...k->...jk", pa, v).view(*v.shape[:-1], -1)
vb_grad = -v
hb_grad = -ph
ab_grad = -pa
vec = [W_grad, U_grad, vb_grad, hb_grad, ab_grad]
return torch.cat(vec, dim=-1)
@auto_unsqueeze_args()
def prob_h_given_v(self, v, out=None):
r"""Given a visible unit configuration, compute the probability
vector of the hidden units being on
:param v: The visible units
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the hidden units being active
given the visible state
:rtype torch.Tensor:
"""
return (
torch.matmul(v, self.weights_W.data.t(), out=out)
.add_(self.hidden_bias.data)
.sigmoid_()
.clamp_(min=0, max=1)
)
@auto_unsqueeze_args()
def prob_a_given_v(self, v, out=None):
r"""Given a visible unit configuration, compute the probability
vector of the auxiliary units being on
:param v: The visible units
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the auxiliary units being active
given the visible state
:rtype torch.Tensor:
"""
return (
torch.matmul(v, self.weights_U.data.t(), out=out)
.add_(self.aux_bias.data)
.sigmoid_()
.clamp_(min=0, max=1)
)
@auto_unsqueeze_args(1, 2)
def prob_v_given_ha(self, h, a, out=None):
r"""Given a hidden and auxiliary unit configuration, compute
the probability vector of the hidden units being on
:param h: The hidden units
:type h: torch.Tensor
:param a: The auxiliary units
:type a: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the visible units being
active given the hidden and auxiliary states
:rtype torch.Tensor:
"""
return (
torch.matmul(h, self.weights_W.data, out=out)
.add_(self.visible_bias.data)
.add_(torch.matmul(a, self.weights_U.data))
.sigmoid_()
.clamp_(min=0, max=1)
)
def sample_a_given_v(self, v, out=None):
r"""Sample/generate an auxiliary state given a visible state
:param v: The visible state
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled auxiliary state
:rtype: torch.Tensor
"""
a = self.prob_a_given_v(v, out=out)
a = torch.bernoulli(a, out=out)
return a
def sample_h_given_v(self, v, out=None):
r"""Sample/generate a hidden state given a visible state
:param v: The visible state
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled hidden state
:rtype: torch.Tensor
"""
h = self.prob_h_given_v(v, out=out)
h = torch.bernoulli(h, out=out)
return h
def sample_v_given_ha(self, h, a, out=None):
r"""Sample/generate a visible state given the
hidden and auxiliary states
:param h: The hidden state
:type h: torch.Tensor
:param a: The auxiliary state
:type a: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled visible state
:rtype: torch.Tensor
"""
v = self.prob_v_given_ha(h, a, out=out)
v = torch.bernoulli(v, out=out)
return v
def gibbs_steps(self, k, initial_state, overwrite=False):
r"""Perform k steps of Block Gibbs sampling. One step consists of
sampling the hidden and auxiliary states from the visible state, and
then sampling the visible state from the hidden and auxiliary states
:param k: The number of Block Gibbs steps
:type k: int
:param initial_state: The initial visible state
:type initial_state: torch.Tensor
:param overwrite: Whether to overwrite the initial_state tensor.
Exception: If initial_state is not on the same device
as the RBM, it will NOT be overwritten.
:type overwrite: bool
:returns: Returns the visible states after k steps of
Block Gibbs sampling
:rtype: torch.Tensor
"""
v = (initial_state if overwrite else initial_state.clone()).to(self.weights_W)
h = torch.zeros(*v.shape[:-1], self.num_hidden).to(self.weights_W)
a = torch.zeros(*v.shape[:-1], self.num_aux).to(self.weights_W)
for _ in range(k):
self.sample_h_given_v(v, out=h)
self.sample_a_given_v(v, out=a)
self.sample_v_given_ha(h, a, out=v)
return v
@auto_unsqueeze_args()
def mixing_term(self, v):
r"""Describes the extent of mixing in the system,
:math:`V_\theta = \frac{1}{2}U_\theta \bm{\sigma} + d_\theta`
:param v: The visible state of the system
:type v: torch.Tensor
:returns: The term describing the mixing of the system
:rtype: torch.Tensor
"""
return F.linear(v, 0.5 * self.weights_U, self.aux_bias)
    def gamma(self, v, vp, eta=1, expand=True):
        r"""Calculates elements of the :math:`\Gamma^{(\eta)}` matrix,
        where :math:`\eta = \pm`.
        If `expand` is `True`, will return a complex matrix
        :math:`A_{ij} = \langle\sigma_i|\Gamma^{(\eta)}|\sigma'_j\rangle`.
        Otherwise will return a complex vector
        :math:`A_{i} = \langle\sigma_i|\Gamma^{(\eta)}|\sigma'_i\rangle`.
        :param v: A batch of visible states, :math:`\sigma`.
        :type v: torch.Tensor
        :param vp: The other batch of visible states, :math:`\sigma'`.
        :type vp: torch.Tensor
        :param eta: Determines which gamma matrix elements to compute.
        :type eta: int
        :param expand: Whether to return a matrix (`True`) or a vector (`False`).
                       Ignored if both inputs are vectors, in which case, a
                       scalar is returned.
        :type expand: bool
        :returns: The matrix element given by
                  :math:`\langle\sigma|\Gamma^{(\eta)}|\sigma'\rangle`
        :rtype: torch.Tensor
        """
        # eta's sign selects between the "+" and "-" Gamma matrices.
        sign = np.sign(eta)
        if v.dim() < 2 and vp.dim() < 2:
            # Both inputs are single configurations -> accumulate a scalar.
            temp = torch.dot(v + sign * vp, self.visible_bias)
            temp += F.softplus(F.linear(v, self.weights_W, self.hidden_bias)).sum()
            temp += (
                sign * F.softplus(F.linear(vp, self.weights_W, self.hidden_bias)).sum()
            )
        else:
            # Batched case: per-sample free-energy-like terms for each input.
            temp1 = torch.matmul(v, self.visible_bias) + (
                F.softplus(F.linear(v, self.weights_W, self.hidden_bias)).sum(-1)
            )
            temp2 = torch.matmul(vp, self.visible_bias) + (
                F.softplus(F.linear(vp, self.weights_W, self.hidden_bias)).sum(-1)
            )
            if expand:
                # In-place unsqueeze_ reshapes temp1/temp2 so broadcasting
                # produces the full (len(v), len(vp)) outer combination.
                temp = temp1.unsqueeze_(1) + (sign * temp2.unsqueeze_(0))
            else:
                temp = temp1 + (sign * temp2)
        return 0.5 * temp
    def gamma_grad(self, v, vp, eta=1, expand=False):
        r"""Calculates elements of the gradient of
        the :math:`\Gamma^{(\eta)}` matrix, where :math:`\eta = \pm`.
        :param v: A batch of visible states, :math:`\sigma`
        :type v: torch.Tensor
        :param vp: The other batch of visible states, :math:`\sigma'`
        :type vp: torch.Tensor
        :param eta: Determines which gamma matrix elements to compute.
        :type eta: int
        :param expand: Whether to return a rank-3 tensor (`True`) or a matrix (`False`).
        :type expand: bool
        :returns: The matrix element given by
            :math:`\langle\sigma|\nabla_\lambda\Gamma^{(\eta)}|\sigma'\rangle`
        :rtype: torch.Tensor
        """
        sign = np.sign(eta)
        # Remember whether inputs arrived as single configurations so the
        # artificial batch dimension can be squeezed off again at the end.
        unsqueezed = v.dim() < 2 or vp.dim() < 2
        v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.weights_W)
        vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.weights_W)
        prob_h = self.prob_h_given_v(v)
        prob_hp = self.prob_h_given_v(vp)
        # Outer products p(h|v) (x) v for each sample in the batch.
        W_grad_ = torch.einsum("...j,...k->...jk", prob_h, v)
        W_grad_p = torch.einsum("...j,...k->...jk", prob_hp, vp)
        if expand:
            # NOTE: unsqueeze_ mutates W_grad_/W_grad_p/prob_h/prob_hp in
            # place; they are not reused in their original shapes below.
            W_grad = 0.5 * (W_grad_.unsqueeze_(1) + sign * W_grad_p.unsqueeze_(0))
            vb_grad = 0.5 * (v.unsqueeze(1) + sign * vp.unsqueeze(0))
            hb_grad = 0.5 * (prob_h.unsqueeze_(1) + sign * prob_hp.unsqueeze_(0))
        else:
            W_grad = 0.5 * (W_grad_ + sign * W_grad_p)
            vb_grad = 0.5 * (v + sign * vp)
            hb_grad = 0.5 * (prob_h + sign * prob_hp)
        batch_sizes = (
            (v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)
        )
        # The U-weight and aux-bias slots are filled with zeros (expanded
        # views, no copies) so the flattened gradient keeps a fixed layout.
        U_grad = torch.zeros_like(self.weights_U).expand(*batch_sizes, -1, -1)
        ab_grad = torch.zeros_like(self.aux_bias).expand(*batch_sizes, -1)
        vec = [
            W_grad.view(*batch_sizes, -1),
            U_grad.view(*batch_sizes, -1),
            vb_grad,
            hb_grad,
            ab_grad,
        ]
        if unsqueezed and not expand:
            vec = [grad.squeeze_(0) for grad in vec]
        return cplx.make_complex(torch.cat(vec, dim=-1))
def partition(self, space):
r"""Computes the partition function
:param space: The Hilbert space of the visible units
:type space: torch.Tensor
:returns: The partition function
:rtype: torch.Tensor
"""
logZ = (-self.effective_energy(space)).logsumexp(0)
return logZ.exp()
| [
"torch.dot",
"qucumber._warn_on_missing_gpu",
"torch.zeros_like",
"torch.nn.utils.parameters_to_vector",
"torch.cat",
"torch.nn.functional.linear",
"torch.zeros",
"torch.einsum",
"torch.cuda.is_available",
"torch.device",
"numpy.sign",
"torch.bernoulli",
"torch.matmul",
"torch.sum",
"quc... | [((4344, 4365), 'qucumber.utils.auto_unsqueeze_args', 'auto_unsqueeze_args', ([], {}), '()\n', (4363, 4365), False, 'from qucumber.utils import cplx, auto_unsqueeze_args\n'), ((7097, 7118), 'qucumber.utils.auto_unsqueeze_args', 'auto_unsqueeze_args', ([], {}), '()\n', (7116, 7118), False, 'from qucumber.utils import cplx, auto_unsqueeze_args\n'), ((7772, 7793), 'qucumber.utils.auto_unsqueeze_args', 'auto_unsqueeze_args', ([], {}), '()\n', (7791, 7793), False, 'from qucumber.utils import cplx, auto_unsqueeze_args\n'), ((8450, 8475), 'qucumber.utils.auto_unsqueeze_args', 'auto_unsqueeze_args', (['(1)', '(2)'], {}), '(1, 2)\n', (8469, 8475), False, 'from qucumber.utils import cplx, auto_unsqueeze_args\n'), ((11997, 12018), 'qucumber.utils.auto_unsqueeze_args', 'auto_unsqueeze_args', ([], {}), '()\n', (12016, 12018), False, 'from qucumber.utils import cplx, auto_unsqueeze_args\n'), ((2403, 2428), 'qucumber._warn_on_missing_gpu', '_warn_on_missing_gpu', (['gpu'], {}), '(gpu)\n', (2423, 2428), False, 'from qucumber import _warn_on_missing_gpu\n'), ((9683, 9710), 'torch.bernoulli', 'torch.bernoulli', (['a'], {'out': 'out'}), '(a, out=out)\n', (9698, 9710), False, 'import torch\n'), ((10129, 10156), 'torch.bernoulli', 'torch.bernoulli', (['h'], {'out': 'out'}), '(h, out=out)\n', (10144, 10156), False, 'import torch\n'), ((10676, 10703), 'torch.bernoulli', 'torch.bernoulli', (['v'], {'out': 'out'}), '(v, out=out)\n', (10691, 10703), False, 'import torch\n'), ((12382, 12430), 'torch.nn.functional.linear', 'F.linear', (['v', '(0.5 * self.weights_U)', 'self.aux_bias'], {}), '(v, 0.5 * self.weights_U, self.aux_bias)\n', (12390, 12430), True, 'from torch.nn import functional as F\n'), ((13531, 13543), 'numpy.sign', 'np.sign', (['eta'], {}), '(eta)\n', (13538, 13543), True, 'import numpy as np\n'), ((15142, 15154), 'numpy.sign', 'np.sign', (['eta'], {}), '(eta)\n', (15149, 15154), True, 'import numpy as np\n'), ((15450, 15493), 'torch.einsum', 'torch.einsum', 
(['"""...j,...k->...jk"""', 'prob_h', 'v'], {}), "('...j,...k->...jk', prob_h, v)\n", (15462, 15493), False, 'import torch\n'), ((15513, 15558), 'torch.einsum', 'torch.einsum', (['"""...j,...k->...jk"""', 'prob_hp', 'vp'], {}), "('...j,...k->...jk', prob_hp, vp)\n", (15525, 15558), False, 'import torch\n'), ((2456, 2481), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2479, 2481), False, 'import torch\n'), ((2505, 2525), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2517, 2525), False, 'import torch\n'), ((2543, 2562), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2555, 2562), False, 'import torch\n'), ((3896, 3965), 'torch.zeros', 'torch.zeros', (['self.num_visible'], {'dtype': 'torch.double', 'device': 'self.device'}), '(self.num_visible, dtype=torch.double, device=self.device)\n', (3907, 3965), False, 'import torch\n'), ((4064, 4132), 'torch.zeros', 'torch.zeros', (['self.num_hidden'], {'dtype': 'torch.double', 'device': 'self.device'}), '(self.num_hidden, dtype=torch.double, device=self.device)\n', (4075, 4132), False, 'import torch\n'), ((4228, 4293), 'torch.zeros', 'torch.zeros', (['self.num_aux'], {'dtype': 'torch.double', 'device': 'self.device'}), '(self.num_aux, dtype=torch.double, device=self.device)\n', (4239, 4293), False, 'import torch\n'), ((4966, 5000), 'torch.matmul', 'torch.matmul', (['v', 'self.visible_bias'], {}), '(v, self.visible_bias)\n', (4978, 5000), False, 'import torch\n'), ((5216, 5246), 'torch.matmul', 'torch.matmul', (['a', 'self.aux_bias'], {}), '(a, self.aux_bias)\n', (5228, 5246), False, 'import torch\n'), ((5270, 5330), 'torch.einsum', 'torch.einsum', (['"""...v,av,...a->..."""', 'v', 'self.weights_U.data', 'a'], {}), "('...v,av,...a->...', v, self.weights_U.data, a)\n", (5282, 5330), False, 'import torch\n'), ((6658, 6723), 'torch.nn.utils.parameters_to_vector', 'parameters_to_vector', (['[W_grad, U_grad, vb_grad, hb_grad, ab_grad]'], {}), '([W_grad, U_grad, 
vb_grad, hb_grad, ab_grad])\n', (6678, 6723), False, 'from torch.nn.utils import parameters_to_vector\n'), ((7068, 7090), 'torch.cat', 'torch.cat', (['vec'], {'dim': '(-1)'}), '(vec, dim=-1)\n', (7077, 7090), False, 'import torch\n'), ((13604, 13647), 'torch.dot', 'torch.dot', (['(v + sign * vp)', 'self.visible_bias'], {}), '(v + sign * vp, self.visible_bias)\n', (13613, 13647), False, 'import torch\n'), ((16557, 16579), 'torch.cat', 'torch.cat', (['vec'], {'dim': '(-1)'}), '(vec, dim=-1)\n', (16566, 16579), False, 'import torch\n'), ((3403, 3428), 'numpy.sqrt', 'np.sqrt', (['self.num_visible'], {}), '(self.num_visible)\n', (3410, 3428), True, 'import numpy as np\n'), ((3757, 3782), 'numpy.sqrt', 'np.sqrt', (['self.num_visible'], {}), '(self.num_visible)\n', (3764, 3782), True, 'import numpy as np\n'), ((6543, 6558), 'torch.sum', 'torch.sum', (['v', '(0)'], {}), '(v, 0)\n', (6552, 6558), False, 'import torch\n'), ((6582, 6598), 'torch.sum', 'torch.sum', (['ph', '(0)'], {}), '(ph, 0)\n', (6591, 6598), False, 'import torch\n'), ((6622, 6638), 'torch.sum', 'torch.sum', (['pa', '(0)'], {}), '(pa, 0)\n', (6631, 6638), False, 'import torch\n'), ((11674, 11717), 'torch.zeros', 'torch.zeros', (['*v.shape[:-1]', 'self.num_hidden'], {}), '(*v.shape[:-1], self.num_hidden)\n', (11685, 11717), False, 'import torch\n'), ((11749, 11789), 'torch.zeros', 'torch.zeros', (['*v.shape[:-1]', 'self.num_aux'], {}), '(*v.shape[:-1], self.num_aux)\n', (11760, 11789), False, 'import torch\n'), ((13890, 13924), 'torch.matmul', 'torch.matmul', (['v', 'self.visible_bias'], {}), '(v, self.visible_bias)\n', (13902, 13924), False, 'import torch\n'), ((14046, 14081), 'torch.matmul', 'torch.matmul', (['vp', 'self.visible_bias'], {}), '(vp, self.visible_bias)\n', (14058, 14081), False, 'import torch\n'), ((16119, 16151), 'torch.zeros_like', 'torch.zeros_like', (['self.weights_U'], {}), '(self.weights_U)\n', (16135, 16151), False, 'import torch\n'), ((16199, 16230), 'torch.zeros_like', 
'torch.zeros_like', (['self.aux_bias'], {}), '(self.aux_bias)\n', (16215, 16230), False, 'import torch\n'), ((5027, 5072), 'torch.nn.functional.linear', 'F.linear', (['v', 'self.weights_W', 'self.hidden_bias'], {}), '(v, self.weights_W, self.hidden_bias)\n', (5035, 5072), True, 'from torch.nn import functional as F\n'), ((5432, 5474), 'torch.nn.functional.linear', 'F.linear', (['v', 'self.weights_U', 'self.aux_bias'], {}), '(v, self.weights_U, self.aux_bias)\n', (5440, 5474), True, 'from torch.nn import functional as F\n'), ((6760, 6799), 'torch.einsum', 'torch.einsum', (['"""...j,...k->...jk"""', 'ph', 'v'], {}), "('...j,...k->...jk', ph, v)\n", (6772, 6799), False, 'import torch\n'), ((6846, 6885), 'torch.einsum', 'torch.einsum', (['"""...j,...k->...jk"""', 'pa', 'v'], {}), "('...j,...k->...jk', pa, v)\n", (6858, 6885), False, 'import torch\n'), ((13679, 13724), 'torch.nn.functional.linear', 'F.linear', (['v', 'self.weights_W', 'self.hidden_bias'], {}), '(v, self.weights_W, self.hidden_bias)\n', (13687, 13724), True, 'from torch.nn import functional as F\n'), ((9169, 9205), 'torch.matmul', 'torch.matmul', (['a', 'self.weights_U.data'], {}), '(a, self.weights_U.data)\n', (9181, 9205), False, 'import torch\n'), ((13788, 13834), 'torch.nn.functional.linear', 'F.linear', (['vp', 'self.weights_W', 'self.hidden_bias'], {}), '(vp, self.weights_W, self.hidden_bias)\n', (13796, 13834), True, 'from torch.nn import functional as F\n'), ((13956, 14001), 'torch.nn.functional.linear', 'F.linear', (['v', 'self.weights_W', 'self.hidden_bias'], {}), '(v, self.weights_W, self.hidden_bias)\n', (13964, 14001), True, 'from torch.nn import functional as F\n'), ((14113, 14159), 'torch.nn.functional.linear', 'F.linear', (['vp', 'self.weights_W', 'self.hidden_bias'], {}), '(vp, self.weights_W, self.hidden_bias)\n', (14121, 14159), True, 'from torch.nn import functional as F\n'), ((9063, 9108), 'torch.matmul', 'torch.matmul', (['h', 'self.weights_W.data'], {'out': 'out'}), '(h, 
self.weights_W.data, out=out)\n', (9075, 9108), False, 'import torch\n')] |
import cv2
import numpy as np
from common import strel
from common import morpho as m
img = cv2.imread('./Images/papier_15.png')

# Use the red channel: the sheet is green, so it contrasts best in red.
imgB = img[:, :, 0]  # blue channel
cv2.imshow('Feuille B', imgB)
imgV = img[:, :, 1]  # green channel
cv2.imshow('Feuille V', imgV)
imgR = img[:, :, 2]  # red channel
cv2.imshow('Feuille R', imgR)
cv2.waitKey(0)
cv2.destroyAllWindows()

img = imgR

# Find the line orientation minimising the remaining white pixels in the
# morphological gradient: the fewer pixels survive, the closer the
# structuring element's angle is to the sheet's orientation.
best_sum = np.sum(img)  # renamed from `min`, which shadowed the builtin
best_angle = 0
for a in range(-90, 90, 1):
    el = strel.build('ligne', 40, a)
    cv2.imshow('Ligne', el)
    g = m.gradient(img, el)
    cv2.imshow('Calcul', g)
    cv2.waitKey(0)
    grad_sum = np.sum(g)  # computed once instead of twice per iteration
    if grad_sum < best_sum:
        best_sum = grad_sum
        best_angle = a

print(best_angle)
| [
"numpy.sum",
"common.strel.build",
"cv2.waitKey",
"cv2.destroyAllWindows",
"common.morpho.gradient",
"cv2.imread",
"cv2.imshow"
] | [((94, 130), 'cv2.imread', 'cv2.imread', (['"""./Images/papier_15.png"""'], {}), "('./Images/papier_15.png')\n", (104, 130), False, 'import cv2\n'), ((229, 258), 'cv2.imshow', 'cv2.imshow', (['"""Feuille B"""', 'imgB'], {}), "('Feuille B', imgB)\n", (239, 258), False, 'import cv2\n'), ((306, 335), 'cv2.imshow', 'cv2.imshow', (['"""Feuille V"""', 'imgV'], {}), "('Feuille V', imgV)\n", (316, 335), False, 'import cv2\n'), ((384, 413), 'cv2.imshow', 'cv2.imshow', (['"""Feuille R"""', 'imgR'], {}), "('Feuille R', imgR)\n", (394, 413), False, 'import cv2\n'), ((414, 428), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (425, 428), False, 'import cv2\n'), ((429, 452), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (450, 452), False, 'import cv2\n'), ((599, 610), 'numpy.sum', 'np.sum', (['img'], {}), '(img)\n', (605, 610), True, 'import numpy as np\n'), ((659, 686), 'common.strel.build', 'strel.build', (['"""ligne"""', '(40)', 'a'], {}), "('ligne', 40, a)\n", (670, 686), False, 'from common import strel\n'), ((691, 714), 'cv2.imshow', 'cv2.imshow', (['"""Ligne"""', 'el'], {}), "('Ligne', el)\n", (701, 714), False, 'import cv2\n'), ((724, 743), 'common.morpho.gradient', 'm.gradient', (['img', 'el'], {}), '(img, el)\n', (734, 743), True, 'from common import morpho as m\n'), ((748, 771), 'cv2.imshow', 'cv2.imshow', (['"""Calcul"""', 'g'], {}), "('Calcul', g)\n", (758, 771), False, 'import cv2\n'), ((776, 790), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (787, 790), False, 'import cv2\n'), ((799, 808), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (805, 808), True, 'import numpy as np\n'), ((832, 841), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (838, 841), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
data = pd.read_csv("1-5-data.csv")

# Features are the x1/x2 columns; labels are the y column.
feature_columns = ['x1', 'x2']
X = np.array(data[feature_columns])
y = np.array(data['y'])
| [
"pandas.read_csv",
"numpy.array"
] | [((50, 77), 'pandas.read_csv', 'pd.read_csv', (['"""1-5-data.csv"""'], {}), "('1-5-data.csv')\n", (61, 77), True, 'import pandas as pd\n'), ((160, 188), 'numpy.array', 'np.array', (["data[['x1', 'x2']]"], {}), "(data[['x1', 'x2']])\n", (168, 188), True, 'import numpy as np\n'), ((194, 213), 'numpy.array', 'np.array', (["data['y']"], {}), "(data['y'])\n", (202, 213), True, 'import numpy as np\n')] |
import numpy as np
def hinge_loss(x, y, w, b, rho):
    """Squared hinge loss with an L2 penalty.

    :param x: sample matrix, one row per sample
    :param y: label vector (assumed +1/-1 — TODO confirm with caller)
    :param w: weight vector
    :param b: bias (intercept) term
    :param rho: L2 regularisation strength
    :returns: mean squared hinge loss plus rho * ||w||^2
    """
    margins = 1 - np.multiply(y, np.sum(w.T * x, axis=1) - b)
    penalty = rho * (np.linalg.norm(w) ** 2)
    return np.mean(np.maximum(0, margins) ** 2) + penalty
def dhinge_loss(x, y, w, b, rho):
    """Analytic gradient of `hinge_loss` with respect to w and b.

    :param x: sample matrix, one row per sample
    :param y: label vector (assumed +1/-1 — TODO confirm with caller)
    :param w: weight vector
    :param b: bias (intercept) term
    :param rho: L2 regularisation strength
    :returns: tuple (grad_w, grad_b), each averaged over the n samples
    """
    # Terms
    n = x.shape[0]
    # Hinge margins; only entries with positive margin contribute.
    cost = 1 - np.multiply(y, np.sum(w.T*x, axis = 1) - b)
    # Gradient of the L2 penalty rho*||w||^2.
    dreg = 2.*rho*w.T
    # Calculation
    dcost_w = np.sum(-2.*np.multiply(np.multiply(y[:, np.newaxis], x), np.maximum(0, cost)[:, np.newaxis]), axis = 0) + dreg
    dcost_b = np.sum(2.*np.multiply(y, np.maximum(0, cost)))
    return (1./n)*dcost_w.T, (1./n)*dcost_b | [
"numpy.maximum",
"numpy.multiply",
"numpy.linalg.norm",
"numpy.sum"
] | [((80, 97), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (94, 97), True, 'import numpy as np\n'), ((132, 155), 'numpy.sum', 'np.sum', (['(w.T * x)'], {'axis': '(1)'}), '(w.T * x, axis=1)\n', (138, 155), True, 'import numpy as np\n'), ((185, 204), 'numpy.maximum', 'np.maximum', (['(0)', 'cost'], {}), '(0, cost)\n', (195, 204), True, 'import numpy as np\n'), ((311, 334), 'numpy.sum', 'np.sum', (['(w.T * x)'], {'axis': '(1)'}), '(w.T * x, axis=1)\n', (317, 334), True, 'import numpy as np\n'), ((549, 568), 'numpy.maximum', 'np.maximum', (['(0)', 'cost'], {}), '(0, cost)\n', (559, 568), True, 'import numpy as np\n'), ((422, 454), 'numpy.multiply', 'np.multiply', (['y[:, np.newaxis]', 'x'], {}), '(y[:, np.newaxis], x)\n', (433, 454), True, 'import numpy as np\n'), ((456, 475), 'numpy.maximum', 'np.maximum', (['(0)', 'cost'], {}), '(0, cost)\n', (466, 475), True, 'import numpy as np\n')] |
import sys
import json
import base64
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import db_connection as db_con
def main(argc, argv):
    """Compare product word-vectors (from the DB) against each other and
    against the inferred per-sugar-value group vectors.

    :param argc: argument count (unused beyond the call convention)
    :param argv: argument list (unused beyond the call convention)
    """
    print('Load file')
    # `with` ensures the JSON file is closed even on error (was left open).
    with open('../output/groups.json') as file:
        print('Load json content')
        groups = json.load(file)
    group_sugar_values = groups['products.sugars_100g'][0]['inferred_elements']
    for value in group_sugar_values:
        # base64.decodestring was removed in Python 3.9 -> decodebytes;
        # np.fromstring is deprecated for binary input -> np.frombuffer.
        raw = base64.decodebytes(bytes(group_sugar_values[value]['vector'], 'ascii'))
        group_sugar_values[value] = np.frombuffer(raw, dtype='float32')
    print('Load database content')
    db_config = db_con.get_db_config('../config/db_config.json')
    con, cur = db_con.create_connection(db_config)
    query = "SELECT DISTINCT products.product_name, products.sugars_100g::varchar, retro_vecs.vector FROM products JOIN retro_vecs" +\
            " ON ('products.product_name#' || products.product_name) = retro_vecs.word WHERE products.sugars_100g IS NOT NULL"
    cur.execute(query)
    # Group the product vectors by their (stringified) sugar value.
    product_values = dict()
    for name, sugar, vec in cur.fetchall():
        product_values.setdefault(sugar, []).append((np.frombuffer(vec, dtype='float32'), name))
    number_to_test = '0'
    first = product_values.get(number_to_test)[0][0]
    for vec, name in product_values.get(number_to_test):
        similarity = cosine_similarity(vec.reshape(1, -1), first.reshape(1, -1))
        # print(cosine_similarity(vec.reshape(1, -1), group_sugar_values[number_to_test].reshape(1, -1)))
        print(name, ': ', similarity)
    print('! set breakpoint here !')
# Script entry point: forward the raw argv to main().
if __name__ == "__main__":
    main(len(sys.argv), sys.argv)
| [
"db_connection.get_db_config",
"numpy.frombuffer",
"json.load",
"db_connection.create_connection"
] | [((274, 289), 'json.load', 'json.load', (['file'], {}), '(file)\n', (283, 289), False, 'import json\n'), ((600, 648), 'db_connection.get_db_config', 'db_con.get_db_config', (['"""../config/db_config.json"""'], {}), "('../config/db_config.json')\n", (620, 648), True, 'import db_connection as db_con\n'), ((664, 699), 'db_connection.create_connection', 'db_con.create_connection', (['db_config'], {}), '(db_config)\n', (688, 699), True, 'import db_connection as db_con\n'), ((1180, 1215), 'numpy.frombuffer', 'np.frombuffer', (['vec'], {'dtype': '"""float32"""'}), "(vec, dtype='float32')\n", (1193, 1215), True, 'import numpy as np\n')] |
from __future__ import division
from numpy import (e,pi,meshgrid,arange,sin,sqrt,cos,arccos,exp,
zeros,max,random,argmax,argmin,ones_like,array)
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
# Simulate the image a slightly off-axis LED casts on a small slide:
# inverse-square fall-off, the LED's angular emission pattern, a camera
# vignette and Poisson shot noise are applied, then the image is
# spatially normalized against a smoothed copy of itself.
ledang = 20*pi/180 #degrees
ct = cos(ledang) #cosine of theta
leddist = .05 #meters
dc = leddist*ct #distance times cosine theta
slide_size = .005 #meters
Nxy = array((1280,960))
pixel_size = slide_size/Nxy
# Random LED offset in the slide plane (up to +/-500 pixels worth).
Fx,Fy = pixel_size*(1000*(random.rand(2)-.5))
# Slide Coordinates
X,Y = meshgrid(slide_size/Nxy[0] * arange(-Nxy[0]//2,Nxy[0]//2),
        slide_size/Nxy[1] * arange(-Nxy[1]//2,Nxy[1]//2))
# Distance from LED to each pixel
distances = sqrt((leddist * sin(ledang))**2
                +(leddist*cos(ledang)+X)**2
                +Y**2)
# Normalized Inverse Square Mask
invsq = 1/distances**2
invsq /= invsq[Nxy[1]//2,Nxy[0]//2]
#Fx,Fy = .2,0
# Degrees off of LED direction vector
# NOTE(review): arccos returns radians, though the comment above says
# degrees — confirm intent.
radiation_pattern = arccos((Fx*X+Fx*dc+X*dc+Fy*Y+leddist**2)/
            sqrt((Fx**2+2*Fx*dc+Fy**2+leddist**2)*
                (X**2+2*X*dc+Y**2+leddist**2)))
# LED fading away from center
# NOTE(review): the 1.9 divisor looks like an empirical constant — confirm.
Iang = 1 - radiation_pattern**2/1.9
# Image Formation
image = ones_like(X,'uint8')
# Center
cx = X[0,Nxy[0]//2]
cy = Y[Nxy[1]//2,0]
# Gaussian Viniette
Imax= 200
r = sqrt((X-cx)**2+(Y-cy)**2)
r0 = slide_size/5
camera = Imax*exp(-1*(r/r0)**3)
optical = invsq*Iang
image = camera*image
# Poisson shot noise is added before the optical mask is applied.
noise = random.poisson(5,image.shape)
image += noise
image = optical*image
# Rendered image with inverse-square contours and the LED position marked.
plt.figure()
plt.pcolormesh(X,Y,image,cmap='gray',vmin=0,vmax=255)
plt.contour(X,Y,invsq)
plt.colorbar()
plt.plot(Fx,Fy,'wx')
plt.axis([-1*slide_size/2,slide_size/2,-1*slide_size/2,slide_size/2])
# Vertical and horizontal intensity cross-sections through the image center.
plt.figure()
plt.plot(Y[:,0],image[:,Nxy[0]//2])
plt.ylim([0,255])
plt.grid()
plt.figure()
plt.plot(X[0,:],image[Nxy[1]//2,:])
plt.ylim([0,255])
plt.grid()
# Spatially Normalize
# Divide by a Gaussian-smoothed copy wherever it is bright enough (>8).
# NOTE(review): `noise` was already added into `image` above and is added
# again here before smoothing — possibly double-counted; confirm.
C = Imax*ones_like(image)
smooth = gaussian_filter(image+noise,4)
reasonable = smooth>8
C[reasonable] = C[reasonable]/(smooth)[reasonable]
imagenorm = C*(image)
plt.figure()
plt.pcolormesh(X,Y,imagenorm,cmap='gray',vmax=255)
plt.colorbar()
| [
"scipy.ndimage.filters.gaussian_filter",
"numpy.ones_like",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.random.rand",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.random.poisson",
"numpy.cos",
"matplotli... | [((282, 293), 'numpy.cos', 'cos', (['ledang'], {}), '(ledang)\n', (285, 293), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((410, 428), 'numpy.array', 'array', (['(1280, 960)'], {}), '((1280, 960))\n', (415, 428), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((1243, 1264), 'numpy.ones_like', 'ones_like', (['X', '"""uint8"""'], {}), "(X, 'uint8')\n", (1252, 1264), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((1349, 1384), 'numpy.sqrt', 'sqrt', (['((X - cx) ** 2 + (Y - cy) ** 2)'], {}), '((X - cx) ** 2 + (Y - cy) ** 2)\n', (1353, 1384), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((1478, 1508), 'numpy.random.poisson', 'random.poisson', (['(5)', 'image.shape'], {}), '(5, image.shape)\n', (1492, 1508), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((1546, 1558), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1556, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1617), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['X', 'Y', 'image'], {'cmap': '"""gray"""', 'vmin': '(0)', 'vmax': '(255)'}), "(X, Y, image, cmap='gray', vmin=0, vmax=255)\n", (1573, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1637), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'invsq'], {}), '(X, Y, invsq)\n', (1624, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1650), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1648, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1651, 1673), 'matplotlib.pyplot.plot', 'plt.plot', (['Fx', 'Fy', 
'"""wx"""'], {}), "(Fx, Fy, 'wx')\n", (1659, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1761), 'matplotlib.pyplot.axis', 'plt.axis', (['[-1 * slide_size / 2, slide_size / 2, -1 * slide_size / 2, slide_size / 2]'], {}), '([-1 * slide_size / 2, slide_size / 2, -1 * slide_size / 2, \n slide_size / 2])\n', (1680, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1755), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1753, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1796), 'matplotlib.pyplot.plot', 'plt.plot', (['Y[:, 0]', 'image[:, Nxy[0] // 2]'], {}), '(Y[:, 0], image[:, Nxy[0] // 2])\n', (1764, 1796), True, 'import matplotlib.pyplot as plt\n'), ((1792, 1810), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 255]'], {}), '([0, 255])\n', (1800, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1810, 1820), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1818, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1833), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1831, 1833), True, 'import matplotlib.pyplot as plt\n'), ((1834, 1874), 'matplotlib.pyplot.plot', 'plt.plot', (['X[0, :]', 'image[Nxy[1] // 2, :]'], {}), '(X[0, :], image[Nxy[1] // 2, :])\n', (1842, 1874), True, 'import matplotlib.pyplot as plt\n'), ((1870, 1888), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 255]'], {}), '([0, 255])\n', (1878, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1888, 1898), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1896, 1898), True, 'import matplotlib.pyplot as plt\n'), ((1957, 1990), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['(image + noise)', '(4)'], {}), '(image + noise, 4)\n', (1972, 1990), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((2083, 2095), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2093, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2096, 2150), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['X', 
'Y', 'imagenorm'], {'cmap': '"""gray"""', 'vmax': '(255)'}), "(X, Y, imagenorm, cmap='gray', vmax=255)\n", (2110, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2161), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2159, 2161), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1431), 'numpy.exp', 'exp', (['(-1 * (r / r0) ** 3)'], {}), '(-1 * (r / r0) ** 3)\n', (1411, 1431), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((1931, 1947), 'numpy.ones_like', 'ones_like', (['image'], {}), '(image)\n', (1940, 1947), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((558, 591), 'numpy.arange', 'arange', (['(-Nxy[0] // 2)', '(Nxy[0] // 2)'], {}), '(-Nxy[0] // 2, Nxy[0] // 2)\n', (564, 591), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((626, 659), 'numpy.arange', 'arange', (['(-Nxy[1] // 2)', '(Nxy[1] // 2)'], {}), '(-Nxy[1] // 2, Nxy[1] // 2)\n', (632, 659), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((1046, 1152), 'numpy.sqrt', 'sqrt', (['((Fx ** 2 + 2 * Fx * dc + Fy ** 2 + leddist ** 2) * (X ** 2 + 2 * X * dc + \n Y ** 2 + leddist ** 2))'], {}), '((Fx ** 2 + 2 * Fx * dc + Fy ** 2 + leddist ** 2) * (X ** 2 + 2 * X *\n dc + Y ** 2 + leddist ** 2))\n', (1050, 1152), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((482, 496), 'numpy.random.rand', 'random.rand', (['(2)'], {}), '(2)\n', (493, 496), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((719, 730), 'numpy.sin', 'sin', (['ledang'], {}), 
'(ledang)\n', (722, 730), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n'), ((765, 776), 'numpy.cos', 'cos', (['ledang'], {}), '(ledang)\n', (768, 776), False, 'from numpy import e, pi, meshgrid, arange, sin, sqrt, cos, arccos, exp, zeros, max, random, argmax, argmin, ones_like, array\n')] |
import multiprocessing as mp
import os
import numpy as np
from cv2 import imread, imwrite
import selectivesearch.selectivesearch as selectivesearch
from filters import filter_mask_bbox
# Dataset locations and selective-search parameters shared by all workers.
imagenet_root = '/D_data/Self/imagenet_root/'
imagenet_root_proposals = '/D_data/Self/imagenet_root_ss_mask_proposals_mp'
split = 'train'
scale = 300       # selective-search segmentation scale
min_size = 100    # selective-search minimum component size
processes_num = 48
# Only the first 300 classes (sorted by directory name) are processed.
class_names = sorted(os.listdir(os.path.join(imagenet_root, split)))[:300]
classes_num = len(class_names)
# NOTE(review): this adds 1 even when classes_num divides evenly, so the
# last worker(s) may get an empty slice — confirm that is intended.
classes_per_process = classes_num // processes_num + 1
source_path = os.path.join(imagenet_root, split)
target_path = os.path.join(imagenet_root_proposals, split)
def convert(img_with_lbl, regions):
    """Turn selective-search output into a dense region mask and a bbox array.

    :param img_with_lbl: H x W x 4 array whose 4th channel carries the
        per-pixel segment labels produced by selective search.
    :param regions: list of region dicts with 'rect' and 'labels' keys.
    :returns: (output_mask, rects): output_mask marks each pixel with the
        1-based index of the region covering it (0 = uncovered); rects
        stacks every region's bounding rectangle.
    """
    label_mask = img_with_lbl[:, :, 3]
    output_mask = np.zeros_like(label_mask, dtype=int)
    # Region indices are shifted by one so that 0 can mean "no region".
    for one_based_idx, region in enumerate(regions, start=1):
        for seg_label in region['labels']:
            output_mask[label_mask == seg_label] = one_based_idx
    rects = np.array([region['rect'] for region in regions])
    assert rects.shape[0] == len(regions)
    return output_mask, rects
def process_one_class(process_id, classes_per_process, class_names, source_path, target_path):
    """Generate selective-search mask/bbox proposals for one worker's slice
    of classes and log the proposal count per image.

    :param process_id: index of this worker; selects the slice of classes
    :param classes_per_process: number of classes assigned to each worker
    :param class_names: full sorted list of class directory names
    :param source_path: root directory of the input images
    :param target_path: root directory where .npy proposals are written
    """
    print(f"Process id: {process_id} started")
    # `with` guarantees the per-process log is closed even if a worker
    # crashes mid-class (the original leaked the handle on exceptions).
    with open(os.path.join(imagenet_root_proposals, f'num_props_proc{process_id}.txt'), 'w') as f:
        processed = 0
        for i in range(process_id*classes_per_process, process_id*classes_per_process + classes_per_process):
            if i >= len(class_names):
                break
            class_name = class_names[i]
            filenames = sorted(os.listdir(os.path.join(source_path, class_name)))
            os.makedirs(os.path.join(target_path, class_name), exist_ok=True)
            for filename in filenames:  # dropped unused enumerate index
                base_filename = os.path.splitext(filename)[0]
                cur_img_mask_path = os.path.join(target_path, class_name, base_filename+'_mask.npy')
                cur_img_bbox_path = os.path.join(target_path, class_name, base_filename+'_bbox.npy')
                # Skip images whose proposals already exist (resume support).
                if os.path.exists(cur_img_bbox_path) and os.path.exists(cur_img_mask_path):
                    continue
                img_path = os.path.join(source_path, class_name, filename)
                img = imread(img_path)
                img_with_lbl, regions, _ = selectivesearch.selective_search(img, scale=scale, sigma=0.9, min_size=min_size)
                img_mask, rects = convert(img_with_lbl, regions)
                img_mask_filtered, rects_filtered = filter_mask_bbox(img_mask, rects)
                np.save(cur_img_mask_path, img_mask_filtered)
                np.save(cur_img_bbox_path, rects_filtered)
                f.write(f"{base_filename} {rects_filtered.shape[0]}\n")
            processed += 1
            print(f"Process {process_id} processed class {class_name} [{processed}/{classes_per_process}]")
    print(f"Process {process_id} finished")
# One worker per process id; each worker handles its own slice of classes.
processes = [mp.Process(target=process_one_class,
                        args=(process_id, classes_per_process, class_names, source_path, target_path))
            for process_id in range(processes_num)]
# Run processes
for p in processes:
    p.start()
# Exit the completed processes
for p in processes:
    p.join()
| [
"numpy.zeros_like",
"numpy.save",
"selectivesearch.selectivesearch.selective_search",
"os.path.exists",
"cv2.imread",
"numpy.array",
"os.path.splitext",
"filters.filter_mask_bbox",
"multiprocessing.Process",
"os.path.join"
] | [((551, 585), 'os.path.join', 'os.path.join', (['imagenet_root', 'split'], {}), '(imagenet_root, split)\n', (563, 585), False, 'import os\n'), ((600, 644), 'os.path.join', 'os.path.join', (['imagenet_root_proposals', 'split'], {}), '(imagenet_root_proposals, split)\n', (612, 644), False, 'import os\n'), ((755, 791), 'numpy.zeros_like', 'np.zeros_like', (['label_mask'], {'dtype': 'int'}), '(label_mask, dtype=int)\n', (768, 791), True, 'import numpy as np\n'), ((992, 1007), 'numpy.array', 'np.array', (['rects'], {}), '(rects)\n', (1000, 1007), True, 'import numpy as np\n'), ((3029, 3148), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'process_one_class', 'args': '(process_id, classes_per_process, class_names, source_path, target_path)'}), '(target=process_one_class, args=(process_id, classes_per_process,\n class_names, source_path, target_path))\n', (3039, 3148), True, 'import multiprocessing as mp\n'), ((1237, 1309), 'os.path.join', 'os.path.join', (['imagenet_root_proposals', 'f"""num_props_proc{process_id}.txt"""'], {}), "(imagenet_root_proposals, f'num_props_proc{process_id}.txt')\n", (1249, 1309), False, 'import os\n'), ((407, 441), 'os.path.join', 'os.path.join', (['imagenet_root', 'split'], {}), '(imagenet_root, split)\n', (419, 441), False, 'import os\n'), ((1627, 1664), 'os.path.join', 'os.path.join', (['target_path', 'class_name'], {}), '(target_path, class_name)\n', (1639, 1664), False, 'import os\n'), ((1825, 1891), 'os.path.join', 'os.path.join', (['target_path', 'class_name', "(base_filename + '_mask.npy')"], {}), "(target_path, class_name, base_filename + '_mask.npy')\n", (1837, 1891), False, 'import os\n'), ((1922, 1988), 'os.path.join', 'os.path.join', (['target_path', 'class_name', "(base_filename + '_bbox.npy')"], {}), "(target_path, class_name, base_filename + '_bbox.npy')\n", (1934, 1988), False, 'import os\n'), ((2125, 2172), 'os.path.join', 'os.path.join', (['source_path', 'class_name', 'filename'], {}), '(source_path, 
class_name, filename)\n', (2137, 2172), False, 'import os\n'), ((2273, 2289), 'cv2.imread', 'imread', (['img_path'], {}), '(img_path)\n', (2279, 2289), False, 'from cv2 import imread, imwrite\n'), ((2412, 2497), 'selectivesearch.selectivesearch.selective_search', 'selectivesearch.selective_search', (['img'], {'scale': 'scale', 'sigma': '(0.9)', 'min_size': 'min_size'}), '(img, scale=scale, sigma=0.9, min_size=min_size\n )\n', (2444, 2497), True, 'import selectivesearch.selectivesearch as selectivesearch\n'), ((2603, 2636), 'filters.filter_mask_bbox', 'filter_mask_bbox', (['img_mask', 'rects'], {}), '(img_mask, rects)\n', (2619, 2636), False, 'from filters import filter_mask_bbox\n'), ((2650, 2695), 'numpy.save', 'np.save', (['cur_img_mask_path', 'img_mask_filtered'], {}), '(cur_img_mask_path, img_mask_filtered)\n', (2657, 2695), True, 'import numpy as np\n'), ((2708, 2750), 'numpy.save', 'np.save', (['cur_img_bbox_path', 'rects_filtered'], {}), '(cur_img_bbox_path, rects_filtered)\n', (2715, 2750), True, 'import numpy as np\n'), ((1567, 1604), 'os.path.join', 'os.path.join', (['source_path', 'class_name'], {}), '(source_path, class_name)\n', (1579, 1604), False, 'import os\n'), ((1762, 1788), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1778, 1788), False, 'import os\n'), ((2003, 2036), 'os.path.exists', 'os.path.exists', (['cur_img_bbox_path'], {}), '(cur_img_bbox_path)\n', (2017, 2036), False, 'import os\n'), ((2041, 2074), 'os.path.exists', 'os.path.exists', (['cur_img_mask_path'], {}), '(cur_img_mask_path)\n', (2055, 2074), False, 'import os\n')] |
import sys
import joblib
import numpy as np
import pandas as pd
from utils.model_utils import get_model
from azureml.core import Model
def parse_args():
    """Parse the model-selection flags from sys.argv.

    Recognized flags: --model-name (required), --model-version,
    --model-tag-name, --model-tag-value.

    Returns:
        [model_name, model_version, model_tag_name, model_tag_value]; every
        entry except model_name is None when its flag is absent or blank.

    Raises:
        ValueError: if --model-name is missing or blank.
    """
    def _flag_value(flag):
        # Value is the token following the flag. Enumerating argv[:-1]
        # prevents the IndexError the original raised when the flag was the
        # last token. Missing / valueless / blank all collapse to None.
        values = [sys.argv[idx + 1]
                  for idx, item in enumerate(sys.argv[:-1]) if item == flag]
        if not values or not values[0].strip():
            return None
        return values[0]

    model_name = _flag_value('--model-name')
    if model_name is None:
        raise ValueError('model name must be provided')
    return [model_name,
            _flag_value('--model-version'),
            _flag_value('--model-tag-name'),
            _flag_value('--model-tag-value')]
def init():
    """One-time setup for batch scoring: resolve, download and load the model.

    Populates the module-level ``model`` global used by run(). Failures are
    printed rather than raised so the batch job can report them.
    """
    try:
        print('Loading Model')
        name, version, tag_name, tag_value = parse_args()
        aml_model = get_model(model_name=name,
                              model_version=version,
                              tag_name=tag_name,
                              tag_value=tag_value)
        global model
        model_path = Model.get_model_path(model_name=aml_model.name,
                                          version=aml_model.version)
        model = joblib.load(model_path)
        print(f'model:{aml_model.name} downloading is successful')
    except Exception as ex:
        print(ex)
def run(mini_batch):
    """Score every row of a pandas mini-batch with the globally loaded model.

    Returns a single-column 'score' DataFrame, or [] for an empty batch.
    Exceptions are printed (returning None) so one bad batch does not abort
    the whole job.
    """
    try:
        scores = None
        for _, row in mini_batch.iterrows():
            pred = model.predict(row.values.reshape(1, -1))
            if scores is None:
                scores = np.array(pred)
            else:
                scores = np.vstack([scores, pred])
        if scores is None:
            return []
        return pd.DataFrame(scores, columns=['score'])
    except Exception as ex:
        print(ex)
| [
"pandas.DataFrame",
"azureml.core.Model.get_model_path",
"utils.model_utils.get_model",
"numpy.array",
"joblib.load",
"numpy.vstack"
] | [((1322, 1447), 'utils.model_utils.get_model', 'get_model', ([], {'model_name': 'model_params[0]', 'model_version': 'model_params[1]', 'tag_name': 'model_params[2]', 'tag_value': 'model_params[3]'}), '(model_name=model_params[0], model_version=model_params[1],\n tag_name=model_params[2], tag_value=model_params[3])\n', (1331, 1447), False, 'from utils.model_utils import get_model\n'), ((1576, 1650), 'azureml.core.Model.get_model_path', 'Model.get_model_path', ([], {'model_name': 'aml_model.name', 'version': 'aml_model.version'}), '(model_name=aml_model.name, version=aml_model.version)\n', (1596, 1650), False, 'from azureml.core import Model\n'), ((1709, 1732), 'joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (1720, 1732), False, 'import joblib\n'), ((2139, 2178), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': "['score']"}), "(result, columns=['score'])\n", (2151, 2178), True, 'import pandas as pd\n'), ((2024, 2038), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (2032, 2038), True, 'import numpy as np\n'), ((2062, 2087), 'numpy.vstack', 'np.vstack', (['[result, pred]'], {}), '([result, pred])\n', (2071, 2087), True, 'import numpy as np\n')] |
#############################################
# #
# <NAME> #
# ECE 351-51 #
# Lab 4 #
# 2/18/2020 #
# #
# #
#############################################
import numpy as np
import matplotlib.pyplot as plt
import scipy as sig
#############################################################
# User Defined Funtions
# Ramp funtion
def r(t):
    """Unit ramp function: r(t) = t for t >= 0, else 0.

    Vectorized replacement for the original per-element Python loop.

    Args:
        t: numpy array of time samples.

    Returns:
        Float array of the same shape as t (float even for integer input,
        matching the original np.zeros-based behavior).
    """
    y = np.zeros(t.shape)      # float output regardless of t's dtype
    mask = t >= 0
    y[mask] = t[mask]          # copy only the non-negative samples
    return y                   # send back the output stored in an array
# Step Function
def u(t):
    """Unit step function: u(t) = 1 for t >= 0, else 0.

    Vectorized replacement for the original per-element Python loop.

    Args:
        t: numpy array of time samples.

    Returns:
        Float array of the same shape as t (matching the original, which
        always produced a float array via np.zeros).
    """
    return (t >= 0).astype(float)
#############################################################
#%% Section 3:3.3
# Base frequency of the cosine impulse response h3, and its angular form.
f0 = 0.25 # in Hz
w0 = 2*np.pi*f0  # angular frequency in rad/s
def h1(t):
    """Impulse response h1(t) = e^{2t} * u(1 - t): exponential, gated off for t > 1."""
    gate = u(1 - t)            # 1 while t <= 1, 0 afterwards
    return np.exp(2 * t) * gate
def h2(t):
    """Rectangular pulse h2(t) = u(t - 2) - u(t - 6): one on [2, 6), zero elsewhere."""
    rising = u(t - 2)
    falling = u(t - 6)
    return rising - falling
def h3(t):
    """Causal cosine h3(t) = cos(w0 * t) * u(t), with w0 defined at module level."""
    return u(t) * np.cos(w0 * t)
# Plot the three user-defined impulse responses h1, h2, h3 over t in [-10, 10].
steps = 1e-2
t = np.arange(-10,10 + steps, steps)
plt.figure(figsize = (10, 7))
plt.subplot(3, 1, 1)
plt.plot(t, h1(t))
plt.title('User Defined Functions')  # fixed typo: was 'Funtions'
plt.grid()
plt.ylabel('h1(t)')
plt.subplot(3, 1, 2)
plt.plot(t, h2(t))
plt.grid()
plt.ylabel('h2(t)')
plt.subplot(3, 1, 3)
plt.plot(t, h3(t))
plt.grid()
plt.xlabel('t')
plt.ylabel('h3(t)')
plt.show()
#############################################################
#%% Section 4:4.3
def convo(t1,t2):
    """Discrete 'full' convolution of two sampled signals.

    Implements res[i] = sum_j t1[j] * t2[i - j], producing
    len(t1) + len(t2) - 1 samples — the same result as numpy.convolve /
    scipy.signal.convolve in their default 'full' mode.

    Fixes the original off-by-one (``Et2[i-j+1]``), which shifted the output
    by one sample and relied on a bare try/except (printing indices) to
    swallow the resulting IndexError near the end of the result.

    Args:
        t1, t2: 1-D numpy arrays sampled with the same step.

    Returns:
        1-D float array of length len(t1) + len(t2) - 1.
    """
    t1_len = len(t1)
    t2_len = len(t2)
    # Zero-extend both inputs so every t1[j] * t2[i - j] access is in range.
    Et1 = np.append(t1, np.zeros(t2_len - 1))
    Et2 = np.append(t2, np.zeros(t1_len - 1))
    res = np.zeros(t1_len + t2_len - 1)
    for i in range(t1_len + t2_len - 1):
        for j in range(t1_len):
            if i - j >= 0:                   # t2 is causal-indexed from 0
                res[i] += Et1[j] * Et2[i - j]
    return res
# Modern SciPy removed the deprecated top-level NumPy aliases, so the file's
# `import scipy as sig` no longer provides `sig.convolve`. Rebind the name to
# scipy.signal, whose convolve() is the supported API (same 'full' mode).
from scipy import signal as sig
steps = 1e-2
t = np.arange(-10,10 + steps, steps)
NN = len(t)
# Time axis for the 'full' convolution output: 2*NN - 1 samples spanning
# [2*t[0], 2*t[-1]]; valid when the original axis starts below zero.
tE = np.arange(2*t[0],2*t[NN-1]+steps,steps)
# Numerical convolutions with the unit step; the factor `steps` turns the
# discrete sum into a Riemann-sum approximation of the continuous integral.
con_h1_u = convo(h1(t),u(t))*steps
con_h2_u = convo(h2(t),u(t))*steps
con_h3_u = convo(h3(t),u(t))*steps
# Library results used to sanity-check the hand-rolled convolution above.
con_h1_ucheck = sig.convolve(h1(t),u(t))*steps
con_h2_ucheck = sig.convolve(h2(t),u(t))*steps
con_h3_ucheck = sig.convolve(h3(t),u(t))*steps
# Closed-form (hand-derived) step responses for comparison on the t axis.
con_h1_uhand = ((0.5*np.exp(2*t))*u(1-t)) + (0.5*np.exp(2)*u(t-1))
con_h2_uhand = (r(t-2) - r(t-6))*u(t)
con_h3_uhand = ((1/w0)*(np.sin(w0*t)))*u(t)
# Figure: user-defined convolution vs. scipy.signal.convolve.
plt.figure(figsize = (10,7))
plt.subplot(3,1,1)
plt.plot(tE,con_h1_u, label='User-defined')
plt.plot(tE,con_h1_ucheck, '--', label='Built-in')
plt.ylabel('h1(t) * u(t)')
plt.grid()
plt.legend()
plt.title('Check with sig.convolve')
plt.subplot(3,1,2)
plt.plot(tE,con_h2_u, label='User-defined')
plt.plot(tE,con_h2_ucheck,'--', label='Built-in')
plt.ylabel('h2(t) * u(t)')
plt.grid()
plt.legend()
plt.subplot(3,1,3)
plt.plot(tE,con_h3_u, label='User-defined')
plt.plot(tE,con_h3_ucheck,'--', label='Built-in')
plt.ylabel('h3(t) * u(t)')  # fixed label: this subplot shows h3, not h2
plt.grid()
plt.legend()
plt.show()
# Figure: hand-derived closed forms on the original time axis.
plt.figure(figsize = (10,7))
plt.subplot(3,1,1)
plt.plot(t,con_h1_uhand, 'r--', label='Hand Calculation')
plt.ylabel('h1(t) * u(t)')
plt.grid()
plt.legend()
plt.title('Hand Calculations')
plt.subplot(3,1,2)
plt.plot(t,con_h2_uhand, 'r--', label='Hand Calculation')
plt.ylabel('h2(t) * u(t)')
plt.grid()
plt.legend()
plt.subplot(3,1,3)
plt.plot(t,con_h3_uhand, 'r--', label='Hand Calculation')
plt.ylabel('h3(t) * u(t)')  # fixed label: this subplot shows h3, not h2
plt.grid()
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
... | [((1447, 1480), 'numpy.arange', 'np.arange', (['(-10)', '(10 + steps)', 'steps'], {}), '(-10, 10 + steps, steps)\n', (1456, 1480), True, 'import numpy as np\n'), ((1481, 1508), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1491, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1511, 1531), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (1522, 1531), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1585), 'matplotlib.pyplot.title', 'plt.title', (['"""User Defined Funtions"""'], {}), "('User Defined Funtions')\n", (1560, 1585), True, 'import matplotlib.pyplot as plt\n'), ((1586, 1596), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1594, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1617), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h1(t)"""'], {}), "('h1(t)')\n", (1608, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1620, 1640), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (1631, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1660, 1670), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1668, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1691), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h2(t)"""'], {}), "('h2(t)')\n", (1682, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1713), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (1704, 1713), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1744), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (1756, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1762, 1781), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h3(t)"""'], {}), "('h3(t)')\n", (1772, 1781), True, 'import matplotlib.pyplot as plt\n'), 
((1782, 1792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1790, 1792), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2535), 'numpy.arange', 'np.arange', (['(-10)', '(10 + steps)', 'steps'], {}), '(-10, 10 + steps, steps)\n', (2511, 2535), True, 'import numpy as np\n'), ((2602, 2651), 'numpy.arange', 'np.arange', (['(2 * t[0])', '(2 * t[NN - 1] + steps)', 'steps'], {}), '(2 * t[0], 2 * t[NN - 1] + steps, steps)\n', (2611, 2651), True, 'import numpy as np\n'), ((3042, 3069), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (3052, 3069), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3091), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (3082, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3090, 3134), 'matplotlib.pyplot.plot', 'plt.plot', (['tE', 'con_h1_u'], {'label': '"""User-defined"""'}), "(tE, con_h1_u, label='User-defined')\n", (3098, 3134), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3185), 'matplotlib.pyplot.plot', 'plt.plot', (['tE', 'con_h1_ucheck', '"""--"""'], {'label': '"""Built-in"""'}), "(tE, con_h1_ucheck, '--', label='Built-in')\n", (3142, 3185), True, 'import matplotlib.pyplot as plt\n'), ((3185, 3211), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h1(t) * u(t)"""'], {}), "('h1(t) * u(t)')\n", (3195, 3211), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3222), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3220, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3235), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3233, 3235), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3272), 'matplotlib.pyplot.title', 'plt.title', (['"""Check with sig.convolve"""'], {}), "('Check with sig.convolve')\n", (3245, 3272), True, 'import matplotlib.pyplot as plt\n'), ((3274, 3294), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3285, 3294), True, 'import 
matplotlib.pyplot as plt\n'), ((3293, 3337), 'matplotlib.pyplot.plot', 'plt.plot', (['tE', 'con_h2_u'], {'label': '"""User-defined"""'}), "(tE, con_h2_u, label='User-defined')\n", (3301, 3337), True, 'import matplotlib.pyplot as plt\n'), ((3337, 3388), 'matplotlib.pyplot.plot', 'plt.plot', (['tE', 'con_h2_ucheck', '"""--"""'], {'label': '"""Built-in"""'}), "(tE, con_h2_ucheck, '--', label='Built-in')\n", (3345, 3388), True, 'import matplotlib.pyplot as plt\n'), ((3387, 3413), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h2(t) * u(t)"""'], {}), "('h2(t) * u(t)')\n", (3397, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3414, 3424), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3422, 3424), True, 'import matplotlib.pyplot as plt\n'), ((3425, 3437), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3435, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3439, 3459), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3450, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3458, 3502), 'matplotlib.pyplot.plot', 'plt.plot', (['tE', 'con_h3_u'], {'label': '"""User-defined"""'}), "(tE, con_h3_u, label='User-defined')\n", (3466, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3502, 3553), 'matplotlib.pyplot.plot', 'plt.plot', (['tE', 'con_h3_ucheck', '"""--"""'], {'label': '"""Built-in"""'}), "(tE, con_h3_ucheck, '--', label='Built-in')\n", (3510, 3553), True, 'import matplotlib.pyplot as plt\n'), ((3552, 3578), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h2(t) * u(t)"""'], {}), "('h2(t) * u(t)')\n", (3562, 3578), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3589), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3587, 3589), True, 'import matplotlib.pyplot as plt\n'), ((3590, 3602), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3600, 3602), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3613), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3611, 
3613), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (3626, 3643), True, 'import matplotlib.pyplot as plt\n'), ((3645, 3665), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (3656, 3665), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3722), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'con_h1_uhand', '"""r--"""'], {'label': '"""Hand Calculation"""'}), "(t, con_h1_uhand, 'r--', label='Hand Calculation')\n", (3672, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3722, 3748), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h1(t) * u(t)"""'], {}), "('h1(t) * u(t)')\n", (3732, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3749, 3759), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3757, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3760, 3772), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3770, 3772), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3803), 'matplotlib.pyplot.title', 'plt.title', (['"""Hand Calculations"""'], {}), "('Hand Calculations')\n", (3782, 3803), True, 'import matplotlib.pyplot as plt\n'), ((3805, 3825), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3816, 3825), True, 'import matplotlib.pyplot as plt\n'), ((3824, 3882), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'con_h2_uhand', '"""r--"""'], {'label': '"""Hand Calculation"""'}), "(t, con_h2_uhand, 'r--', label='Hand Calculation')\n", (3832, 3882), True, 'import matplotlib.pyplot as plt\n'), ((3882, 3908), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h2(t) * u(t)"""'], {}), "('h2(t) * u(t)')\n", (3892, 3908), True, 'import matplotlib.pyplot as plt\n'), ((3909, 3919), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3917, 3919), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3932), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', 
(3930, 3932), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3954), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3945, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3953, 4011), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'con_h3_uhand', '"""r--"""'], {'label': '"""Hand Calculation"""'}), "(t, con_h3_uhand, 'r--', label='Hand Calculation')\n", (3961, 4011), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4037), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""h2(t) * u(t)"""'], {}), "('h2(t) * u(t)')\n", (4021, 4037), True, 'import matplotlib.pyplot as plt\n'), ((4038, 4048), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4046, 4048), True, 'import matplotlib.pyplot as plt\n'), ((4049, 4061), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4059, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4062, 4072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4070, 4072), True, 'import matplotlib.pyplot as plt\n'), ((608, 625), 'numpy.zeros', 'np.zeros', (['t.shape'], {}), '(t.shape)\n', (616, 625), True, 'import numpy as np\n'), ((903, 920), 'numpy.zeros', 'np.zeros', (['t.shape'], {}), '(t.shape)\n', (911, 920), True, 'import numpy as np\n'), ((2113, 2132), 'numpy.zeros', 'np.zeros', (['Et1.shape'], {}), '(Et1.shape)\n', (2121, 2132), True, 'import numpy as np\n'), ((1296, 1309), 'numpy.exp', 'np.exp', (['(2 * t)'], {}), '(2 * t)\n', (1302, 1309), True, 'import numpy as np\n'), ((1397, 1411), 'numpy.cos', 'np.cos', (['(w0 * t)'], {}), '(w0 * t)\n', (1403, 1411), True, 'import numpy as np\n'), ((1968, 1993), 'numpy.zeros', 'np.zeros', (['(1, t2_len - 1)'], {}), '((1, t2_len - 1))\n', (1976, 1993), True, 'import numpy as np\n'), ((2045, 2070), 'numpy.zeros', 'np.zeros', (['(1, t1_len - 1)'], {}), '((1, t1_len - 1))\n', (2053, 2070), True, 'import numpy as np\n'), ((3020, 3034), 'numpy.sin', 'np.sin', (['(w0 * t)'], {}), '(w0 * t)\n', (3026, 3034), True, 'import numpy as 
np\n'), ((2912, 2925), 'numpy.exp', 'np.exp', (['(2 * t)'], {}), '(2 * t)\n', (2918, 2925), True, 'import numpy as np\n'), ((2940, 2949), 'numpy.exp', 'np.exp', (['(2)'], {}), '(2)\n', (2946, 2949), True, 'import numpy as np\n')] |
import networkx as nx
import numpy as np
import sys
class GraphCleaner(object):
    """Post-processing pipeline for an influenza reassortment graph.

    Loads '<handle> Graph with PWIs.pkl', prunes edges that fail a series of
    consistency checks (zero PWI, subtype mismatches, NaN per-segment
    scores), normalizes the 'mixed' subtype label, and writes the result to
    '<handle> Final Graph.pkl'.

    Expected graph shape: a directed networkx graph (in_edges is used) with
    per-node 'subtype' (e.g. 'H3N2' or 'mixed'/'Mixed') and per-edge 'pwi',
    'edge_type' ('full_complement' or 'reassortant') and 'segments'
    (segment-number -> score mapping) attributes.

    NOTE(review): uses the networkx 1.x API (``G.node``, ``nx.read_gpickle``);
    this will not run unmodified on networkx >= 2/3 — confirm the pinned
    version.
    """
    def __init__(self, handle):
        super(GraphCleaner, self).__init__()
        # `handle` is the filename prefix shared by input and output pickles.
        self.handle = handle
        self.G = nx.read_gpickle('{0} Graph with PWIs.pkl'.format(self.handle))
    def run(self):
        """Run every cleaning pass in order, then persist the graph."""
        # Order matters: all edge pruning happens before label normalization.
        self.remove_zero_pwi_edges()
        self.remove_full_edges_diff_subtypes()
        self.remove_full_edges_with_nan()
        self.remove_reassortant_edges_with_nan()
        self.remove_reassortant_edges_with_subtype_mismatch()
        self.change_mixed_to_Mixed()
        self.save_output()
    def save_output(self):
        # Persist the cleaned graph under the same handle prefix.
        nx.write_gpickle(self.G, '{0} Final Graph.pkl'.format(self.handle))
    def change_mixed_to_Mixed(self):
        # Canonicalize the subtype label's capitalization on every node.
        for n, d in self.G.nodes(data=True):
            if d['subtype'] == 'mixed':
                self.G.node[n]['subtype'] = 'Mixed'
    def remove_zero_pwi_edges(self):
        # Drop edges whose 'pwi' score is 0; the membership re-check guards
        # against edges already removed while this loop mutates the graph.
        for sc, sk, d in self.G.edges(data=True):
            if d['pwi'] == 0 and (sc, sk) in self.G.edges():
                self.G.remove_edge(sc, sk)
    def remove_full_edges_diff_subtypes(self):
        # A 'full_complement' edge between two fully resolved (non-mixed)
        # nodes of different subtypes is inconsistent: remove it.
        for sc, sk, d in self.G.edges(data=True):
            if d['edge_type'] == 'full_complement':
                sc_subtype = self.G.node[sc]['subtype']
                sk_subtype = self.G.node[sk]['subtype']
                mixed = ['mixed', 'Mixed']
                if sc_subtype not in mixed and sk_subtype not in mixed:
                    if sc_subtype != sk_subtype:
                        self.G.remove_edge(sc, sk)
    def remove_full_edges_with_nan(self):
        # Remove 'full_complement' edges carrying any NaN segment score.
        for sc, sk, d in self.G.edges(data=True):
            if d['edge_type'] == 'full_complement':
                for seg, val in d['segments'].items():
                    if np.isnan(val):
                        self.G.remove_edge(sc, sk)
                        break
    def has_nan(self, edge_with_data):
        """Return True if any segment score on a (src, sink, data) triple is NaN."""
        has_nan = False
        _, _, d = edge_with_data
        for k, v in d['segments'].items():
            if np.isnan(v):
                has_nan = True
                break
        return has_nan
    def remove_in_edges(self, node):
        # Remove every incoming edge of `node` that still exists (edges may
        # already have been dropped by an earlier pass or iteration).
        for sc, sk in self.G.in_edges(node):
            if (sc, sk) in self.G.edges():
                self.G.remove_edge(sc, sk)
    def remove_reassortant_edges_with_nan(self):
        # A reassortant edge with NaN segment scores invalidates the sink's
        # ancestry: drop ALL of the sink's in-edges, not just this one.
        for sc, sk, d in self.G.edges(data=True):
            if d['edge_type'] == 'reassortant' and self.has_nan((sc, sk, d)):
                if (sc, sk) in self.G.edges():
                    self.remove_in_edges(sk)
    def remove_reassortant_edges_with_subtype_mismatch(self):
        """Drop a sink's in-edges when an edge transfers HA (segment 4) or NA
        (segment 6) between nodes with conflicting, fully resolved subtypes."""
        def get_ha_subtype(node):
            # 'H3N2' -> '3'; mixed labels are returned unchanged.
            mixed = ['mixed', 'Mixed']
            subtype = self.G.node[node]['subtype']
            if subtype not in mixed:
                return subtype.split('N')[0].split('H')[1]
            else:
                return subtype
        def get_na_subtype(node):
            # 'H3N2' -> '2'; mixed labels are returned unchanged.
            mixed = ['mixed', 'Mixed']
            subtype = self.G.node[node]['subtype']
            if subtype not in mixed:
                return subtype.split('N')[1]
            else:
                return subtype
        for sc, sk, d in self.G.edges(data=True):
            # NOTE(review): sc_subtype / sk_subtype are computed but never
            # used below; only the parsed HA/NA components matter.
            sc_subtype = self.G.node[sc]['subtype']
            sk_subtype = self.G.node[sk]['subtype']
            sc_ha = get_ha_subtype(sc)
            sk_ha = get_ha_subtype(sk)
            sc_na = get_na_subtype(sc)
            sk_na = get_na_subtype(sk)
            mixed = ['mixed', 'Mixed']
            # Segment 4 carries HA, segment 6 carries NA (matching the H/N
            # parsing in the helpers above).
            if 4 in d['segments'].keys():
                if sc_ha != sk_ha and sc_ha not in mixed and sk_ha not in mixed:
                    self.remove_in_edges(sk)
            if 6 in d['segments'].keys():
                if sc_na != sk_na and sc_na not in mixed and sk_na not in mixed:
                    self.remove_in_edges(sk)
# CLI entry point: argv[1] is the dataset handle used to locate the
# '<handle> Graph with PWIs.pkl' input file.
if __name__ == '__main__':
    cleaner = GraphCleaner(sys.argv[1])
    cleaner.run()
| [
"numpy.isnan"
] | [((1707, 1718), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (1715, 1718), True, 'import numpy as np\n'), ((1522, 1535), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (1530, 1535), True, 'import numpy as np\n')] |
import sys
import numpy as np
def main():
    """Convert a K-topic result file from the 3-mer (n2) space to the 96
    single-base-context (n1) mutation space and write it out for plotting.

    Command line: argv[1] = word-count threshold, argv[2] = number of topics
    K, argv[3] = cancer type.
    """
    args = sys.argv
    K = int(args[2])
    # Zero-pad single-digit topic counts for the result filename.
    if K > 9:
        topic = args[2]
    else:
        topic = '0' + args[2]
    input_file = 'result/data4_o' + args[1] + '_' + args[3] \
        + '/result_k' + topic + '.txt'
    n2_data = load_data(input_file, K)
    dictionary_file = 'data/dictionary/data4_o' + args[1] \
        + '_' + args[3] + '.txt'
    temp_dictionary = load_dictionary(dictionary_file)
    dictionary = load_dictionary('data/dictionary.txt')
    n1_data = to_n1(n2_data, dictionary, temp_dictionary, K)
    output_file = 'result/data4_o' + args[1] + '_' + args[3] \
        + '/figure/' + args[2] + '/k' + topic + '.txt'
    write_data(output_file, n1_data, K)
def load_data(input_file, K):
    """Read the K topic/word probability rows from a result file.

    The first two lines of the file are headers and are skipped. Each of the
    following K lines holds space-separated probabilities followed by a
    trailing separator, so the last split field is discarded (same parsing
    rule as the original counter-based loop).

    Args:
        input_file: path to the result file.
        K: number of topic rows to read.

    Returns:
        (K, V) float numpy array; V is inferred from the first data row.
    """
    # Context manager closes the handle even if parsing raises (the original
    # only closed it on the success path).
    with open(input_file, 'r') as result:
        lines = result.readlines()
    rows = []
    for line in lines[2:2 + K]:
        fields = line.split(' ')
        # fields[-1] is the leftover after the final separator; drop it.
        rows.append([float(v) for v in fields[:-1]])
    data = np.zeros([K, len(rows[0])])
    for k, row in enumerate(rows):
        data[k, :len(row)] = row
    return data
def load_dictionary(dictionary_file):
    """Read one dictionary entry per line, dropping the trailing newline.

    Each line's final character is stripped (``line[:-1]``), matching the
    original behavior for newline-terminated files.

    Args:
        dictionary_file: path to a text file with one entry per line.

    Returns:
        List of entry strings in file order.
    """
    # Context manager fixes the original's leaked file handle (the file was
    # opened but never closed).
    with open(dictionary_file, 'r') as result:
        return [line[:-1] for line in result]
def to_n1(n2_data, dictionary, temp_dictionary, K):
    """Collapse 3-mer (n2) topic distributions onto the 96 n1 mutation types.

    Each word of temp_dictionary is located in the master dictionary; its
    index (mod 384) encodes forward base / substitution / backward base,
    which maps to one of 96 columns via 24*forward + 4*substitution +
    backward. Probabilities landing on the same column are summed and each
    topic row is renormalized to sum to one.
    """
    V = len(temp_dictionary)
    # Precompute the destination column for every n2 word.
    col_of = [0] * V
    for w, word in enumerate(temp_dictionary):
        pos = dictionary.index(word) % 384
        forward = pos // 96
        substitution = (pos % 96) // 16
        backward = ((pos % 96) % 16) // 4
        col_of[w] = 24 * forward + 4 * substitution + backward
    data = np.zeros([K, 96])
    for k in range(K):
        for w in range(V):
            data[k, col_of[w]] += n2_data[k, w]
        # Renormalize the topic row to a probability distribution.
        data[k, :] /= data[k, :].sum()
    return data
def write_data(output_file, n1_data, K):
    """Write K signature rows, preceded by a two-line '0' header.

    Generalized: columns are taken from n1_data's actual width instead of
    the original hard-coded 96, so the function also works for signature
    matrices of other sizes (backward compatible for 96-column callers).

    Args:
        output_file: destination path.
        n1_data: 2-D numpy array, one signature per row.
        K: number of rows to write.
    """
    n_cols = n1_data.shape[1]
    # Context manager guarantees the file is flushed/closed even on error.
    with open(output_file, 'w') as output:
        output.write('0\n')
        output.write('0\n')
        for k in range(K):
            row = ' '.join(str(n1_data[k, i]) for i in range(n_cols))
            output.write(row + '\n')
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"numpy.zeros"
] | [((2050, 2067), 'numpy.zeros', 'np.zeros', (['[K, 96]'], {}), '([K, 96])\n', (2058, 2067), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import io
import numpy as np
import torch
import torch.nn.functional as F
from .fairseq_dataset import FairseqDataset
from .data_utils import compute_mask_indices, get_buckets, get_bucketed_sizes
# from fairseq.data.audio.audio_utils import (
# parse_path,
# read_from_stored_zip,
# is_sf_audio_data,
# )
import soundfile as sf
import cv2
import torchvision
import math
logger = logging.getLogger(__name__)
class RawAudioDataset(FairseqDataset):
    """Base dataset for paired raw-audio / visual examples.

    Subclasses implement ``__getitem__`` and must fill ``self.sizes`` with
    one audio length (in samples) per example. This base class handles
    batching (collation with padding or aligned cropping), optional
    layer-norm of waveforms, optional mask-index precomputation, and
    bucketing by size.
    """
    def __init__(
        self,
        sample_rate,
        max_sample_size=None,
        min_sample_size=0,
        shuffle=True,
        pad=False,
        normalize=False,
        compute_mask_indices=False,
        **mask_compute_kwargs,
    ):
        super().__init__()
        self.sample_rate = sample_rate
        # Per-example audio lengths in samples; populated by subclasses.
        self.sizes = []
        self.max_sample_size = (
            max_sample_size if max_sample_size is not None else sys.maxsize
        )
        self.min_sample_size = min_sample_size
        self.pad = pad
        self.shuffle = shuffle
        self.normalize = normalize
        self.compute_mask_indices = compute_mask_indices
        # One visual frame covers 0.04 s of audio, i.e. sample_rate * 0.04
        # audio samples per frame; cap the visual length accordingly.
        self.max_visual_frame = math.floor(self.max_sample_size / (self.sample_rate * 0.04))
        if self.compute_mask_indices:
            self.mask_compute_kwargs = mask_compute_kwargs
            # Cache: raw input size -> conv feature length
            # (see _get_mask_indices_dims).
            self._features_size_map = {}
            self._C = mask_compute_kwargs["encoder_embed_dim"]
            # NOTE(review): eval() on a config string — safe only for
            # trusted configuration sources.
            self._conv_feature_layers = eval(mask_compute_kwargs["conv_feature_layers"])
    def __getitem__(self, index):
        # Subclass responsibility.
        raise NotImplementedError()
    def __len__(self):
        return len(self.sizes)
    def postprocess(self, feats, curr_sample_rate):
        """Collapse multi-channel audio to mono, verify the sample rate and
        optionally layer-normalize the waveform."""
        if feats.dim() == 2:
            feats = feats.mean(-1)  # average channels -> mono
        if curr_sample_rate != self.sample_rate:
            raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
        assert feats.dim() == 1, feats.dim()
        if self.normalize:
            with torch.no_grad():
                feats = F.layer_norm(feats, feats.shape)
        return feats
    def crop_to_max_size(self, audio_source, audio_target_size, visual_source, visual_target_size):
        """Crop an (audio, visual) pair from the beginning to the target sizes.

        The visual stream lies along dim 2 of visual_source; the caller
        keeps audio and visual time-aligned by deriving audio_target_size
        from visual_target_size (see collater).
        """
        size = visual_source.size(2)
        diff = size - visual_target_size
        if diff <= 0:
            # Visual already short enough: crop only the audio.
            return audio_source[:audio_target_size],visual_source.squeeze(0)[:, :]
        # # random start and end
        # v_start = np.random.randint(0, diff + 1)
        # v_end = v_start+visual_target_size
        # a_start=round((v_start/112)*0.04*self.sample_rate)
        # a_end=a_start+audio_target_size
        # return audio_source[a_start:a_end],visual_source.squeeze(0)[:,v_start:v_end]
        # start from beginning
        if not self.pad:
            return audio_source[:audio_target_size], visual_source.squeeze(0)[:, :visual_target_size]
        else:
            return audio_source[:audio_target_size], visual_source.squeeze(0)[:, :]
    def _compute_mask_indices(self, dims, padding_mask):
        """Precompute time and channel mask indices for a (B, T, C) batch.

        Note: the bare name ``compute_mask_indices`` below resolves to the
        module-level helper import, not to the boolean attribute
        ``self.compute_mask_indices``.
        """
        B, T, C = dims
        mask_indices, mask_channel_indices = None, None
        if self.mask_compute_kwargs["mask_prob"] > 0:
            mask_indices = compute_mask_indices(
                (B, T),
                padding_mask,
                self.mask_compute_kwargs["mask_prob"],
                self.mask_compute_kwargs["mask_length"],
                self.mask_compute_kwargs["mask_selection"],
                self.mask_compute_kwargs["mask_other"],
                min_masks=2,
                no_overlap=self.mask_compute_kwargs["no_mask_overlap"],
                min_space=self.mask_compute_kwargs["mask_min_space"],
            )
            mask_indices = torch.from_numpy(mask_indices)
        if self.mask_compute_kwargs["mask_channel_prob"] > 0:
            mask_channel_indices = compute_mask_indices(
                (B, C),
                None,
                self.mask_compute_kwargs["mask_channel_prob"],
                self.mask_compute_kwargs["mask_channel_length"],
                self.mask_compute_kwargs["mask_channel_selection"],
                self.mask_compute_kwargs["mask_channel_other"],
                no_overlap=self.mask_compute_kwargs["no_mask_channel_overlap"],
                min_space=self.mask_compute_kwargs["mask_channel_min_space"],
            )
            # Broadcast channel masks across the time dimension.
            mask_channel_indices = (
                torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1)
            )
        return mask_indices, mask_channel_indices
    @staticmethod
    def _bucket_tensor(tensor, num_pad, value):
        # Right-pad the last dimension with `value` so bucketed batches share
        # a common length.
        return F.pad(tensor, (0, num_pad), value=value)
    def collater(self, samples):
        """Batch a list of {'id', 'audio_source', 'visual_source'} samples.

        With self.pad: pad audio up to the longest (capped) size and emit a
        boolean padding mask; the visual streams stay unpadded (see FIXME).
        Without padding: crop every pair down to the shortest visual length,
        re-deriving the audio length from it so both streams stay aligned.
        """
        samples = [s for s in samples if s["audio_source"] is not None]
        if len(samples) == 0:
            return {}
        audio_sources = [s["audio_source"] for s in samples]
        visual_sources = [s["visual_source"] for s in samples]
        audio_sizes = [len(s) for s in audio_sources]
        visual_sizes = [s.size(-1) for s in visual_sources]
        if self.pad:
            audio_target_size = min(max(audio_sizes), self.max_sample_size)
            visual_target_size = min(max(visual_sizes), self.max_visual_frame * 112)
        else:
            # cropping
            audio_target_size = min(min(audio_sizes), self.max_sample_size)
            visual_target_size = min(min(visual_sizes), self.max_visual_frame * 112)
            # Re-derive the audio length from the visual length: each visual
            # frame spans 112 units along the last dim and 0.04 s of audio.
            audio_target_size = int((visual_target_size / 112) * 0.04 * self.sample_rate)
        collated_audio_sources = audio_sources[0].new_zeros(len(audio_sources), audio_target_size)
        collated_visual_sources = list()
        # True where audio is padding; only produced in padding mode.
        audio_padding_mask = (
            torch.BoolTensor(collated_audio_sources.shape).fill_(False) if self.pad else None
        )
        # FIXME: the visual stream is not padded here; if padding is needed,
        # pad to the longest length after the MoCo forward pass and add a
        # matching padding_mask.
        for i, (audio_source, audio_size, visual_source, visual_size) in enumerate(
                zip(audio_sources, audio_sizes, visual_sources, visual_sizes)):
            audio_diff = audio_size - audio_target_size
            if audio_diff == 0:
                # Exact fit: copy through unchanged.
                collated_audio_sources[i] = audio_source
                collated_visual_sources.append(visual_source.squeeze(0))
            elif audio_diff < 0:
                # Shorter than target: zero-pad the audio and mark the mask.
                assert self.pad
                collated_audio_sources[i] = torch.cat(
                    [audio_source, audio_source.new_full((-audio_diff,), 0.0)]
                )
                audio_padding_mask[i, audio_diff:] = True
                collated_visual_sources.append(visual_source.squeeze(0))
            else:
                # Longer than target: crop both streams in lockstep.
                collated_audio_sources[i], tmp = self.crop_to_max_size(audio_source, audio_target_size, visual_source,
                                                                       visual_target_size)
                collated_visual_sources.append(tmp.view(112, -1))
        # NOTE(review): `input` shadows the builtin of the same name.
        input = {"audio_source": collated_audio_sources, "visual_source": collated_visual_sources}
        out = {"id": torch.LongTensor([s["id"] for s in samples])}
        if self.pad:
            input["padding_mask"] = audio_padding_mask
        out["net_input"] = input
        return out
    def _get_mask_indices_dims(self, size, padding=0, dilation=1):
        """Return the conv feature length for a raw input of `size` samples,
        applying each (dim, kernel, stride) layer's length formula; results
        are memoized in self._features_size_map."""
        if size not in self._features_size_map:
            L_in = size
            for (_, kernel_size, stride) in self._conv_feature_layers:
                L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1
                L_out = 1 + L_out // stride
                L_in = L_out
            self._features_size_map[size] = L_out
        return self._features_size_map[size]
    def num_tokens(self, index):
        return self.size(index)
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        if self.pad:
            return self.sizes[index]
        return min(self.sizes[index], self.max_sample_size)
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            # Random permutation as tiebreaker, then (capped) size as the
            # primary sort key; lexsort sorts by the LAST key first.
            order = [np.random.permutation(len(self))]
            order.append(
                np.minimum(
                    np.array(self.sizes),
                    self.max_sample_size,
                )
            )
            return np.lexsort(order)[::-1]
        else:
            return np.arange(len(self))
    def set_bucket_info(self, num_buckets):
        """Partition (capped) example sizes into `num_buckets` size buckets;
        no-op when num_buckets <= 0."""
        self.num_buckets = num_buckets
        if self.num_buckets > 0:
            self._collated_sizes = np.minimum(
                np.array(self.sizes),
                self.max_sample_size,
            )
            self.buckets = get_buckets(
                self._collated_sizes,
                self.num_buckets,
            )
            self._bucketed_sizes = get_bucketed_sizes(
                self._collated_sizes, self.buckets
            )
            logger.info(
                f"{len(self.buckets)} bucket(s) for the audio dataset: "
                f"{self.buckets}"
            )
class FileMMDataset(RawAudioDataset):
    """Multimodal (audio + visual) dataset backed by a TSV manifest.

    Manifest format: the first line is the data root directory; every other
    line has three tab-separated fields — visual file path, audio file path,
    and audio length in samples. Samples whose length falls outside
    [min_sample_size, max_sample_size] are skipped at load time.

    NOTE(review): num_buckets is accepted but never used in this subclass.
    """
    def __init__(
        self,
        manifest_path,
        sample_rate,
        max_sample_size=None,
        min_sample_size=0,
        shuffle=True,
        pad=False,
        normalize=False,
        num_buckets=0,
        compute_mask_indices=False,
        **mask_compute_kwargs,
    ):
        super().__init__(
            sample_rate=sample_rate,
            max_sample_size=max_sample_size,
            min_sample_size=min_sample_size,
            shuffle=shuffle,
            pad=pad,
            normalize=normalize,
            compute_mask_indices=compute_mask_indices,
            **mask_compute_kwargs,
        )
        skipped = 0
        self.audio_fnames = []
        self.visual_fnames = []
        # Manifest line numbers of the samples that were kept.
        self.line_inds = set()
        with open(manifest_path, "r") as f:
            self.root_dir = f.readline().strip()
            for i, line in enumerate(f):
                items = line.strip().split("\t")
                assert len(items) == 3, line
                sz = int(items[2])
                # Filter by audio length (in samples).
                if (min_sample_size is not None and sz < min_sample_size) or (max_sample_size is not None and sz > max_sample_size):
                    skipped += 1
                    continue
                self.visual_fnames.append(items[0])
                self.audio_fnames.append(items[1])
                self.line_inds.add(i)
                self.sizes.append(sz)
        logger.info(
            f"loaded {len(self.visual_fnames)} visual sample, loaded {len(self.audio_fnames)} audio sample,skipped {skipped} samples")
        # Optional: store filename lists as pyarrow arrays (lower memory /
        # faster sharing across dataloader workers).
        try:
            import pyarrow
            self.audio_fnames = pyarrow.array(self.audio_fnames)
            self.visual_fnames = pyarrow.array(self.visual_fnames)
        # NOTE(review): bare except also hides non-ImportError failures.
        except:
            logger.debug(
                "Could not create a pyarrow array. Please install pyarrow for better performance"
            )
            pass
    def __getitem__(self, index):
        """Load one (audio waveform, grayscale visual image) pair."""
        audio_fname = os.path.join(self.root_dir, str(self.audio_fnames[index]))
        visual_fname = os.path.join(self.root_dir, str(self.visual_fnames[index]))
        wav, curr_sample_rate = sf.read(audio_fname)
        audio_feats = torch.from_numpy(wav).float()
        audio_feats = self.postprocess(audio_feats, curr_sample_rate)
        # to_tensor yields a (1, H, W) float tensor scaled to [0, 1].
        img = cv2.imread(visual_fname, cv2.IMREAD_GRAYSCALE)
        visual_feats = torchvision.transforms.functional.to_tensor(img)
        return {"id": index, "audio_source": audio_feats, "visual_source": visual_feats}
class BinarizedAudioDataset(RawAudioDataset):
    """Audio-only dataset whose filenames are stored in a fairseq binarized
    (indexed) dataset under ``data_dir``.

    Expected layout: ``dict.txt`` (filename-token dictionary), the binarized
    ``{split}`` filename dataset, ``{split}.lengths`` (one sample count per
    line) and optionally ``{split}.root`` holding the audio root directory.
    """
    def __init__(
        self,
        data_dir,
        split,
        sample_rate,
        max_sample_size=None,
        min_sample_size=0,
        shuffle=True,
        pad=False,
        normalize=False,
        num_buckets=0,
        compute_mask_indices=False,
        **mask_compute_kwargs,
    ):
        super().__init__(
            sample_rate=sample_rate,
            max_sample_size=max_sample_size,
            min_sample_size=min_sample_size,
            shuffle=shuffle,
            pad=pad,
            normalize=normalize,
            compute_mask_indices=compute_mask_indices,
            **mask_compute_kwargs,
        )
        # Imported here (not at module top) — keeps fairseq.data optional
        # until this dataset class is actually instantiated.
        from fairseq.data import data_utils, Dictionary
        self.fnames_dict = Dictionary.load(os.path.join(data_dir, "dict.txt"))
        # Optional root directory prefix for the audio paths.
        root_path = os.path.join(data_dir, f"{split}.root")
        if os.path.exists(root_path):
            with open(root_path, "r") as f:
                self.root_dir = next(f).strip()
        else:
            self.root_dir = None
        fnames_path = os.path.join(data_dir, split)
        self.fnames = data_utils.load_indexed_dataset(fnames_path, self.fnames_dict)
        lengths_path = os.path.join(data_dir, f"{split}.lengths")
        with open(lengths_path, "r") as f:
            for line in f:
                sz = int(line.rstrip())
                # Sizes below min_sample_size cannot be filtered out here
                # (the binarized filename dataset is fixed), so reject them.
                assert (
                    sz >= min_sample_size
                ), f"Min sample size is not supported for binarized dataset, but found a sample with size {sz}"
                self.sizes.append(sz)
        self.sizes = np.array(self.sizes, dtype=np.int64)
        self.set_bucket_info(num_buckets)
        logger.info(f"loaded {len(self.fnames)} samples")
    def __getitem__(self, index):
        """Load and postprocess the waveform for one example."""
        # Decode the stored token ids back into the original filename.
        fname = self.fnames_dict.string(self.fnames[index], separator="")
        if self.root_dir:
            fname = os.path.join(self.root_dir, fname)
        wav, curr_sample_rate = sf.read(fname)
        feats = torch.from_numpy(wav).float()
        feats = self.postprocess(feats, curr_sample_rate)
        return {"id": index, "source": feats}
| [
"torch.from_numpy",
"soundfile.read",
"torchvision.transforms.functional.to_tensor",
"torch.LongTensor",
"numpy.lexsort",
"math.floor",
"os.path.exists",
"cv2.imread",
"numpy.array",
"torch.nn.functional.layer_norm",
"fairseq.data.data_utils.load_indexed_dataset",
"pyarrow.array",
"torch.no_... | [((610, 637), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (627, 637), False, 'import logging\n'), ((1400, 1460), 'math.floor', 'math.floor', (['(self.max_sample_size / (self.sample_rate * 0.04))'], {}), '(self.max_sample_size / (self.sample_rate * 0.04))\n', (1410, 1460), False, 'import math\n'), ((4813, 4853), 'torch.nn.functional.pad', 'F.pad', (['tensor', '(0, num_pad)'], {'value': 'value'}), '(tensor, (0, num_pad), value=value)\n', (4818, 4853), True, 'import torch.nn.functional as F\n'), ((11541, 11561), 'soundfile.read', 'sf.read', (['audio_fname'], {}), '(audio_fname)\n', (11548, 11561), True, 'import soundfile as sf\n'), ((11699, 11745), 'cv2.imread', 'cv2.imread', (['visual_fname', 'cv2.IMREAD_GRAYSCALE'], {}), '(visual_fname, cv2.IMREAD_GRAYSCALE)\n', (11709, 11745), False, 'import cv2\n'), ((11769, 11817), 'torchvision.transforms.functional.to_tensor', 'torchvision.transforms.functional.to_tensor', (['img'], {}), '(img)\n', (11812, 11817), False, 'import torchvision\n'), ((12804, 12843), 'os.path.join', 'os.path.join', (['data_dir', 'f"""{split}.root"""'], {}), "(data_dir, f'{split}.root')\n", (12816, 12843), False, 'import os\n'), ((12855, 12880), 'os.path.exists', 'os.path.exists', (['root_path'], {}), '(root_path)\n', (12869, 12880), False, 'import os\n'), ((13044, 13073), 'os.path.join', 'os.path.join', (['data_dir', 'split'], {}), '(data_dir, split)\n', (13056, 13073), False, 'import os\n'), ((13096, 13158), 'fairseq.data.data_utils.load_indexed_dataset', 'data_utils.load_indexed_dataset', (['fnames_path', 'self.fnames_dict'], {}), '(fnames_path, self.fnames_dict)\n', (13127, 13158), False, 'from fairseq.data import data_utils, Dictionary\n'), ((13182, 13224), 'os.path.join', 'os.path.join', (['data_dir', 'f"""{split}.lengths"""'], {}), "(data_dir, f'{split}.lengths')\n", (13194, 13224), False, 'import os\n'), ((13579, 13615), 'numpy.array', 'np.array', (['self.sizes'], {'dtype': 'np.int64'}), 
'(self.sizes, dtype=np.int64)\n', (13587, 13615), True, 'import numpy as np\n'), ((13941, 13955), 'soundfile.read', 'sf.read', (['fname'], {}), '(fname)\n', (13948, 13955), True, 'import soundfile as sf\n'), ((3915, 3945), 'torch.from_numpy', 'torch.from_numpy', (['mask_indices'], {}), '(mask_indices)\n', (3931, 3945), False, 'import torch\n'), ((7225, 7269), 'torch.LongTensor', 'torch.LongTensor', (["[s['id'] for s in samples]"], {}), "([s['id'] for s in samples])\n", (7241, 7269), False, 'import torch\n'), ((11038, 11070), 'pyarrow.array', 'pyarrow.array', (['self.audio_fnames'], {}), '(self.audio_fnames)\n', (11051, 11070), False, 'import pyarrow\n'), ((11104, 11137), 'pyarrow.array', 'pyarrow.array', (['self.visual_fnames'], {}), '(self.visual_fnames)\n', (11117, 11137), False, 'import pyarrow\n'), ((12747, 12781), 'os.path.join', 'os.path.join', (['data_dir', '"""dict.txt"""'], {}), "(data_dir, 'dict.txt')\n", (12759, 12781), False, 'import os\n'), ((13873, 13907), 'os.path.join', 'os.path.join', (['self.root_dir', 'fname'], {}), '(self.root_dir, fname)\n', (13885, 13907), False, 'import os\n'), ((2224, 2239), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2237, 2239), False, 'import torch\n'), ((2265, 2297), 'torch.nn.functional.layer_norm', 'F.layer_norm', (['feats', 'feats.shape'], {}), '(feats, feats.shape)\n', (2277, 2297), True, 'import torch.nn.functional as F\n'), ((8615, 8632), 'numpy.lexsort', 'np.lexsort', (['order'], {}), '(order)\n', (8625, 8632), True, 'import numpy as np\n'), ((8873, 8893), 'numpy.array', 'np.array', (['self.sizes'], {}), '(self.sizes)\n', (8881, 8893), True, 'import numpy as np\n'), ((11584, 11605), 'torch.from_numpy', 'torch.from_numpy', (['wav'], {}), '(wav)\n', (11600, 11605), False, 'import torch\n'), ((13972, 13993), 'torch.from_numpy', 'torch.from_numpy', (['wav'], {}), '(wav)\n', (13988, 13993), False, 'import torch\n'), ((5902, 5948), 'torch.BoolTensor', 'torch.BoolTensor', (['collated_audio_sources.shape'], {}), 
'(collated_audio_sources.shape)\n', (5918, 5948), False, 'import torch\n'), ((8500, 8520), 'numpy.array', 'np.array', (['self.sizes'], {}), '(self.sizes)\n', (8508, 8520), True, 'import numpy as np\n'), ((4596, 4634), 'torch.from_numpy', 'torch.from_numpy', (['mask_channel_indices'], {}), '(mask_channel_indices)\n', (4612, 4634), False, 'import torch\n')] |
###############################################################################
#
# Copyright (c) 2016, <NAME>,
# University of Sao Paulo, Sao Paulo, Brazil
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# #############################################################################
import numpy as np
class ParticleFilter(object):
    """Particle filter in the ConDensation style.

    The filter tracks ``num_particles`` hypotheses of a ``num_states``
    dimensional state vector.  Each :meth:`update` call performs the classic
    three-step cycle: resample particles in proportion to their weights,
    propagate them through a linear dynamics model with additive noise, and
    re-weight them with a caller-supplied likelihood function.
    """

    def __init__(self, num_particles, num_states, dynamics_matrix,
                 particle_lower_bounds, particle_upper_bounds,
                 noise_type='gaussian', noise_param1=None, noise_param2=None,
                 final_state_decision_method='weighted_average'):
        """Build the filter.

        Args:
            num_particles: number of particles kept by the filter.
            num_states: dimensionality of the state vector.
            dynamics_matrix: ns x ns square matrix (ns = num_states) applied
                to every particle at propagation time.
            particle_lower_bounds: vector of minimum values of each state.
            particle_upper_bounds: vector of maximum values of each state.
            noise_type: 'gaussian' or 'uniform'.
            noise_param1: None (defaults to zeros) or a vector with
                num_states elements; means for gaussian noise, interval
                lower bounds for uniform noise.
            noise_param2: None (defaults to ones) or a vector with
                num_states elements; standard deviations for gaussian noise,
                interval upper bounds for uniform noise.
            final_state_decision_method: 'best' (highest-weight particle),
                'average' (plain mean) or 'weighted_average' (weighted mean).

        Raises:
            ValueError: if noise_type or final_state_decision_method is
                unknown, or a noise parameter has the wrong length.  (The
                previous behaviour was to skip bad-length parameters
                silently and fail later with AttributeError/NameError.)
        """
        if noise_type not in ('gaussian', 'uniform'):
            raise ValueError("noise_type must be 'gaussian' or 'uniform', "
                             "got %r" % (noise_type,))
        if final_state_decision_method not in (
                'best', 'average', 'weighted_average'):
            raise ValueError("final_state_decision_method must be 'best', "
                             "'average' or 'weighted_average', got %r"
                             % (final_state_decision_method,))
        self._num_particles = num_particles
        self._num_states = num_states
        self._dynamics_matrix = np.array(dynamics_matrix)
        self._particle_lower_bounds = np.array(particle_lower_bounds)
        self._particle_upper_bounds = np.array(particle_upper_bounds)
        self._noise_type = noise_type
        self._noise_param1 = self._checked_noise_param(
            noise_param1, np.zeros(num_states))
        self._noise_param2 = self._checked_noise_param(
            noise_param2, np.ones(num_states))
        self._final_state_decision_method = final_state_decision_method
        self._final_state = np.zeros(num_states)
        self._particles = np.zeros((num_particles, num_states), np.float64)
        self._weights = np.zeros((num_particles, 1), np.float64)
        self._normalized_weights = np.zeros((num_particles, 1), np.float64)
        self._weight_sum = 0.0
        self._cumulative_weights = np.zeros((num_particles, 1), np.float64)
        self._init_weights()

    def _checked_noise_param(self, param, default):
        """Return *param* (or *default* when None); raise on a bad length."""
        if param is None:
            return default
        if len(param) != self._num_states:
            raise ValueError("noise parameter must have %d elements, got %d"
                             % (self._num_states, len(param)))
        return param

    def get_final_state(self):
        """Compute and cache the state estimate from the current particles.

        The estimate is chosen according to final_state_decision_method
        (see :meth:`__init__`) and also returned.
        """
        if self._final_state_decision_method == 'best':
            # argmax on the (N, 1) weight array returns a flat index, which
            # equals the row index because there is only one column.
            index = np.argmax(self._weights)
            final_state = self._particles[index].copy()
        elif self._final_state_decision_method == 'average':
            final_state = np.sum(self._particles, axis=0)
            final_state /= self._num_particles
        else:  # 'weighted_average' -- validated in __init__
            weighted_particles = self._particles * self._normalized_weights
            final_state = np.sum(weighted_particles, axis=0)
        self._final_state = final_state
        return self._final_state

    def init_particles(self, init_method='uniform',
                       init_param1=None, init_param2=None):
        """Spread the particles over the state space.

        Args:
            init_method: 'uniform' or 'gaussian'.
            init_param1: None or a vector with num_states elements.
                Defaults to particle_lower_bounds.  Means for 'gaussian',
                interval lower bounds for 'uniform'.
            init_param2: None or a vector with num_states elements.
                Defaults to particle_upper_bounds.  Diagonal covariance
                entries for 'gaussian', interval upper bounds for 'uniform'.

        Raises:
            ValueError: for an unknown init_method (previously the call
                silently left the particles untouched).
        """
        if init_method == 'gaussian':
            if init_param1 is None or init_param2 is None:
                self._particles = np.random.multivariate_normal(
                    self._particle_lower_bounds,
                    np.diag(self._particle_upper_bounds), self._num_particles)
            else:
                self._particles = np.random.multivariate_normal(
                    init_param1, np.diag(init_param2), self._num_particles)
        elif init_method == 'uniform':
            if init_param1 is None or init_param2 is None:
                self._particles = np.random.uniform(
                    self._particle_lower_bounds, self._particle_upper_bounds,
                    (self._num_particles, self._num_states))
            else:
                self._particles = np.random.uniform(
                    init_param1, init_param2,
                    (self._num_particles, self._num_states))
        else:
            raise ValueError("init_method must be 'gaussian' or 'uniform', "
                             "got %r" % (init_method,))
        self._final_state = self.get_final_state()

    def _init_weights(self):
        """Initialize all particle weights to a uniform distribution."""
        weight = 1.0 / self._num_particles
        self._weights += weight
        self._normalized_weights += weight
        # cumsum is float-safe; the previous np.arange(w, 1.0 + w, w) could
        # produce num_particles + 1 entries due to floating-point rounding.
        self._cumulative_weights = np.cumsum(
            np.full(self._num_particles, weight, np.float64))
        self._weight_sum = 1.0

    def _propagate_particles(self):
        """Apply the linear dynamics plus additive noise to every particle."""
        dynamics_particles = np.dot(self._particles, self._dynamics_matrix)
        if self._noise_type == 'uniform':
            noise = np.random.uniform(
                self._noise_param1, self._noise_param2,
                (self._num_particles, self._num_states))
        else:  # 'gaussian' -- validated in __init__
            noise = np.random.multivariate_normal(
                self._noise_param1, np.diag(self._noise_param2),
                self._num_particles)
        self._particles = dynamics_particles + noise

    def _resample_particles(self):
        """Draw a new particle set, with replacement, proportional to the
        normalized weights (multinomial resampling).
        """
        old_particles = self._particles.copy()
        old_weights = self._weights.copy()
        old_normalized_weights = self._normalized_weights.copy()
        # np.random.choice requires the probabilities to sum to one, which
        # _init_weights / _update_weights guarantee.
        j = np.random.choice(self._num_particles, self._num_particles,
                             p=self._normalized_weights[:, 0])
        self._particles = old_particles[j]
        self._weights = old_weights[j]
        self._normalized_weights = old_normalized_weights[j]

    def update(self, weighting_function, *args):
        """Run one full filter cycle: resample, propagate, re-weight.

        Args:
            weighting_function: callable invoked as
                ``weighting_function(particles, *args)``; must return the
                new weights, one per particle, shaped (num_particles, 1).
            *args: extra arguments forwarded to weighting_function.
        """
        self._resample_particles()
        self._propagate_particles()
        self._update_weights(weighting_function, *args)
        self.get_final_state()

    def _update_weights(self, weighting_function, *args):
        """Re-weight all particles with the supplied likelihood function."""
        self._weights = weighting_function(self._particles, *args)
        self._weight_sum = np.sum(self._weights)
        self._cumulative_weights = np.cumsum(self._weights)
        # Guard against an all-zero likelihood; in that degenerate case the
        # previous normalized weights are kept unchanged.
        if self._weight_sum > 0:
            self._normalized_weights = self._weights / self._weight_sum

    @property
    def normalized_weights(self):
        return self._normalized_weights

    @property
    def num_states(self):
        return self._num_states

    @property
    def particles(self):
        return self._particles

    @property
    def weights(self):
        return self._weights

    @property
    def weight_sum(self):
        return self._weight_sum
| [
"numpy.random.uniform",
"numpy.sum",
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.cumsum",
"numpy.array",
"numpy.arange",
"numpy.random.choice",
"numpy.dot",
"numpy.diag"
] | [((3642, 3667), 'numpy.array', 'np.array', (['dynamics_matrix'], {}), '(dynamics_matrix)\n', (3650, 3667), True, 'import numpy as np\n'), ((3706, 3737), 'numpy.array', 'np.array', (['particle_lower_bounds'], {}), '(particle_lower_bounds)\n', (3714, 3737), True, 'import numpy as np\n'), ((3776, 3807), 'numpy.array', 'np.array', (['particle_upper_bounds'], {}), '(particle_upper_bounds)\n', (3784, 3807), True, 'import numpy as np\n'), ((4303, 4323), 'numpy.zeros', 'np.zeros', (['num_states'], {}), '(num_states)\n', (4311, 4323), True, 'import numpy as np\n'), ((4350, 4399), 'numpy.zeros', 'np.zeros', (['(num_particles, num_states)', 'np.float64'], {}), '((num_particles, num_states), np.float64)\n', (4358, 4399), True, 'import numpy as np\n'), ((4424, 4464), 'numpy.zeros', 'np.zeros', (['(num_particles, 1)', 'np.float64'], {}), '((num_particles, 1), np.float64)\n', (4432, 4464), True, 'import numpy as np\n'), ((4500, 4540), 'numpy.zeros', 'np.zeros', (['(num_particles, 1)', 'np.float64'], {}), '((num_particles, 1), np.float64)\n', (4508, 4540), True, 'import numpy as np\n'), ((4607, 4647), 'numpy.zeros', 'np.zeros', (['(num_particles, 1)', 'np.float64'], {}), '((num_particles, 1), np.float64)\n', (4615, 4647), True, 'import numpy as np\n'), ((7758, 7809), 'numpy.arange', 'np.arange', (['weight', '(1.0 + weight)', 'weight', 'np.float64'], {}), '(weight, 1.0 + weight, weight, np.float64)\n', (7767, 7809), True, 'import numpy as np\n'), ((8017, 8063), 'numpy.dot', 'np.dot', (['self._particles', 'self._dynamics_matrix'], {}), '(self._particles, self._dynamics_matrix)\n', (8023, 8063), True, 'import numpy as np\n'), ((8855, 8952), 'numpy.random.choice', 'np.random.choice', (['self._num_particles', 'self._num_particles'], {'p': 'self._normalized_weights[:, 0]'}), '(self._num_particles, self._num_particles, p=self.\n _normalized_weights[:, 0])\n', (8871, 8952), True, 'import numpy as np\n'), ((10356, 10377), 'numpy.sum', 'np.sum', (['self._weights'], {}), 
'(self._weights)\n', (10362, 10377), True, 'import numpy as np\n'), ((10413, 10437), 'numpy.cumsum', 'np.cumsum', (['self._weights'], {}), '(self._weights)\n', (10422, 10437), True, 'import numpy as np\n'), ((3912, 3932), 'numpy.zeros', 'np.zeros', (['num_states'], {}), '(num_states)\n', (3920, 3932), True, 'import numpy as np\n'), ((4091, 4110), 'numpy.ones', 'np.ones', (['num_states'], {}), '(num_states)\n', (4098, 4110), True, 'import numpy as np\n'), ((4918, 4942), 'numpy.argmax', 'np.argmax', (['self._weights'], {}), '(self._weights)\n', (4927, 4942), True, 'import numpy as np\n'), ((8126, 8229), 'numpy.random.uniform', 'np.random.uniform', (['self._noise_param1', 'self._noise_param2', '(self._num_particles, self._num_states)'], {}), '(self._noise_param1, self._noise_param2, (self.\n _num_particles, self._num_states))\n', (8143, 8229), True, 'import numpy as np\n'), ((5086, 5117), 'numpy.sum', 'np.sum', (['self._particles'], {'axis': '(0)'}), '(self._particles, axis=0)\n', (5092, 5117), True, 'import numpy as np\n'), ((5337, 5371), 'numpy.sum', 'np.sum', (['weighted_particles'], {'axis': '(0)'}), '(weighted_particles, axis=0)\n', (5343, 5371), True, 'import numpy as np\n'), ((6735, 6771), 'numpy.diag', 'np.diag', (['self._particle_upper_bounds'], {}), '(self._particle_upper_bounds)\n', (6742, 6771), True, 'import numpy as np\n'), ((6910, 6930), 'numpy.diag', 'np.diag', (['init_param2'], {}), '(init_param2)\n', (6917, 6930), True, 'import numpy as np\n'), ((7085, 7205), 'numpy.random.uniform', 'np.random.uniform', (['self._particle_lower_bounds', 'self._particle_upper_bounds', '(self._num_particles, self._num_states)'], {}), '(self._particle_lower_bounds, self._particle_upper_bounds,\n (self._num_particles, self._num_states))\n', (7102, 7205), True, 'import numpy as np\n'), ((7295, 7384), 'numpy.random.uniform', 'np.random.uniform', (['init_param1', 'init_param2', '(self._num_particles, self._num_states)'], {}), '(init_param1, init_param2, (self._num_particles, 
self.\n _num_states))\n', (7312, 7384), True, 'import numpy as np\n'), ((8395, 8422), 'numpy.diag', 'np.diag', (['self._noise_param2'], {}), '(self._noise_param2)\n', (8402, 8422), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from os import makedirs
from Team import Team
# Meaning of these abreviations are in data_abreviation.txt
# Raw match columns retained from the games CSV: full-time home/away goals,
# shots on target and corners.
COLUMNS_TO_KEEP = ['HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'HST', 'AST', 'HC', 'AC']
# Column names of the engineered feature rows built by
# PremierLeague.read_games_file: home features, away features, their
# differentials, league goal averages, then the labels (result, >1.5, >2.5).
FEATURES_NAME = ['id','HN', 'AN',
                'HAR', 'HDR', 'HMR', 'HOR', 'HPKST', 'HPKG', 'HPKC', 'HGD', 'HS', 'HWS', 'HF',
                'AAR', 'ADR', 'AMR', 'AOR', 'APKST', 'APKG', 'APKC', 'AGD', 'AS', 'AWS', 'AF',
                'PKSTD', 'PKGD', 'PKCD', 'GDD', 'ARD', 'DRD', 'MRD', 'ORD', 'SD', 'WSD', 'FD',
                'avgGoalSH', 'avgGoalSA', 'avgGoalSHH', 'avgGoalCHH', 'avgGoalSAA', 'avgGoalCAA',
                'HR', '>1.5', '>2.5']
class PremierLeague:
    """Builds a feature dataset for one Premier League season, replaying the
    games in order and updating per-team statistics and the league ranking.

    Attributes
    ----------
    _teams : dict
        Maps team name to its Team object (current stats and ratings).
    _gamesPath : str
        Path of the CSV file containing the data of the games.
    _ratesPath : str or bool
        Path of the CSV file containing the team ratings, or False when the
        ratings were passed in directly (stored in ``_ratings``).
    _k : int
        Number of past games to consider for rolling features.
    """
    def __init__(self, gamesPath, ratesPath, k, season, firstGameId):
        self._gameId = firstGameId
        self._teams = {}
        self._ranking = None
        self._dataset = 0
        self._gamesPath = gamesPath
        self._k = k
        self._season = season
        self._goalScoredH = []
        self._goalScoredA = []
        self._gamesPlayed = 0
        self._avgGoalScoredHome = []
        self._avgGoalScoredAway = []
        # ratesPath may be either a CSV path or an already-loaded ratings
        # DataFrame (used by the update path).
        if isinstance(ratesPath, str):
            self._ratesPath = ratesPath
        else:
            self._ratings = ratesPath
            self._ratesPath = False

    def get_dataset(self):
        """Read the games CSV and return the engineered feature DataFrame."""
        print(self._gamesPath)
        games = pd.read_csv(self._gamesPath, encoding= 'unicode_escape')
        df = self.read_games_file(games)
        return pd.DataFrame(df, columns=FEATURES_NAME)

    def get_dataset_update(self, games):
        """Like get_dataset, but for an already-loaded games DataFrame."""
        df = self.read_games_file(games)
        return pd.DataFrame(df, columns=FEATURES_NAME)

    def read_games_file(self, games):
        """Replay every game, updating team/league state, and collect one
        feature row per game once both teams have played at least k games.
        """
        games = games[COLUMNS_TO_KEEP]
        self._teams = self.create_teams(games['HomeTeam'])
        dataset = []
        # NOTE(review): these seasons' CSVs appear to carry a trailing bad
        # row, hence the -1 — confirm against the data files.
        if self._season in ['2005', '2012', '2013', '2015']: nbGames = games.shape[0] - 1
        else: nbGames = games.shape[0]
        for i in range(nbGames):
            game = games.iloc[i]
            self._gameId+=1
            homeTeam = game['HomeTeam'] # name of the home team
            awayTeam = game['AwayTeam'] # name of the away team
            # If both teams has played at least k+1 game then this game can be put into the dataset
            # so I need to increment the number of games played in each team in order to keep a counter
            # put getters ? The "Pythonic" way is not to use "getters" and "setters", but to use plain attributes
            if self._teams[homeTeam]._gamesPlayed >= self._k and self._teams[awayTeam]._gamesPlayed >= self._k:
                homeStatistics = self._teams[homeTeam].compute_statistics(self._k)
                awayStatistics = self._teams[awayTeam].compute_statistics(self._k)
                differentialStatistics = Team.compute_differential_statistics(homeStatistics, awayStatistics)
                homeRatings = self._teams[homeTeam].get_ratings()
                awayRatings = self._teams[awayTeam].get_ratings()
                differentialRatings = Team.compute_differential_ratings(homeRatings, awayRatings)
                homeStreak = self._teams[homeTeam].compute_streak(self._k)
                awayStreak = self._teams[awayTeam].compute_streak(self._k)
                differentialStreak = Team.compute_differential_streak(homeStreak, awayStreak)
                homeWeightedStreak = self._teams[homeTeam].compute_weighted_streak(self._k)
                awayWeightedStreak = self._teams[awayTeam].compute_weighted_streak(self._k)
                differentialWeightedStreak = Team.compute_differential_weighted_streak(homeWeightedStreak, awayWeightedStreak)
                homeForm = self._teams[homeTeam].get_form()
                awayForm = self._teams[awayTeam].get_form()
                differentialForm = Team.compute_differential_form(homeForm, awayForm)
                homeFeatures = homeRatings+homeStatistics+homeStreak+homeWeightedStreak+homeForm
                awayFeatures = awayRatings+awayStatistics+awayStreak+awayWeightedStreak+awayForm
                differentialFeatures = differentialStatistics+differentialRatings+differentialStreak+differentialWeightedStreak+differentialForm
                avgGoalsFeatures = self.get_average_goals(self._teams[homeTeam], self._teams[awayTeam])
                # Home win (3), home draw (1), home lose (0)
                # > 1.5 goals = True
                # > 2.5 goals = True
                gameGoalsResults = self.result_15_25(game['FTHG'], game['FTAG'])
                totalFeatures = (self._gameId,) + (homeTeam, awayTeam) + homeFeatures + awayFeatures + differentialFeatures + avgGoalsFeatures + gameGoalsResults
                dataset.append(totalFeatures)
            self.update_ranking(self._teams[homeTeam]._name, self._teams[awayTeam]._name, game['FTHG'], game['FTAG'], self._teams)
            self.updateSeasonStats(game['FTHG'], game['FTAG'])
            self._teams[homeTeam], self._teams[awayTeam] = self.update_teams(game, self._teams[homeTeam], self._teams[awayTeam], self._gameId)
        return dataset

    def create_teams(self, teams):
        """Create a Team object per distinct team name and reset the ranking
        table.  Ratings come from _ratesPath or the in-memory _ratings.
        """
        try:
            ratings = pd.read_csv(self._ratesPath)
        except:
            # NOTE(review): bare except — also hides genuine read errors;
            # consider catching (ValueError, OSError) explicitly.
            print("Update dataset")
            ratings = self._ratings
        # dict.fromkeys keeps first-seen order while removing duplicates;
        # `team==team` filters out NaN entries (NaN != NaN).
        teams = list(dict.fromkeys(teams.tolist()))
        teams = sorted([team for team in teams if team==team])
        dictTeams = {}
        ranking = {} # name, ranking, gd, gs, gc
        for i, team in enumerate(teams):
            teamRates = ratings.loc[ratings['name'] == team]
            attackRating, defenseRating, midFieldRating, overallRating = self.get_ratings(teamRates)
            dictTeams[team] = Team(team, attackRating, defenseRating, midFieldRating, overallRating, 1)
            ranking[team] = [0, 0, 0, 0]
        self._ranking = pd.DataFrame.from_dict(ranking, orient='index',columns=['pts', 'gd', 'gs', 'gc'])
        return dictTeams

    def get_ratings(self, teamRates):
        """Extract (attack, defense, midField, overall) from a ratings row."""
        attackRate = teamRates['attack'].values[0]
        defenseRate = teamRates['defense'].values[0]
        midFieldRate = teamRates['midField'].values[0]
        overallRate = teamRates['overall'].values[0]
        return attackRate, defenseRate, midFieldRate, overallRate

    # League ranking rules
    # 1. points
    # 2. GD
    # 3. Nb of goals scored
    def update_ranking(self, homeTeamName, awayTeamName, homeGs, awayGs, teams):
        """Apply one game's result to the ranking table; every 10 games
        record each team's current rank.
        """
        # NOTE(review): chained indexing (df.loc[x]['col'] += ...) relies on
        # the row lookup returning a view; with modern pandas this can fail
        # silently (SettingWithCopy).  Prefer df.loc[x, 'col'] += ...
        if homeGs > awayGs:
            self._ranking.loc[homeTeamName]['pts']+=3
        elif homeGs < awayGs:
            self._ranking.loc[awayTeamName]['pts']+=3
        else:
            self._ranking.loc[homeTeamName]['pts']+=1
            self._ranking.loc[awayTeamName]['pts']+=1
        self._ranking.loc[homeTeamName]['gs']+=homeGs
        self._ranking.loc[homeTeamName]['gc']+=awayGs
        self._ranking.loc[homeTeamName]['gd']+=homeGs-awayGs
        self._ranking.loc[awayTeamName]['gs']+=awayGs
        self._ranking.loc[awayTeamName]['gc']+=homeGs
        self._ranking.loc[awayTeamName]['gd']+=awayGs-homeGs
        self._ranking = self._ranking.sort_values(by=['pts', 'gd', 'gs', 'gc'], ascending=False)
        self._gamesPlayed += 1
        if not self._gamesPlayed%10:
            for team in teams.keys():
                teams[team]._rank.append(self.get_rank(team))

    def update_teams(self, game, homeTeam, awayTeam, gameId):
        """Push one game's statistics into both Team objects."""
        homeTeam.update(game['FTHG'], game['FTAG'], game['HST'], game['HC'], awayTeam._form[-1], 'H', gameId)
        # -2 and not -1 otherwise it will update the awayTeam form value with the updated homeTeam form value !
        awayTeam.update(game['FTAG'], game['FTHG'], game['AST'], game['AC'], homeTeam._form[-2], 'A', gameId)
        return homeTeam, awayTeam

    def get_rank(self, teamName):
        """Return the 1-based position of teamName in the current ranking."""
        return self._ranking.index.to_list().index(teamName)+1

    def get_average_goals(self, homeTeam, awayTeam):
        """Return the league/team goal averages used as features."""
        avgGoalSH = self._avgGoalScoredHome[-1] # avg of goals scored at home in the league
        avgGoalSA = self._avgGoalScoredAway[-1] # avg of goals scored at away in the league
        avgGoalSHH = np.mean(homeTeam._goalScoredH) # avg of goals scored at home by home team
        avgGoalCHH = np.mean(homeTeam._goalConcededH) # avg of goals conceded at home by home team
        avgGoalSAA = np.mean(awayTeam._goalScoredA) # avg of goals scored away by away team
        avgGoalCAA = np.mean(awayTeam._goalConcededA) # avg of goals conceded away by away team
        return avgGoalSH, avgGoalSA, avgGoalSHH, avgGoalCHH, avgGoalSAA, avgGoalCAA

    def result_15_25(self, homeGoals, awayGoals):
        """Return (result, over 1.5 goals, over 2.5 goals) labels where
        result is 3 for a home win, 1 for a draw, 0 for a home loss.
        """
        if homeGoals >= awayGoals:
            if homeGoals == awayGoals: res = 1
            else: res = 3
        else: res = 0
        totalGoals = homeGoals+awayGoals
        if totalGoals > 1.5: goal15 = True
        else : goal15 = False
        if totalGoals > 2.5: goal25 = True
        else : goal25 = False
        return res, goal15, goal25

    def updateSeasonStats(self, homeScoredGoals, awayScoredGoals):
        """Append one game's goals and the season-to-date averages."""
        self._goalScoredH.append(homeScoredGoals)
        self._goalScoredA.append(awayScoredGoals)
        self._avgGoalScoredHome.append(np.mean(self._goalScoredH))
        self._avgGoalScoredAway.append(np.mean(self._goalScoredA))

    def save_dataset(self, dataset, year, pathDir):
        """Write the feature DataFrame to pathDir/dataset_<year>.csv."""
        try:
            makedirs(pathDir, exist_ok = True)
            dataset.to_csv(pathDir+'dataset_'+year+'.csv', index = False, header=True)
            print(year + " dataset saved successfully!")
        except OSError as error:
            print(year + " dataset directory can not be created or the dataset cannot be saved")

    def save_season_data(self, year, pathDir):
        """Write the per-game goal series and averages to a season CSV."""
        try:
            makedirs(pathDir, exist_ok = True)
            # NOTE(review): local name `dict` shadows the builtin.
            dict = {
                'GoalSH' : self._goalScoredH,
                'GoalSA' : self._goalScoredA,
                'AvgGoalSH' : self._avgGoalScoredHome,
                'AvgGoalSA' : self._avgGoalScoredAway
            }
            df = pd.DataFrame(dict)
            df.to_csv(pathDir+'data_season_'+year+'.csv', index = False, header=True)
            print(year + " season data saved successfully!")
        except OSError as error:
            print(year + " season data directory can not be created or the season data cannot be saved")

    def save_data_teams(self, pathDir, year):
        """Persist every Team object's data under pathDir/<team name>."""
        try:
            makedirs(pathDir, exist_ok = True)
            print(year + " teams directory created successfully (or already existed)")
        except OSError as error:
            print(year + " teams directory directory can not be created")
        try:
            for team in self._teams.keys():
                self._teams[team].save_team_data(pathDir+'/'+self._teams[team]._name, year)
            print(year + " teams files saved succesfully!")
        except:
            # NOTE(review): bare except hides the actual failure reason.
            print(year + " teams files cannot be created")
def get_data_teams(self, year):
frames = []
for team in self._teams.keys():
frames.append(self._teams[team].get_team_data(year))
return pd.concat(frames).sort_values(by='id', ascending=True) | [
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"os.makedirs",
"pandas.read_csv",
"Team.Team.compute_differential_statistics",
"Team.Team.compute_differential_streak",
"Team.Team",
"numpy.mean",
"Team.Team.compute_differential_weighted_streak",
"Team.Team.compute_differential_form",
"Team.Team... | [((1946, 2001), 'pandas.read_csv', 'pd.read_csv', (['self._gamesPath'], {'encoding': '"""unicode_escape"""'}), "(self._gamesPath, encoding='unicode_escape')\n", (1957, 2001), True, 'import pandas as pd\n'), ((2059, 2098), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'columns': 'FEATURES_NAME'}), '(df, columns=FEATURES_NAME)\n', (2071, 2098), True, 'import pandas as pd\n'), ((2202, 2241), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'columns': 'FEATURES_NAME'}), '(df, columns=FEATURES_NAME)\n', (2214, 2241), True, 'import pandas as pd\n'), ((6582, 6668), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['ranking'], {'orient': '"""index"""', 'columns': "['pts', 'gd', 'gs', 'gc']"}), "(ranking, orient='index', columns=['pts', 'gd', 'gs',\n 'gc'])\n", (6604, 6668), True, 'import pandas as pd\n'), ((8876, 8906), 'numpy.mean', 'np.mean', (['homeTeam._goalScoredH'], {}), '(homeTeam._goalScoredH)\n', (8883, 8906), True, 'import numpy as np\n'), ((8971, 9003), 'numpy.mean', 'np.mean', (['homeTeam._goalConcededH'], {}), '(homeTeam._goalConcededH)\n', (8978, 9003), True, 'import numpy as np\n'), ((9070, 9100), 'numpy.mean', 'np.mean', (['awayTeam._goalScoredA'], {}), '(awayTeam._goalScoredA)\n', (9077, 9100), True, 'import numpy as np\n'), ((9162, 9194), 'numpy.mean', 'np.mean', (['awayTeam._goalConcededA'], {}), '(awayTeam._goalConcededA)\n', (9169, 9194), True, 'import numpy as np\n'), ((5905, 5933), 'pandas.read_csv', 'pd.read_csv', (['self._ratesPath'], {}), '(self._ratesPath)\n', (5916, 5933), True, 'import pandas as pd\n'), ((6443, 6516), 'Team.Team', 'Team', (['team', 'attackRating', 'defenseRating', 'midFieldRating', 'overallRating', '(1)'], {}), '(team, attackRating, defenseRating, midFieldRating, overallRating, 1)\n', (6447, 6516), False, 'from Team import Team\n'), ((9937, 9963), 'numpy.mean', 'np.mean', (['self._goalScoredH'], {}), '(self._goalScoredH)\n', (9944, 9963), True, 'import numpy as np\n'), ((10004, 10030), 'numpy.mean', 
'np.mean', (['self._goalScoredA'], {}), '(self._goalScoredA)\n', (10011, 10030), True, 'import numpy as np\n'), ((10114, 10146), 'os.makedirs', 'makedirs', (['pathDir'], {'exist_ok': '(True)'}), '(pathDir, exist_ok=True)\n', (10122, 10146), False, 'from os import makedirs\n'), ((10500, 10532), 'os.makedirs', 'makedirs', (['pathDir'], {'exist_ok': '(True)'}), '(pathDir, exist_ok=True)\n', (10508, 10532), False, 'from os import makedirs\n'), ((10792, 10810), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (10804, 10810), True, 'import pandas as pd\n'), ((11168, 11200), 'os.makedirs', 'makedirs', (['pathDir'], {'exist_ok': '(True)'}), '(pathDir, exist_ok=True)\n', (11176, 11200), False, 'from os import makedirs\n'), ((3422, 3490), 'Team.Team.compute_differential_statistics', 'Team.compute_differential_statistics', (['homeStatistics', 'awayStatistics'], {}), '(homeStatistics, awayStatistics)\n', (3458, 3490), False, 'from Team import Team\n'), ((3662, 3721), 'Team.Team.compute_differential_ratings', 'Team.compute_differential_ratings', (['homeRatings', 'awayRatings'], {}), '(homeRatings, awayRatings)\n', (3695, 3721), False, 'from Team import Team\n'), ((3910, 3966), 'Team.Team.compute_differential_streak', 'Team.compute_differential_streak', (['homeStreak', 'awayStreak'], {}), '(homeStreak, awayStreak)\n', (3942, 3966), False, 'from Team import Team\n'), ((4197, 4282), 'Team.Team.compute_differential_weighted_streak', 'Team.compute_differential_weighted_streak', (['homeWeightedStreak', 'awayWeightedStreak'], {}), '(homeWeightedStreak,\n awayWeightedStreak)\n', (4238, 4282), False, 'from Team import Team\n'), ((4435, 4485), 'Team.Team.compute_differential_form', 'Team.compute_differential_form', (['homeForm', 'awayForm'], {}), '(homeForm, awayForm)\n', (4465, 4485), False, 'from Team import Team\n'), ((11877, 11894), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (11886, 11894), True, 'import pandas as pd\n')] |
import unittest
import numpy as np
import h5py
from ..fourigui.fourigui import Gui
class TestGui(unittest.TestCase):
    """Exercise fourigui's ``Gui``: data loading, FFT, and q-range cutoff.

    Each test drives the Gui through its public entry points and compares
    the resulting ``cube`` attribute against reference datasets stored as
    HDF5 files under ``diffpy/tests/testdata``.
    """

    def setUp(self):
        # GUI under test
        self.test_gui = Gui()
        # reference datasets (h5py datasets are read lazily on access)
        self.test_sofq = h5py.File('diffpy/tests/testdata/sofq.h5')['data']
        self.test_sofq_cut_10to40px = h5py.File('diffpy/tests/testdata/sofq_cut_10to40px.h5')['data']
        self.test_sofq_cut_15to35px = h5py.File('diffpy/tests/testdata/sofq_cut_15to35px.h5')['data']
        self.test_gofr = h5py.File('diffpy/tests/testdata/gofr.h5')['data']
        self.test_gofr_cut_10to40px = h5py.File('diffpy/tests/testdata/gofr_from_sofq_cut_10to40px.h5')['data']
        self.test_gofr_cut_15to35px = h5py.File('diffpy/tests/testdata/gofr_from_sofq_cut_15to35px.h5')['data']

    # ---- helpers -------------------------------------------------------

    def _load_cube(self, path):
        """Type *path* into the filename entry, load it, and return the cube."""
        self.test_gui.filename_entry.delete(0, 'end')
        self.test_gui.filename_entry.insert(0, path)
        self.test_gui.load_cube()
        return self.test_gui.cube

    def _fft(self, cube):
        """Run the Gui's FFT on *cube* and return the transformed cube."""
        # overwrite plot_plane which requires the not-yet-initialized attribute im
        self.test_gui.plot_plane = lambda *a, **b: ()
        self.test_gui.cube = cube
        self.test_gui.fft()
        return self.test_gui.cube

    def _applycutoff(self, qmin, qmax):
        """Apply a [qmin, qmax] cutoff to the plain S(Q) cube; return the result."""
        self.test_gui.plot_plane = lambda *a, **b: ()
        self.test_gui.cube = self.test_sofq
        self.test_gui.qminentry.insert(0, qmin)
        self.test_gui.qmaxentry.insert(0, qmax)
        self.test_gui.applycutoff()
        return self.test_gui.cube

    def _assert_cubes_close(self, result, expected, ignore_nan=False):
        """Assert element-wise closeness; optionally map NaNs to zero first."""
        if ignore_nan:
            result = np.nan_to_num(result)
            expected = np.nan_to_num(expected)
        self.assertTrue(np.allclose(result, expected))

    # ---- load_cube -----------------------------------------------------

    def test_load_cube_testdataset1(self):
        result = self._load_cube('diffpy/tests/testdata/sofq.h5')
        self._assert_cubes_close(result, self.test_sofq)

    def test_load_cube_testdataset2(self):
        result = self._load_cube('diffpy/tests/testdata/sofq_cut_10to40px.h5')
        self._assert_cubes_close(result, self.test_sofq_cut_10to40px, ignore_nan=True)

    def test_load_cube_testdataset3(self):
        result = self._load_cube('diffpy/tests/testdata/sofq_cut_15to35px.h5')
        self._assert_cubes_close(result, self.test_sofq_cut_15to35px, ignore_nan=True)

    # ---- fft -----------------------------------------------------------

    def test_fft_testdataset1(self):
        result = self._fft(self.test_sofq)
        self._assert_cubes_close(result, self.test_gofr)

    def test_fft_testdataset2(self):
        result = self._fft(self.test_sofq_cut_10to40px)
        self._assert_cubes_close(result, self.test_gofr_cut_10to40px)

    def test_fft_testdataset3(self):
        result = self._fft(self.test_sofq_cut_15to35px)
        self._assert_cubes_close(result, self.test_gofr_cut_15to35px)

    # ---- applycutoff ---------------------------------------------------

    def test_applycutoff_range1(self):
        result = self._applycutoff('10', '40')
        self._assert_cubes_close(result, self.test_sofq_cut_10to40px, ignore_nan=True)

    def test_applycutoff_range2(self):
        result = self._applycutoff('15', '35')
        self._assert_cubes_close(result, self.test_sofq_cut_15to35px, ignore_nan=True)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"h5py.File",
"numpy.nan_to_num",
"numpy.allclose"
] | [((4112, 4127), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4125, 4127), False, 'import unittest\n'), ((245, 287), 'h5py.File', 'h5py.File', (['"""diffpy/tests/testdata/sofq.h5"""'], {}), "('diffpy/tests/testdata/sofq.h5')\n", (254, 287), False, 'import h5py\n'), ((334, 389), 'h5py.File', 'h5py.File', (['"""diffpy/tests/testdata/sofq_cut_10to40px.h5"""'], {}), "('diffpy/tests/testdata/sofq_cut_10to40px.h5')\n", (343, 389), False, 'import h5py\n'), ((436, 491), 'h5py.File', 'h5py.File', (['"""diffpy/tests/testdata/sofq_cut_15to35px.h5"""'], {}), "('diffpy/tests/testdata/sofq_cut_15to35px.h5')\n", (445, 491), False, 'import h5py\n'), ((525, 567), 'h5py.File', 'h5py.File', (['"""diffpy/tests/testdata/gofr.h5"""'], {}), "('diffpy/tests/testdata/gofr.h5')\n", (534, 567), False, 'import h5py\n'), ((614, 679), 'h5py.File', 'h5py.File', (['"""diffpy/tests/testdata/gofr_from_sofq_cut_10to40px.h5"""'], {}), "('diffpy/tests/testdata/gofr_from_sofq_cut_10to40px.h5')\n", (623, 679), False, 'import h5py\n'), ((726, 791), 'h5py.File', 'h5py.File', (['"""diffpy/tests/testdata/gofr_from_sofq_cut_15to35px.h5"""'], {}), "('diffpy/tests/testdata/gofr_from_sofq_cut_15to35px.h5')\n", (735, 791), False, 'import h5py\n'), ((1120, 1155), 'numpy.allclose', 'np.allclose', (['result', 'self.test_sofq'], {}), '(result, self.test_sofq)\n', (1131, 1155), True, 'import numpy as np\n'), ((2322, 2357), 'numpy.allclose', 'np.allclose', (['result', 'self.test_gofr'], {}), '(result, self.test_gofr)\n', (2333, 2357), True, 'import numpy as np\n'), ((2711, 2759), 'numpy.allclose', 'np.allclose', (['result', 'self.test_gofr_cut_10to40px'], {}), '(result, self.test_gofr_cut_10to40px)\n', (2722, 2759), True, 'import numpy as np\n'), ((3113, 3161), 'numpy.allclose', 'np.allclose', (['result', 'self.test_gofr_cut_15to35px'], {}), '(result, self.test_gofr_cut_15to35px)\n', (3124, 3161), True, 'import numpy as np\n'), ((1502, 1523), 'numpy.nan_to_num', 'np.nan_to_num', (['result'], {}), 
'(result)\n', (1515, 1523), True, 'import numpy as np\n'), ((1525, 1567), 'numpy.nan_to_num', 'np.nan_to_num', (['self.test_sofq_cut_10to40px'], {}), '(self.test_sofq_cut_10to40px)\n', (1538, 1567), True, 'import numpy as np\n'), ((1915, 1936), 'numpy.nan_to_num', 'np.nan_to_num', (['result'], {}), '(result)\n', (1928, 1936), True, 'import numpy as np\n'), ((1938, 1980), 'numpy.nan_to_num', 'np.nan_to_num', (['self.test_sofq_cut_15to35px'], {}), '(self.test_sofq_cut_15to35px)\n', (1951, 1980), True, 'import numpy as np\n'), ((3553, 3574), 'numpy.nan_to_num', 'np.nan_to_num', (['result'], {}), '(result)\n', (3566, 3574), True, 'import numpy as np\n'), ((3576, 3618), 'numpy.nan_to_num', 'np.nan_to_num', (['self.test_sofq_cut_10to40px'], {}), '(self.test_sofq_cut_10to40px)\n', (3589, 3618), True, 'import numpy as np\n'), ((4011, 4032), 'numpy.nan_to_num', 'np.nan_to_num', (['result'], {}), '(result)\n', (4024, 4032), True, 'import numpy as np\n'), ((4034, 4076), 'numpy.nan_to_num', 'np.nan_to_num', (['self.test_sofq_cut_15to35px'], {}), '(self.test_sofq_cut_15to35px)\n', (4047, 4076), True, 'import numpy as np\n')] |
"""Run TrackNet on a video: predict the tennis-ball position in every frame
and write an annotated copy of the video with the recent trajectory drawn.

Usage:
    --input_video_path PATH --save_weights_path PATH --n_classes N
    [--output_video_path PATH]
"""
import argparse
import Models
import queue
import cv2
import numpy as np
from PIL import Image, ImageDraw

#parse parameters
parser = argparse.ArgumentParser()
parser.add_argument("--input_video_path", type=str)
parser.add_argument("--output_video_path", type=str, default = "")
parser.add_argument("--save_weights_path", type = str )
parser.add_argument("--n_classes", type=int )
args = parser.parse_args()
input_video_path = args.input_video_path
output_video_path = args.output_video_path
save_weights_path = args.save_weights_path
n_classes = args.n_classes

if output_video_path == "":
	#output video in same path as the input, with a _TrackNet suffix
	output_video_path = input_video_path.split('.')[0] + "_TrackNet.mp4"

#get video fps&video size
video = cv2.VideoCapture(input_video_path)
fps = int(video.get(cv2.CAP_PROP_FPS))
output_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
output_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))

#start from first frame
currentFrame = 0

#width and height of the frames fed into TrackNet
width , height = 640, 360
img, img1, img2 = None, None, None

#load TrackNet model
modelFN = Models.TrackNet.TrackNet
m = modelFN( n_classes , input_height=height, input_width=width )
m.compile(loss='categorical_crossentropy', optimizer= 'adadelta' , metrics=['accuracy'])
m.load_weights( save_weights_path )

# In order to draw the trajectory of tennis, we need to save the coordinate of previous 7 frames
q = queue.deque()
for i in range(0,8):
	q.appendleft(None)

#save prediction images as video
#Tutorial: https://stackoverflow.com/questions/33631489/error-during-saving-a-video-using-python-and-opencv
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output_video = cv2.VideoWriter(output_video_path,fourcc, fps, (output_width,output_height))

#both first and second frames cant be predict, so we directly write the frames to output video
#capture frame-by-frame
video.set(1,currentFrame);
ret, img1 = video.read()
#write image to video
output_video.write(img1)
currentFrame +=1
#resize it
img1 = cv2.resize(img1, ( width , height ))
#input must be float type
img1 = img1.astype(np.float32)

#capture frame-by-frame
video.set(1,currentFrame);
ret, img = video.read()
#write image to video
output_video.write(img)
currentFrame +=1
#resize it
img = cv2.resize(img, ( width , height ))
#input must be float type
img = img.astype(np.float32)

# Main loop: TrackNet consumes three consecutive frames (img2, img1, img)
# and predicts the ball position for the newest one.
while(True):
	img2 = img1
	img1 = img

	#capture frame-by-frame
	video.set(1,currentFrame);
	ret, img = video.read()

	#if there dont have any frame in video, break
	if not ret:
		break

	#img is the frame that TrackNet will predict the position
	#since we need to change the size and type of img, copy it to output_img
	output_img = img

	#resize it
	img = cv2.resize(img, ( width , height ))
	#input must be float type
	img = img.astype(np.float32)

	#combine three imgs to (width , height, rgb*3)
	X = np.concatenate((img, img1, img2),axis=2)

	#since the odering of TrackNet is 'channels_first', so we need to change the axis
	X = np.rollaxis(X, 2, 0)

	#predict heatmap
	pr = m.predict( np.array([X]) )[0]

	#since TrackNet output is ( net_output_height*model_output_width , n_classes )
	#so we need to reshape image as ( net_output_height, model_output_width , n_classes(depth) )
	#.argmax( axis=2 ) => select the largest probability as class
	pr = pr.reshape(( height , width , n_classes ) ).argmax( axis=2 )

	#cv2 image must be numpy.uint8, convert numpy.int64 to numpy.uint8
	pr = pr.astype(np.uint8)

	#reshape the image size as original input image
	heatmap = cv2.resize(pr , (output_width, output_height ))

	#heatmap is converted into a binary image by threshold method.
	ret,heatmap = cv2.threshold(heatmap,127,255,cv2.THRESH_BINARY)

	#find the circle in image with 2<=radius<=7
	circles = cv2.HoughCircles(heatmap, cv2.HOUGH_GRADIENT,dp=1,minDist=1,param1=50,param2=2,minRadius=2,maxRadius=7)

	#In order to draw the circle in output_img, we need to used PIL library
	#Convert opencv image format to PIL image format
	PIL_image = cv2.cvtColor(output_img, cv2.COLOR_BGR2RGB)
	PIL_image = Image.fromarray(PIL_image)

	#check if there have any tennis be detected
	if circles is not None:
		#if only one tennis be detected
		if len(circles) == 1:
			x = int(circles[0][0][0])
			y = int(circles[0][0][1])
			print(currentFrame, x,y)
			#push x,y to queue
			q.appendleft([x,y])
			#pop x,y from queue
			q.pop()
		else:
			#push None to queue (ambiguous detection: more than one circle)
			q.appendleft(None)
			#pop x,y from queue
			q.pop()
	else:
		#push None to queue (no detection this frame)
		q.appendleft(None)
		#pop x,y from queue
		q.pop()

	#draw current frame prediction and previous 7 frames as yellow circle, total: 8 frames
	for i in range(0,8):
		if q[i] is not None:
			draw_x = q[i][0]
			draw_y = q[i][1]
			bbox = (draw_x - 2, draw_y - 2, draw_x + 2, draw_y + 2)
			draw = ImageDraw.Draw(PIL_image)
			draw.ellipse(bbox, outline ='yellow')
			del draw

	#Convert PIL image format back to opencv image format
	opencvImage = cv2.cvtColor(np.array(PIL_image), cv2.COLOR_RGB2BGR)

	#write image to output_video
	output_video.write(opencvImage)

	#next frame
	currentFrame += 1

# everything is done, release the video
video.release()
output_video.release()
print("finish")
| [
"numpy.rollaxis",
"cv2.HoughCircles",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"numpy.concatenate",
"cv2.cvtColor",
"cv2.threshold",
"cv2.VideoCapture",
"numpy.array",
"cv2.VideoWriter",
"PIL.Image.fromarray",
"PIL.ImageDraw.Draw",
"cv2.resize",
"queue.deque"
] | [((134, 159), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (157, 159), False, 'import argparse\n'), ((729, 763), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_video_path'], {}), '(input_video_path)\n', (745, 763), False, 'import cv2\n'), ((1405, 1418), 'queue.deque', 'queue.deque', ([], {}), '()\n', (1416, 1418), False, 'import queue\n'), ((1611, 1642), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (1633, 1642), False, 'import cv2\n'), ((1658, 1736), 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_video_path', 'fourcc', 'fps', '(output_width, output_height)'], {}), '(output_video_path, fourcc, fps, (output_width, output_height))\n', (1673, 1736), False, 'import cv2\n'), ((1992, 2025), 'cv2.resize', 'cv2.resize', (['img1', '(width, height)'], {}), '(img1, (width, height))\n', (2002, 2025), False, 'import cv2\n'), ((2243, 2275), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (2253, 2275), False, 'import cv2\n'), ((2699, 2731), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (2709, 2731), False, 'import cv2\n'), ((2849, 2890), 'numpy.concatenate', 'np.concatenate', (['(img, img1, img2)'], {'axis': '(2)'}), '((img, img1, img2), axis=2)\n', (2863, 2890), True, 'import numpy as np\n'), ((2980, 3000), 'numpy.rollaxis', 'np.rollaxis', (['X', '(2)', '(0)'], {}), '(X, 2, 0)\n', (2991, 3000), True, 'import numpy as np\n'), ((3517, 3562), 'cv2.resize', 'cv2.resize', (['pr', '(output_width, output_height)'], {}), '(pr, (output_width, output_height))\n', (3527, 3562), False, 'import cv2\n'), ((3646, 3697), 'cv2.threshold', 'cv2.threshold', (['heatmap', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(heatmap, 127, 255, cv2.THRESH_BINARY)\n', (3659, 3697), False, 'import cv2\n'), ((3752, 3865), 'cv2.HoughCircles', 'cv2.HoughCircles', (['heatmap', 'cv2.HOUGH_GRADIENT'], {'dp': '(1)', 'minDist': '(1)', 'param1': '(50)', 
'param2': '(2)', 'minRadius': '(2)', 'maxRadius': '(7)'}), '(heatmap, cv2.HOUGH_GRADIENT, dp=1, minDist=1, param1=50,\n param2=2, minRadius=2, maxRadius=7)\n', (3768, 3865), False, 'import cv2\n'), ((3993, 4036), 'cv2.cvtColor', 'cv2.cvtColor', (['output_img', 'cv2.COLOR_BGR2RGB'], {}), '(output_img, cv2.COLOR_BGR2RGB)\n', (4005, 4036), False, 'import cv2\n'), ((4053, 4079), 'PIL.Image.fromarray', 'Image.fromarray', (['PIL_image'], {}), '(PIL_image)\n', (4068, 4079), False, 'from PIL import Image, ImageDraw\n'), ((4960, 4979), 'numpy.array', 'np.array', (['PIL_image'], {}), '(PIL_image)\n', (4968, 4979), True, 'import numpy as np\n'), ((3035, 3048), 'numpy.array', 'np.array', (['[X]'], {}), '([X])\n', (3043, 3048), True, 'import numpy as np\n'), ((4796, 4821), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['PIL_image'], {}), '(PIL_image)\n', (4810, 4821), False, 'from PIL import Image, ImageDraw\n')] |
import numpy as np
from sklearn.metrics import classification_report as sk_classification_report
from sklearn.metrics import confusion_matrix
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from utils.molecular_metrics import MolecularMetrics
import tensorflow as tf
from collections import OrderedDict
def mols2grid_image(mols, molsPerRow):
    """Render a grid image of molecules, substituting an empty RWMol for
    every None entry so the grid layout is preserved."""
    sanitized = []
    for candidate in mols:
        sanitized.append(candidate if candidate is not None else Chem.RWMol())
    # 2D coordinates are required before the molecules can be drawn.
    for molecule in sanitized:
        AllChem.Compute2DCoords(molecule)
    return Draw.MolsToGridImage(sanitized, molsPerRow=molsPerRow, subImgSize=(150, 150))
def classification_report(data, model, session, sample=False):
    """Print sklearn classification reports and confusion matrices for one
    validation batch, for both edge (bond-type) and node (atom-type)
    predictions.

    Args:
        data: dataset object providing next_validation_batch() and the
            bond/atom decoder maps used to name the classes.
        model: model exposing the (gumbel-)argmax prediction tensors and the
            placeholder attributes used in the feed dict.
        session: TF session used to evaluate the prediction tensors.
        sample: if True, evaluate the Gumbel-sampled argmax tensors instead
            of the plain argmax tensors.
    """
    _, _, _, a, x, _, f, _, _ = data.next_validation_batch()
    n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
        model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
                                                            model.node_features: f, model.training: False,
                                                            model.variational: False})
    # Collapse one-hot predictions to integer class labels.
    n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)

    # Edge (bond type) report. BUGFIX: ground truth is `a`, prediction is
    # `e` -- the original passed them to sklearn in swapped order, which
    # transposes the confusion matrix and swaps precision with recall.
    y_true = a.flatten()
    y_pred = e.flatten()
    target_names = [str(Chem.rdchem.BondType.values[int(e)]) for e in data.bond_decoder_m.values()]

    print('######## Classification Report ########\n')
    print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
                                    target_names=target_names))

    print('######## Confusion Matrix ########\n')
    print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))

    # Node (atom type) report: ground truth is `x`, prediction is `n`.
    y_true = x.flatten()
    y_pred = n.flatten()
    target_names = [Chem.Atom(e).GetSymbol() for e in data.atom_decoder_m.values()]

    print('######## Classification Report ########\n')
    print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
                                    target_names=target_names))
    print('\n######## Confusion Matrix ########\n')
    print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))
def reconstructions(data, model, session, batch_dim=10, sample=False):
    """Reconstruct a training batch through the model and return the original
    and reconstructed molecules interleaved pairwise (m0[0], m1[0], m0[1], ...).

    Args:
        data: dataset object providing next_train_batch() and matrices2mol().
        model: model exposing the (gumbel-)argmax prediction tensors and the
            placeholder attributes used in the feed dict.
        session: TF session used to evaluate the prediction tensors.
        batch_dim: number of molecules to reconstruct.
        sample: if True, evaluate the Gumbel-sampled argmax tensors instead
            of the plain argmax tensors.
    """
    m0, _, _, a, x, _, f, _, _ = data.next_train_batch(batch_dim)
    n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
        model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
                                                            model.node_features: f, model.training: False,
                                                            model.variational: False})
    # Collapse one-hot predictions to integer class labels.
    n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)

    # Decode each (node, edge) matrix pair back to a molecule; failed decodes
    # (None) are replaced with an empty RWMol so drawing code does not break.
    m1 = np.array([e if e is not None else Chem.RWMol() for e in [data.matrices2mol(n_, e_, strict=True)
                                                              for n_, e_ in zip(n, e)]])

    # Interleave originals and reconstructions: stack as rows, transpose,
    # flatten -> [orig_0, recon_0, orig_1, recon_1, ...].
    mols = np.vstack((m0, m1)).T.flatten()

    return mols
def samples(data, model, session, embeddings, sample=False):
    """Decode molecules from latent *embeddings* using the trained model.

    If *sample* is True the Gumbel-sampled argmax tensors are evaluated,
    otherwise the deterministic argmax tensors.
    """
    # Choose which prediction tensors to evaluate.
    if sample:
        fetches = [model.nodes_gumbel_argmax, model.edges_gumbel_argmax]
    else:
        fetches = [model.nodes_argmax, model.edges_argmax]
    feed = {model.embeddings: embeddings, model.training: False}
    nodes, edges = session.run(fetches, feed_dict=feed)
    # Collapse one-hot outputs to integer class labels.
    nodes = np.argmax(nodes, axis=-1)
    edges = np.argmax(edges, axis=-1)
    return [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(nodes, edges)]
def all_scores(mols, data, norm=False, reconstruction=False):
    """Compute per-molecule metric lists and aggregate percentage scores.

    Returns:
        (m0, m1): m0 maps metric name -> list of per-molecule scores with
        None entries (invalid molecules) removed; m1 maps aggregate metric
        name -> percentage score.
    """
    raw_metrics = {
        'NP score': MolecularMetrics.natural_product_scores(mols, norm=norm),
        'QED score': MolecularMetrics.quantitative_estimation_druglikeness_scores(mols),
        'logP score': MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=norm),
        'SA score': MolecularMetrics.synthetic_accessibility_score_scores(mols, norm=norm),
        'diversity score': MolecularMetrics.diversity_scores(mols, data),
        'drugcandidate score': MolecularMetrics.drugcandidate_scores(mols, data),
    }
    # Drop None entries (invalid molecules) from every per-molecule list.
    m0 = {name: [score for score in scores if score is not None]
          for name, scores in raw_metrics.items()}

    m1 = {'valid score': MolecularMetrics.valid_total_score(mols) * 100,
          'unique score': MolecularMetrics.unique_total_score(mols) * 100,
          'novel score': MolecularMetrics.novel_total_score(mols, data) * 100}

    return m0, m1
# Keep a handle on TF's original graph_replace; graph_replace() below wraps it.
_graph_replace = tf.contrib.graph_editor.graph_replace
def remove_original_op_attributes(graph):
    """Remove _original_op attribute from all operations in a graph."""
    # Clearing _original_op works around graph_editor incompatibilities
    # when copying/replacing ops (see graph_replace below).
    for op in graph.get_operations():
        op._original_op = None
def graph_replace(*args, **kwargs):
    """Wrapper around TF's graph_replace that first strips the _original_op
    attribute from every op so it works with TF 1.0."""
    default_graph = tf.get_default_graph()
    remove_original_op_attributes(default_graph)
    return _graph_replace(*args, **kwargs)
def extract_update_dict(update_ops):
    """Extract variables and their new values from Assign and AssignAdd ops.

    Args:
        update_ops: list of Assign and AssignAdd ops, typically computed
            using Keras' opt.get_updates()

    Returns:
        OrderedDict mapping from variable values to their updated value.

    Raises:
        ValueError: if an op in *update_ops* is neither an Assign nor an
            AssignAdd variant.
    """
    # Resolve the variable targeted by each update op through its name.
    name_to_var = {v.name: v for v in tf.global_variables()}
    updates = OrderedDict()
    for update in update_ops:
        # inputs[0] is the target variable, inputs[1] the applied value.
        var_name = update.inputs[0].name
        var = name_to_var[var_name]
        value = update.inputs[1]
        if update.type in ['AssignVariableOp', 'Assign']:
            updates[var.value()] = value
        elif update.type in ['AssignAddVariableOp', 'AssignAdd']:
            updates[var.value()] = var + value
        else:
            raise ValueError("Update op type (%s) must be of type Assign or AssignAdd"%update.type)
    return updates
| [
"utils.molecular_metrics.MolecularMetrics.water_octanol_partition_coefficient_scores",
"numpy.argmax",
"utils.molecular_metrics.MolecularMetrics.valid_total_score",
"utils.molecular_metrics.MolecularMetrics.novel_total_score",
"rdkit.Chem.Draw.MolsToGridImage",
"utils.molecular_metrics.MolecularMetrics.un... | [((517, 589), 'rdkit.Chem.Draw.MolsToGridImage', 'Draw.MolsToGridImage', (['mols'], {'molsPerRow': 'molsPerRow', 'subImgSize': '(150, 150)'}), '(mols, molsPerRow=molsPerRow, subImgSize=(150, 150))\n', (537, 589), False, 'from rdkit.Chem import Draw\n'), ((5164, 5177), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5175, 5177), False, 'from collections import OrderedDict\n'), ((476, 504), 'rdkit.Chem.AllChem.Compute2DCoords', 'AllChem.Compute2DCoords', (['mol'], {}), '(mol)\n', (499, 504), False, 'from rdkit.Chem import AllChem\n'), ((1123, 1144), 'numpy.argmax', 'np.argmax', (['n'], {'axis': '(-1)'}), '(n, axis=-1)\n', (1132, 1144), True, 'import numpy as np\n'), ((1146, 1167), 'numpy.argmax', 'np.argmax', (['e'], {'axis': '(-1)'}), '(e, axis=-1)\n', (1155, 1167), True, 'import numpy as np\n'), ((2688, 2709), 'numpy.argmax', 'np.argmax', (['n'], {'axis': '(-1)'}), '(n, axis=-1)\n', (2697, 2709), True, 'import numpy as np\n'), ((2711, 2732), 'numpy.argmax', 'np.argmax', (['e'], {'axis': '(-1)'}), '(e, axis=-1)\n', (2720, 2732), True, 'import numpy as np\n'), ((3285, 3306), 'numpy.argmax', 'np.argmax', (['n'], {'axis': '(-1)'}), '(n, axis=-1)\n', (3294, 3306), True, 'import numpy as np\n'), ((3308, 3329), 'numpy.argmax', 'np.argmax', (['e'], {'axis': '(-1)'}), '(e, axis=-1)\n', (3317, 3329), True, 'import numpy as np\n'), ((4704, 4726), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4724, 4726), True, 'import tensorflow as tf\n'), ((418, 430), 'rdkit.Chem.RWMol', 'Chem.RWMol', ([], {}), '()\n', (428, 430), False, 'from rdkit import Chem\n'), ((4106, 4146), 'utils.molecular_metrics.MolecularMetrics.valid_total_score', 'MolecularMetrics.valid_total_score', (['mols'], {}), '(mols)\n', (4140, 4146), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((4180, 4221), 'utils.molecular_metrics.MolecularMetrics.unique_total_score', 'MolecularMetrics.unique_total_score', 
(['mols'], {}), '(mols)\n', (4215, 4221), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((4254, 4300), 'utils.molecular_metrics.MolecularMetrics.novel_total_score', 'MolecularMetrics.novel_total_score', (['mols', 'data'], {}), '(mols, data)\n', (4288, 4300), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((5127, 5148), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5146, 5148), True, 'import tensorflow as tf\n'), ((1733, 1745), 'rdkit.Chem.Atom', 'Chem.Atom', (['e'], {}), '(e)\n', (1742, 1745), False, 'from rdkit import Chem\n'), ((2777, 2789), 'rdkit.Chem.RWMol', 'Chem.RWMol', ([], {}), '()\n', (2787, 2789), False, 'from rdkit import Chem\n'), ((2944, 2963), 'numpy.vstack', 'np.vstack', (['(m0, m1)'], {}), '((m0, m1))\n', (2953, 2963), True, 'import numpy as np\n'), ((3576, 3632), 'utils.molecular_metrics.MolecularMetrics.natural_product_scores', 'MolecularMetrics.natural_product_scores', (['mols'], {'norm': 'norm'}), '(mols, norm=norm)\n', (3615, 3632), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((3655, 3721), 'utils.molecular_metrics.MolecularMetrics.quantitative_estimation_druglikeness_scores', 'MolecularMetrics.quantitative_estimation_druglikeness_scores', (['mols'], {}), '(mols)\n', (3715, 3721), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((3745, 3821), 'utils.molecular_metrics.MolecularMetrics.water_octanol_partition_coefficient_scores', 'MolecularMetrics.water_octanol_partition_coefficient_scores', (['mols'], {'norm': 'norm'}), '(mols, norm=norm)\n', (3804, 3821), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((3843, 3913), 'utils.molecular_metrics.MolecularMetrics.synthetic_accessibility_score_scores', 'MolecularMetrics.synthetic_accessibility_score_scores', (['mols'], {'norm': 'norm'}), '(mols, norm=norm)\n', (3896, 3913), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((3942, 3987), 
'utils.molecular_metrics.MolecularMetrics.diversity_scores', 'MolecularMetrics.diversity_scores', (['mols', 'data'], {}), '(mols, data)\n', (3975, 3987), False, 'from utils.molecular_metrics import MolecularMetrics\n'), ((4020, 4069), 'utils.molecular_metrics.MolecularMetrics.drugcandidate_scores', 'MolecularMetrics.drugcandidate_scores', (['mols', 'data'], {}), '(mols, data)\n', (4057, 4069), False, 'from utils.molecular_metrics import MolecularMetrics\n')] |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.patches import Ellipse
import pickle
from os.path import dirname, join
import numpy as np
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from make_parameter import PreParam
# Global matplotlib configuration for all figures produced by this script:
# ggplot style, LaTeX text rendering, small Times New Roman font.
plt.style.use('ggplot')
plt.rc('text', usetex=True)
plt.rc('font', size=8,family='Times New Roman')
# plt.rcParams['xtick.direction'] = 'in'
# plt.rcParams['ytick.direction'] = 'out'
# Thin black tick marks on both axes.
plt.rcParams['ytick.major.width'] = 0.4
plt.rcParams['xtick.major.width'] = 0.4
plt.rcParams['xtick.minor.width'] = 0.4
plt.rcParams['xtick.color'] = 'black'
plt.rcParams['ytick.color'] = 'black'
# Default color cycle of the active style, reused for consistent line colors.
color = list(plt.rcParams['axes.prop_cycle'])
def get_result(casename, mode, opt_num):
    """Load a pickled optimization result saved by a previous run.

    The file name encodes the case name, solver mode, and option number.
    """
    filename = 'result-' + casename + '-mode_' + str(mode) + '-opt_num_' + str(opt_num) + '.p'
    path_result = join(dirname(__file__), 'result//' + filename)
    with open(path_result, 'rb') as handle:
        return pickle.load(handle)
def get_np(casename_real, N_s, N_s_k, N_s_i):
    """Compute the primal/dual tolerance scaling counts for a case.

    Returns [epsilon_n, epsilon_p]: problem-size counts used to scale the
    ADMM stopping tolerances (number of dual and primal entries,
    respectively).  The formulas mirror the dimensions of the per-clique
    decision variables in PreParam; exact meaning of the magic constants
    (3, 4, ...) follows the model's polynomial/block sizes -- presumably
    fixed expansion orders; confirm against PreParam's definition.
    """
    # Build the real case from its data file; the tuple list and mode 23
    # match the settings used when the saved results were generated.
    path_casedata = join(dirname(__file__), 'data//casedata//'+ casename_real +'.py')
    case_real = PreParam(path_casedata, [(1,1),(2,2),(3,1),(4,2)], 23)
    # Dual-residual dimension count: summed sizes of the per-(k,i) blocks.
    epsilon_n = len(case_real.set_k_i) * (
        len(case_real.i_gen) * ( 1 + 2*(3 + 1) )**2 +
        len(case_real.i_gen) * ( 1 + 2 + (3 + 1) )**2 +
        sum( ( 1 + len(case_real.clique_tree_theta['node'][i_clique]) * (1 + 3) )**2 for i_clique in case_real.clique_tree_theta['node'].keys() ) +
        (4 + ((3 + 3 + 1)**2)) * len(case_real.i_all) * (3 + 1)
        )
    # Primal-residual dimension count: sampling-dependent terms plus the
    # same per-(k,i) block sizes as above (without the leading 4).
    epsilon_p = (
        (2 + 3) * N_s * len(case_real.i_gen) + 2**2 * 2 * ( N_s_k * (N_s_i - 1) ) * len(case_real.i_gen) + sum( (len(tong) + 1) + 2*len(tong) for tong in case_real.clique_tree_theta['node_gl'].values()) * ( N_s_k * (N_s_i - 1) ) * 2
        +
        len(case_real.set_k_i) * (
        len(case_real.i_gen) * ( 1 + 2*(3 + 1) )**2 +
        len(case_real.i_gen) * ( 1 + 2 + (3 + 1) )**2 +
        sum( ( 1 + len(case_real.clique_tree_theta['node'][i_clique]) * (1 + 3) )**2 for i_clique in case_real.clique_tree_theta['node'].keys() ) +
        ((3 + 3 + 1)**2) * len(case_real.i_all) * (3 + 1)
        )
    )
    return [epsilon_n, epsilon_p]
casename_real = 'case9_1'
epsilon_abs = 1e-5
epsilon_rel = 1e-4
[epsilon_n, epsilon_p] = get_np(casename_real, N_s = 20, N_s_k = 4, N_s_i = 5)
ratio_obj = 1
CASE = ['case 1', 'case 2', 'case 3', 'case 4', 'case 5', 'case 6']
mode = 23
opt_num = 'd_1_2_3_4-reduced'
CASENAME = { 'case 1': 'case9_1' ,
'case 2': 'case9_2' ,
'case 3': 'case9_3' ,
'case 4': 'case9_4' ,
'case 5': 'case9_5' ,
'case 6': 'case9_6' }
J_sin = { 'case 1': 187.950583735717,
'case 2': 265.611264602550,
'case 3': 106.744870785089,
'case 4': 61.5471940086659,
'case 5': 108.049706103686,
'case 6': 71.1233061490970 }
J_iter_0 = dict()
r_norm_2_0 = dict()
s_norm_2_0 = dict()
epsilon_pri_0 = dict()
epsilon_dual_0 = dict()
term_epsilon_pri = dict()
term_epsilon_dual = dict()
epsilon_p_0 = dict()
epsilon_n_0 = dict()
solve_time_0 = dict()
for case in CASE:
print(case)
opt_result = get_result(CASENAME[case], mode, opt_num)
J_iter_0[case] = np.array(list(opt_result.J_iter.values()))
r_norm_2_0[case] = np.array(list(opt_result.r_norm_2.values()))
s_norm_2_0[case] = np.array(list(opt_result.s_norm_2.values()))
epsilon_pri_0[case] = np.array(list(opt_result.epsilon_pri.values()))
epsilon_dual_0[case] = np.array(list(opt_result.epsilon_dual.values()))
term_epsilon_pri[case] = np.array(list(opt_result.term_epsilon_pri.values()))
term_epsilon_dual[case] = np.array(list(opt_result.term_epsilon_dual.values()))
epsilon_p_0[case] = opt_result.epsilon_p
epsilon_n_0[case] = opt_result.epsilon_n
solve_time_0[case] = opt_result.solve_time
J_iter = dict()
r_norm_2 = dict()
s_norm_2 = dict()
epsilon_pri = dict()
epsilon_dual = dict()
for case in CASE:
J_iter[case] = J_iter_0[case] * ratio_obj
r_norm_2[case] = r_norm_2_0[case] * (((epsilon_p**(3/4))/epsilon_p_0[case])**0.5)
s_norm_2[case] = s_norm_2_0[case] * ((epsilon_n/epsilon_n_0[case])**0.5)
epsilon_pri[case] = ( epsilon_p )**0.5 * epsilon_abs + epsilon_rel * term_epsilon_pri[case] * (((epsilon_p**(3/4))/epsilon_p_0[case])**0.5)
# NOTE(review): this assignment uses the loop variable `case` and quantities
# (epsilon_n, epsilon_abs, epsilon_rel, term_epsilon_dual, epsilon_n_0)
# defined above this excerpt; it presumably belongs inside a
# `for case in CASE:` loop — confirm against the full script.
epsilon_dual[case] = ( epsilon_n )**0.5 * epsilon_abs + epsilon_rel * term_epsilon_dual[case] * ((epsilon_n/epsilon_n_0[case])**0.5)
# First iteration index at which each residual norm drops below its
# tolerance; np.where returns all indices where the condition holds and
# [0][0] picks the earliest one.
k_r_meet = dict()
k_s_meet = dict()
for case in CASE:
    k_r_meet[case] = np.where( r_norm_2[case] - epsilon_pri[case] < 0 )[0][0]
    k_s_meet[case] = np.where( s_norm_2[case] - epsilon_dual[case] < 0 )[0][0]
# plot figure
path_fig = join(dirname(__file__), 'result//' + 'fig-cs-2.pdf' )
fig, axs = plt.subplots(2,3, figsize=(3.6,1.6))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.155, hspace=0.27)
# ADMM iteration counter (x axis).
kappa = range(1,101)
# Map each case to its (row, column) subplot position in the 2x3 grid.
case_subfigure = { 'case 1': (0, 0) ,
                   'case 2': (0, 1) ,
                   'case 3': (0, 2) ,
                   'case 4': (1, 0) ,
                   'case 5': (1, 1) ,
                   'case 6': (1, 2) }
# Primal/dual residual norms (solid) against their tolerances (dashed),
# with a dotted vertical line marking the first iteration each tolerance
# is met.
# NOTE(review): `basey` was renamed to `base` in matplotlib >= 3.3 —
# confirm the pinned matplotlib version.
for case in CASE:
    axs[case_subfigure[case]].semilogy(kappa, r_norm_2[case], '-', linewidth=0.8, label = '$||r||_2$', basey=10 , color = color[1]['color'])
    axs[case_subfigure[case]].semilogy(kappa, s_norm_2[case], '-', linewidth=0.8, label = '$||s||_2$', basey=10 , color = color[0]['color'])
    axs[case_subfigure[case]].semilogy(kappa, epsilon_pri[case], '--', linewidth=0.8, label = '$\\epsilon^{\\rm{pri}}$', basey=10 , color = color[1]['color'])
    axs[case_subfigure[case]].semilogy(kappa, epsilon_dual[case], '--', linewidth=0.8, label = '$\\epsilon^{\\rm{dual}}$', basey=10 , color = color[0]['color'])
    axs[case_subfigure[case]].axvline(k_r_meet[case]+1, color=color[1]['color'],linestyle="dotted", linewidth=0.8)
    axs[case_subfigure[case]].axvline(k_s_meet[case]+1, color=color[0]['color'],linestyle="dotted", linewidth=0.8)
# Shared axis limits and ticks for all six panels.
for i in [0,1]:
    for j in [0,1,2]:
        axs[i,j].set_xticks([1,20,40,60,80,100] )
        axs[i,j].set_xlim(1, 100)
        axs[i,j].set_ylim(1e-3, 1e1)
        axs[i,j].set_yticks([1e-3, 1e-2, 1e-1, 1e0, 1e1] )
# Hide tick labels on inner panels; only the left column and bottom row
# carry labels.
axs[0,1].set_yticklabels([] )
axs[0,2].set_yticklabels([] )
axs[1,1].set_yticklabels([] )
axs[1,2].set_yticklabels([] )
axs[0,0].set_xticklabels([] )
axs[0,1].set_xticklabels([] )
axs[0,2].set_xticklabels([] )
# Painting the tick marks white effectively hides them without changing
# the panel geometry.
axs[0,1].yaxis.set_tick_params(color = 'white')
axs[0,2].yaxis.set_tick_params(color = 'white')
axs[1,1].yaxis.set_tick_params(color = 'white')
axs[1,2].yaxis.set_tick_params(color = 'white')
axs[0,0].xaxis.set_tick_params(color = 'white')
axs[0,1].xaxis.set_tick_params(color = 'white')
axs[0,2].xaxis.set_tick_params(color = 'white')
axs[1,0].set_xticklabels([1, 20, 40, 60, 80, 100], fontsize = 7)
axs[1,1].set_xticklabels([1, 20, 40, 60, 80, 100], fontsize = 7)
axs[1,2].set_xticklabels([1, 20, 40, 60, 80, 100], fontsize = 7)
axs[0,0].set_yticklabels(['$10^{-3}$', '$10^{-2}$', '$10^{-1}$', '$10^{0}$', '$10^{1}$'], fontsize = 7)
axs[1,0].set_yticklabels(['$10^{-3}$', '$10^{-2}$', '$10^{-1}$', '$10^{0}$', '$10^{1}$'], fontsize = 7)
# Minor x ticks every 10 iterations on the bottom row.
# (MultipleLocator presumably comes from matplotlib.ticker, imported above
# this excerpt.)
xminorLocator = MultipleLocator(10)
axs[1,0].xaxis.set_minor_locator(xminorLocator)
axs[1,1].xaxis.set_minor_locator(xminorLocator)
axs[1,2].xaxis.set_minor_locator(xminorLocator)
axs[1,1].set_xlabel('$\\kappa$', fontsize = 8)
# Single shared y-axis label for the whole figure.
fig.text(0.03, 0.5, '$||r||_2$, $||s||_2$, $\\epsilon^{\\rm{pri}}$, $\\epsilon^{\\rm{dual}}$', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 8)
for i in [0,1]:
    for j in [0,1,2]:
        axs[i,j].set_title('Case '+ str(i*3 + j + 1 ), {'fontsize': 8}, pad = 0 , fontsize = 7.25)
# One legend for the whole figure, placed above the panels.
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=(-2.3,15 ),
           ncol=4, numpoints=10, fancybox = False, framealpha = 0.6, edgecolor = 'white', columnspacing= 1)
# Hand-placed annotations of the convergence iterations per case
# (presumably matching k_r_meet / k_s_meet above — confirm if data change).
axs[0,0].text(52, 0.5, '$\\kappa$=52', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[0,0].text(65, 0.5, '$\\kappa$=53', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[0,1].text(27, 0.5, '$\\kappa$=27', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[0,1].text(50, 0.5, '$\\kappa$=50', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[0,2].text(50, 0.5, '$\\kappa$=50', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[1,0].text(24, 0.5, '$\\kappa$=24', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[1,0].text(53, 0.5, '$\\kappa$=53', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[1,1].text(30, 0.5, '$\\kappa$=30', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[1,1].text(49, 0.5, '$\\kappa$=49', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[1,2].text(19, 0.5, '$\\kappa$=19', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
axs[1,2].text(48, 0.5, '$\\kappa$=48', rotation= 'vertical', va = 'center', ha = 'right',fontsize = 6)
fig.savefig(path_fig, dpi = 300, transparent=False, bbox_inches='tight')
plt.close()
'''
J_iter = dict()
r_norm_2 = dict()
s_norm_2 = dict()
epsilon_pri = dict()
epsilon_dual = dict()
for opt_num in ['d11']:#, 'd2', 'd3', 'd4', 'd5']:
path_result = join(dirname(__file__), 'result//case-9//' + 'result-' + casename + '-mode_' + str(mode) + '-opt_num_' + str(opt_num) + '.p')
with open(path_result, 'rb') as fpr:
opt_result = pickle.load(fpr)
np.array(list(opt_result.r_norm_2.values()))
J_iter[opt_num] = np.array(list(opt_result.J_iter.values()))
r_norm_2[opt_num] = np.array(list(opt_result.r_norm_2.values()))
s_norm_2[opt_num] = np.array(list(opt_result.s_norm_2.values()))
epsilon_pri[opt_num] = np.array(list(opt_result.epsilon_pri.values()))
epsilon_dual[opt_num] = np.array(list(opt_result.epsilon_dual.values()))
'''
'''
f, axarr = plt.subplots(5,8)
f.set_figheight(4.708)
f.set_figwidth(10)
f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0, hspace=0)
for i in range(1,40):
s = 'BUS-'+str(i)
m, n = (i-1)/8, i-((i-1)/8)*8-1
axarr[m,n].plot(ul[1]['Time'].values[:553], u[s].mean(axis=1), color='indianred', alpha=1, linewidth = 1.2)
axarr[m,n].plot(ul[1]['Time'].values[:553], u[s].std(axis=1),'--', color='indianred', alpha=1, linewidth = 1.2)
axarr[m,n].plot(ule[1]['Time'].values[:553], ue[s].mean(axis=1), color='steelblue', alpha=1, linewidth = 1.2)
axarr[m,n].plot(ule[1]['Time'].values[:553], ue[s].std(axis=1),'--', color='steelblue', alpha=1, linewidth = 1.2)
axarr[m,n+1].plot(ule[1]['Time'].values[:553], ue[s].std(axis=1),'--', color='white', alpha=0, linewidth = 1.2)
for i in range(5):
for j in range(8):
axarr[i, j].set_yticks([0, 0.5, 1.0] )
axarr[i, j].set_yticklabels([0, 0.5, 1.0])
axarr[i, j].set_ylim(-0.05,1.2)
axarr[i, j].set_xticks([0,200] )
axarr[i, j].set_xticklabels([0,4.0])
for i in range(4):
plt.setp([a.get_xticklabels() for a in axarr[i, :]], visible=False)
for i in range(1,8):
plt.setp([a.get_yticklabels() for a in axarr[:, i]], visible=False)
f.text(0.08,0.5,'Mean or standard deviation of $V (p.u.)$', family ='serif',horizontalalignment='center', verticalalignment='center',rotation='vertical')
f.text(0.5,0.052,'$t (s)$', family ='serif',horizontalalignment='center', verticalalignment='center')
path_fig = os.path.join(cwd,'result\\0_'+str(fau)+'.eps')
f.savefig(path_fig, dpi = 300, transparent=False, bbox_inches='tight')
''' | [
"make_parameter.PreParam",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"os.path.dirname",
"matplotlib.pyplot.style.use",
"pickle.load",
"numpy.where",
"matplotlib.pyplot.rc",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots"
] | [((308, 331), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (321, 331), True, 'import matplotlib.pyplot as plt\n'), ((333, 360), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (339, 360), True, 'import matplotlib.pyplot as plt\n'), ((361, 409), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(8)', 'family': '"""Times New Roman"""'}), "('font', size=8, family='Times New Roman')\n", (367, 409), True, 'import matplotlib.pyplot as plt\n'), ((5073, 5111), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(3.6, 1.6)'}), '(2, 3, figsize=(3.6, 1.6))\n', (5085, 5111), True, 'import matplotlib.pyplot as plt\n'), ((7533, 7552), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (7548, 7552), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((8044, 8208), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(-2.3, 15)', 'ncol': '(4)', 'numpoints': '(10)', 'fancybox': '(False)', 'framealpha': '(0.6)', 'edgecolor': '"""white"""', 'columnspacing': '(1)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=(-2.3, 15), ncol=4,\n numpoints=10, fancybox=False, framealpha=0.6, edgecolor='white',\n columnspacing=1)\n", (8054, 8208), True, 'import matplotlib.pyplot as plt\n'), ((9431, 9442), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9440, 9442), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1283), 'make_parameter.PreParam', 'PreParam', (['path_casedata', '[(1, 1), (2, 2), (3, 1), (4, 2)]', '(23)'], {}), '(path_casedata, [(1, 1), (2, 2), (3, 1), (4, 2)], 23)\n', (1230, 1283), False, 'from make_parameter import PreParam\n'), ((5013, 5030), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (5020, 5030), False, 'from os.path import dirname, join\n'), ((859, 876), 'os.path.dirname', 'dirname', (['__file__'], {}), 
'(__file__)\n', (866, 876), False, 'from os.path import dirname, join\n'), ((1034, 1049), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1045, 1049), False, 'import pickle\n'), ((1144, 1161), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1151, 1161), False, 'from os.path import dirname, join\n'), ((4844, 4892), 'numpy.where', 'np.where', (['(r_norm_2[case] - epsilon_pri[case] < 0)'], {}), '(r_norm_2[case] - epsilon_pri[case] < 0)\n', (4852, 4892), True, 'import numpy as np\n'), ((4922, 4971), 'numpy.where', 'np.where', (['(s_norm_2[case] - epsilon_dual[case] < 0)'], {}), '(s_norm_2[case] - epsilon_dual[case] < 0)\n', (4930, 4971), True, 'import numpy as np\n')] |
from i3Deep import utils
import numpy as np
from tqdm import tqdm
import os
import json
import copy
import shutil
from pathlib import Path
def combine(image_path, prediction_path, chosen_slices_path, save_path, depth):
    """Export, per case and per anatomical view, the chosen slices (plus
    ``depth`` neighbours on each side) of both the image and its basic
    prediction as new NIfTI volumes under ``save_path``.

    The output directory is wiped and recreated on every call.
    """
    shutil.rmtree(save_path, ignore_errors=True)
    Path(save_path).mkdir(parents=True, exist_ok=True)
    for image_filename in tqdm(utils.load_filenames(image_path)):
        # Drop the trailing 12 characters of the file name (presumably the
        # "_0000.nii.gz" modality suffix — confirm) to recover the case name.
        name = os.path.basename(image_filename)[:-12]
        image, affine, spacing, header = utils.load_nifty(image_filename)
        prediction, _, _, _ = utils.load_nifty(prediction_path + name + ".nii.gz")
        with open(chosen_slices_path + name + ".json") as f:
            chosen_slices = json.load(f)
        # One pass per view; deep copies keep the loaded volumes untouched.
        for view, dim in (("Sagittal", 0), ("Coronal", 1), ("Axial", 2)):
            combined_image, combined_prediction = combine_slices(
                copy.deepcopy(image), copy.deepcopy(prediction),
                chosen_slices[view], dim, depth)
            prefix = save_path + name + "_" + view.lower()
            utils.save_nifty(prefix + "_image.nii.gz", combined_image,
                             affine, spacing, header)
            utils.save_nifty(prefix + "_presegmentation.nii.gz", combined_prediction,
                             affine, spacing, header, is_mask=True)
def combine_slices(image, prediction, indices, dim, depth):
    """Stack, along axis ``dim``, a window of ``2*depth + 1`` consecutive
    slices around every chosen index, for both the image and the prediction.

    The central (chosen) slice of each window is branded with a small
    checkerboard marker so it can be spotted in a viewer; the corresponding
    prediction slice additionally has its label values doubled so it shows
    up in a different colour.

    Returns the stacked image and the stacked prediction.
    """
    img_low, img_high = image.min(), image.max()
    pred_low, pred_high = prediction.min(), prediction.max()
    window = 2 * depth + 1
    picked = []
    is_center = []
    for index in indices:
        picked += list(range(index - depth, index + depth + 1))
        is_center += [pos == depth for pos in range(window)]
    # Move the chosen axis to the front so the windows can be gathered with
    # a single fancy-index operation.
    stacked_image = np.moveaxis(image, dim, 0)[picked, :, :]
    stacked_prediction = np.moveaxis(prediction, dim, 0)[picked, :, :]
    for i, center in enumerate(is_center):
        if center:
            stacked_image[i] = add_checkerboard_marker(stacked_image[i], img_low, img_high)
            # "* 2" changes the colour of the segmentation for that slice.
            stacked_prediction[i] = add_checkerboard_marker(stacked_prediction[i], pred_low, pred_high) * 2
    return np.moveaxis(stacked_image, 0, dim), np.moveaxis(stacked_prediction, 0, dim)
return slices_image, slices_prediction
def add_checkerboard_marker(slice, min_value, max_value, size=8):
    """Stamp a diagonal run of ``size`` alternating ``min_value`` /
    ``max_value`` cells into the top-left corner of ``slice``.

    Each cell's edge is 1% of the corresponding axis length, so the marker
    scales with the slice. The array is modified in place and returned.

    NOTE: the first parameter shadows the built-in ``slice``; the name is
    kept for interface compatibility.
    """
    cell_h = int(slice.shape[0] * 0.01)
    cell_w = int(slice.shape[1] * 0.01)
    for dark in range(0, size, 2):
        light = dark + 1
        slice[dark * cell_h:light * cell_h, dark * cell_w:light * cell_w] = min_value
        slice[light * cell_h:(light + 1) * cell_h, light * cell_w:(light + 1) * cell_w] = max_value
    return slice
if __name__ == '__main__':
    # Number of neighbouring slices exported on each side of a chosen slice.
    depth = 5
    # Absolute dataset locations on the original author's cluster; adjust
    # these paths before running elsewhere.
    image_path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task070_guided_all_public_ggo/refinement_test/images/"
    prediction_path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task070_guided_all_public_ggo/refinement_test/basic_predictions/"
    chosen_slices_path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task070_guided_all_public_ggo/refinement_test/choosen_slices_export/V7/my_method/"
    save_path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task070_guided_all_public_ggo/refinement_test/combined_slices_depth{}/".format(depth)
    combine(image_path, prediction_path, chosen_slices_path, save_path, depth=depth)
| [
"i3Deep.utils.save_nifty",
"tqdm.tqdm",
"numpy.moveaxis",
"json.load",
"copy.deepcopy",
"os.path.basename",
"numpy.asarray",
"pathlib.Path",
"i3Deep.utils.load_nifty",
"shutil.rmtree",
"i3Deep.utils.load_filenames"
] | [((236, 280), 'shutil.rmtree', 'shutil.rmtree', (['save_path'], {'ignore_errors': '(True)'}), '(save_path, ignore_errors=True)\n', (249, 280), False, 'import shutil\n'), ((362, 394), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['image_path'], {}), '(image_path)\n', (382, 394), False, 'from i3Deep import utils\n'), ((424, 445), 'tqdm.tqdm', 'tqdm', (['image_filenames'], {}), '(image_filenames)\n', (428, 445), False, 'from tqdm import tqdm\n'), ((3310, 3343), 'numpy.moveaxis', 'np.moveaxis', (['slices_image', '(0)', 'dim'], {}), '(slices_image, 0, dim)\n', (3321, 3343), True, 'import numpy as np\n'), ((3369, 3407), 'numpy.moveaxis', 'np.moveaxis', (['slices_prediction', '(0)', 'dim'], {}), '(slices_prediction, 0, dim)\n', (3380, 3407), True, 'import numpy as np\n'), ((544, 576), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['image_filename'], {}), '(image_filename)\n', (560, 576), False, 'from i3Deep import utils\n'), ((608, 660), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (["(prediction_path + name + '.nii.gz')"], {}), "(prediction_path + name + '.nii.gz')\n", (624, 660), False, 'from i3Deep import utils\n'), ((1286, 1403), 'i3Deep.utils.save_nifty', 'utils.save_nifty', (["(save_path + name + '_sagittal_image.nii.gz')", 'comined_slices_image_dim0', 'affine', 'spacing', 'header'], {}), "(save_path + name + '_sagittal_image.nii.gz',\n comined_slices_image_dim0, affine, spacing, header)\n", (1302, 1403), False, 'from i3Deep import utils\n'), ((1409, 1555), 'i3Deep.utils.save_nifty', 'utils.save_nifty', (["(save_path + name + '_sagittal_presegmentation.nii.gz')", 'comined_slices_prediction_dim0', 'affine', 'spacing', 'header'], {'is_mask': '(True)'}), "(save_path + name + '_sagittal_presegmentation.nii.gz',\n comined_slices_prediction_dim0, affine, spacing, header, is_mask=True)\n", (1425, 1555), False, 'from i3Deep import utils\n'), ((1563, 1679), 'i3Deep.utils.save_nifty', 'utils.save_nifty', (["(save_path + name + '_coronal_image.nii.gz')", 
'comined_slices_image_dim1', 'affine', 'spacing', 'header'], {}), "(save_path + name + '_coronal_image.nii.gz',\n comined_slices_image_dim1, affine, spacing, header)\n", (1579, 1679), False, 'from i3Deep import utils\n'), ((1685, 1830), 'i3Deep.utils.save_nifty', 'utils.save_nifty', (["(save_path + name + '_coronal_presegmentation.nii.gz')", 'comined_slices_prediction_dim1', 'affine', 'spacing', 'header'], {'is_mask': '(True)'}), "(save_path + name + '_coronal_presegmentation.nii.gz',\n comined_slices_prediction_dim1, affine, spacing, header, is_mask=True)\n", (1701, 1830), False, 'from i3Deep import utils\n'), ((1838, 1952), 'i3Deep.utils.save_nifty', 'utils.save_nifty', (["(save_path + name + '_axial_image.nii.gz')", 'comined_slices_image_dim2', 'affine', 'spacing', 'header'], {}), "(save_path + name + '_axial_image.nii.gz',\n comined_slices_image_dim2, affine, spacing, header)\n", (1854, 1952), False, 'from i3Deep import utils\n'), ((1958, 2101), 'i3Deep.utils.save_nifty', 'utils.save_nifty', (["(save_path + name + '_axial_presegmentation.nii.gz')", 'comined_slices_prediction_dim2', 'affine', 'spacing', 'header'], {'is_mask': '(True)'}), "(save_path + name + '_axial_presegmentation.nii.gz',\n comined_slices_prediction_dim2, affine, spacing, header, is_mask=True)\n", (1974, 2101), False, 'from i3Deep import utils\n'), ((2759, 2785), 'numpy.moveaxis', 'np.moveaxis', (['image', 'dim', '(0)'], {}), '(image, dim, 0)\n', (2770, 2785), True, 'import numpy as np\n'), ((2837, 2868), 'numpy.moveaxis', 'np.moveaxis', (['prediction', 'dim', '(0)'], {}), '(prediction, dim, 0)\n', (2848, 2868), True, 'import numpy as np\n'), ((286, 301), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (290, 301), False, 'from pathlib import Path\n'), ((463, 495), 'os.path.basename', 'os.path.basename', (['image_filename'], {}), '(image_filename)\n', (479, 495), False, 'import os\n'), ((754, 766), 'json.load', 'json.load', (['f'], {}), '(f)\n', (763, 766), False, 'import 
json\n'), ((853, 873), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (866, 873), False, 'import copy\n'), ((875, 900), 'copy.deepcopy', 'copy.deepcopy', (['prediction'], {}), '(prediction)\n', (888, 900), False, 'import copy\n'), ((1023, 1043), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (1036, 1043), False, 'import copy\n'), ((1045, 1070), 'copy.deepcopy', 'copy.deepcopy', (['prediction'], {}), '(prediction)\n', (1058, 1070), False, 'import copy\n'), ((1192, 1212), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (1205, 1212), False, 'import copy\n'), ((1214, 1239), 'copy.deepcopy', 'copy.deepcopy', (['prediction'], {}), '(prediction)\n', (1227, 1239), False, 'import copy\n'), ((3543, 3566), 'numpy.asarray', 'np.asarray', (['slice.shape'], {}), '(slice.shape)\n', (3553, 3566), True, 'import numpy as np\n')] |
import numpy as np
from scipy import stats
import statsmodels.api as sm
import pandas as pd
import numpy.linalg as npl
import matplotlib.pyplot as plt
import numpy.random as npr
import time
import seaborn as sns
from tqdm import tqdm
from scipy.stats import norm
import scipy.optimize as opt
import math
def fnDataImport(bDropNA=True):
    """Load the quarterly market data from the fixed Excel workbook.

    The columns are renamed to GDP, WTI, HPI, SMX and ASCX; rows with
    missing values are dropped unless ``bDropNA`` is False.

    NOTE: the backslashes in the path happen not to form escape sequences;
    a raw string would be safer.
    """
    df = pd.read_excel('D:\QRM_Program\Research_Project\MarketData.xlsx',
                       parse_dates=['Quarter'], index_col='Quarter')
    df.columns = ["GDP", "WTI", "HPI", "SMX", "ASCX"]
    return df.dropna() if bDropNA else df
###########################################################################
'''MJD_calibration'''
# Model settings: a 12-step horizon, so each step has length Delta_t, plus
# the initial guess for the MJD parameters
# [mu_d, log(sigma_d), mu_j, sigma_j, Lambda].
T = 12
Nsteps = 12
Delta_t = T / Nsteps
vTheta = [0.22, 0.15, 0.08, 0.02, 20]
def MJD_calibration(vTheta, Series):
    """Negative log-likelihood of a Merton jump-diffusion for ``Series``.

    Parameters
    ----------
    vTheta : sequence of 5 floats
        [mu_d, log(sigma_d), mu_j, sigma_j, Lambda]: diffusive drift,
        log diffusive volatility, mean jump size, jump volatility and
        jump intensity.
    Series : array-like
        Observed log-returns over intervals of length ``Delta_t``.

    Returns
    -------
    float
        Negative log-likelihood, to be minimised by the optimiser.
    """
    mu_d = vTheta[0]
    sigma_d = np.exp(vTheta[1])  # diffusive vol is optimised on log scale
    mu_j = vTheta[2]
    # NOTE(review): the simulation code exponentiates this slot
    # (jumps_sigma = exp(Xs[:, 3])) while it is used directly here —
    # confirm which scale is intended.
    sigma_j = vTheta[3]
    Lambda = vTheta[4]
    returns = np.asarray(Series)
    # Mixture density f(x) = sum_k P(N=k) * N(x; mean_k, var_k), with the
    # Poisson jump count truncated at 20 jumps per interval.
    density = np.zeros_like(returns, dtype=float)
    for k in range(0, 20):
        mean = (mu_d - sigma_d**2 / 2 * Delta_t) + (mu_j * k)
        var = sigma_d ** 2 * Delta_t + sigma_j**2 * k
        poisson_weight = (Lambda * Delta_t) ** k / math.factorial(k) * np.exp(-Lambda * Delta_t)
        density += poisson_weight * norm.pdf(returns, loc=mean, scale=np.sqrt(var))
    # BUG FIX: the per-k terms must be mixed (Poisson-weighted sum of normal
    # densities) *before* taking logs; previously the objective was
    # recomputed and overwritten on every iteration, so only k=19 counted.
    # (math.factorial also replaces np.math.factorial, removed in NumPy 2.0.)
    return -np.sum(np.log(density))
# Load the raw series, convert to log-returns, and calibrate the MJD model
# for each of the five assets with Nelder-Mead.
dfFull = fnDataImport(bDropNA=False)
dfReturnFull = np.log(dfFull).diff()
df_new = dfReturnFull.dropna()
# One fitted parameter row [mu_d, log(sigma_d), mu_j, sigma_j, lambda]
# per asset column.
Xs = np.zeros((5, 5))
for i in range(0,5):
    seriesI = df_new.iloc[:, i].values
    # NOTE(review): scipy documents `args` as a tuple; passing the bare
    # array relies on implicit handling — consider args=(seriesI,). Confirm.
    res= opt.minimize(MJD_calibration, vTheta, args=seriesI, method='Nelder-Mead')
    print(res.message)
    Xs[i,:] = res.x
###############################################################################################
'''MJD_simulation'''
def jump_diffusion (S, mu, sigma, Lambdas, Nsim, NAssets, T, Delta_t, Nsteps, log_corr, jumps_mu, jumps_sigma):
    """Simulate correlated Merton jump-diffusion paths.

    Returns an array of shape (Nsteps + 1, Nsim, NAssets); row 0 is the
    start value ``S`` (here: presumably the last observed return per asset
    — confirm against the caller). ``mu``, ``sigma``, ``Lambdas``,
    ``jumps_mu`` and ``jumps_sigma`` are per-asset parameter vectors;
    ``log_corr`` is the correlation matrix of the log-returns.
    No random seed is set, so results differ between runs.
    """
    # Cholesky factor of the correlation matrix; multiplying iid normal
    # draws by it produces correlated shocks across assets.
    decomposition = np.linalg.cholesky(log_corr)
    simulated_paths = np.zeros([Nsteps+1, Nsim, NAssets])
    simulated_paths[0, :,: ] = S
    for sim in tqdm(range(Nsim)):
        # Diffusion shocks, jump-size shocks and Poisson jump counts for
        # every step and asset of this simulated path.
        Z_1 = np.random.normal(0., 1., size=( Nsteps + 1, NAssets ))
        Z_2 = np.random.normal(0., 1., size=( Nsteps + 1, NAssets ))
        Poisson = np.random.poisson(lam=Lambdas*Delta_t, size=(Nsteps + 1, NAssets))
        Z_1_1 = Z_1 @ decomposition
        Z_2_1 = Z_2 @ decomposition
        for i in range(1, Nsteps + 1):
            # Drift-corrected diffusion part of the log-increment.
            musigmaDelta = (mu - sigma**2/2) * Delta_t
            sigmasqrtDelta = sigma * np.sqrt(Delta_t)
            expPar1 = musigmaDelta + sigmasqrtDelta * Z_1_1[i,:]
            # Compound-jump part: count times mean jump size plus the
            # scaled normal jump shock.
            expPar2 = jumps_mu * Poisson[i, :] + jumps_sigma * np.sqrt(Poisson[i, :]) * Z_2_1[i, :]
            simulated_paths[i, sim,: ] = simulated_paths[i-1, sim,: ] * np.exp(expPar1 + expPar2)
    return simulated_paths
# Winsorise the extreme observations before simulation.
# NOTE(review): both GDP and HPI are clipped with the *GDP* quantiles —
# confirm this is intended (clipping HPI with its own quantiles seems more
# natural).
dfReturnFullClipped = df_new.copy()
dfReturnFullClipped[['GDP', 'HPI']] = dfReturnFull[['GDP', 'HPI']].clip(lower=dfReturnFull['GDP'].quantile(0.01), upper=dfReturnFull['GDP'].quantile(0.99))
#dfReturnFullClipped
# Unpack the calibrated parameter matrix into per-asset vectors. The bare
# expressions (`sigma`, `jumps_mu`, ...) are no-ops in a script — leftover
# notebook-style inspection.
mu = Xs[:,0]
sigma = np.exp(Xs[:,1])
sigma
jumps_mu = Xs[:,2]
jumps_mu
# NOTE(review): the jump volatility is exponentiated here although the
# calibration used vTheta[3] directly — confirm the parameter scale.
jumps_sigma = np.exp( Xs[:,3] )
jumps_sigma
Lambdas = Xs[:,4]
Lambdas
# Start values: the latest (clipped) observation of each asset.
S = dfReturnFullClipped.iloc[-1].values
S
# Simulation settings: 1000 paths over 12 steps.
Nsim = 1000
NAssets = len(S)
T = 12
Nsteps = 12
Delta_t = T / Nsteps
log_corr = df_new.corr()
mS = jump_diffusion (S, mu, sigma, Lambdas, Nsim, NAssets, T, Delta_t, Nsteps, log_corr, jumps_mu, jumps_sigma)
# One subplot of simulated paths per asset.
plt.figure(figsize=(12, 10))
plt.subplot(3, 2, 1)
plt.plot(mS[:, :, 0])
plt.subplot(3, 2, 2)
plt.plot(mS[:, :, 1])
plt.subplot(3, 2, 3)
plt.plot(mS[:, :, 2])
plt.subplot(3, 2, 4)
plt.plot(mS[:, :, 3])
plt.subplot(3, 2, 5)
plt.plot(mS[:, :, 4])
plt.show()
############################################################################
''' Comparison '''
# 12-period change on quarterly data, i.e. the 3-year change referenced in
# the histogram labels below.
mHistorical = dfFull.pct_change(12).values
fig = plt.figure(figsize=(8, 6))
fig.suptitle('Simulating %i paths for %i assets' % (Nsim, len(dfFull.columns)))
columns = 2
rows = 3
list_assets = ["GDP", "WTI", "HPI", "SMX", "ASCX"]
# Compare the distribution of the simulated cumulative change (sum of the
# last 12 simulated steps) against the historical distribution, per asset.
for i in range(1, 6):
    fig.add_subplot(rows, columns, i)
    plt.hist([np.sum(mS[-12:, :, i-1], axis=0), mHistorical[:, i-1]], color=['g', 'r'],
             label=['Generated 3Y-change Asset '+list_assets[i-1],
                    'Historical 3Y-change Asset '+list_assets[i-1]], bins=40, density=True)
plt.legend()
plt.show()
| [
"matplotlib.pyplot.subplot",
"scipy.optimize.minimize",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.sum",
"matplotlib.pyplot.legend",
"numpy.zeros",
"pandas.read_excel",
"matplotlib.pyplot.figure",
"numpy.math.factorial",
"numpy.exp",
"numpy.random.normal",
"num... | [((1453, 1469), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (1461, 1469), True, 'import numpy as np\n'), ((3037, 3053), 'numpy.exp', 'np.exp', (['Xs[:, 1]'], {}), '(Xs[:, 1])\n', (3043, 3053), True, 'import numpy as np\n'), ((3101, 3117), 'numpy.exp', 'np.exp', (['Xs[:, 3]'], {}), '(Xs[:, 3])\n', (3107, 3117), True, 'import numpy as np\n'), ((3409, 3437), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (3419, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3438, 3458), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (3449, 3458), True, 'import matplotlib.pyplot as plt\n'), ((3459, 3480), 'matplotlib.pyplot.plot', 'plt.plot', (['mS[:, :, 0]'], {}), '(mS[:, :, 0])\n', (3467, 3480), True, 'import matplotlib.pyplot as plt\n'), ((3482, 3502), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (3493, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3503, 3524), 'matplotlib.pyplot.plot', 'plt.plot', (['mS[:, :, 1]'], {}), '(mS[:, :, 1])\n', (3511, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3546), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (3537, 3546), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3568), 'matplotlib.pyplot.plot', 'plt.plot', (['mS[:, :, 2]'], {}), '(mS[:, :, 2])\n', (3555, 3568), True, 'import matplotlib.pyplot as plt\n'), ((3570, 3590), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (3581, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3612), 'matplotlib.pyplot.plot', 'plt.plot', (['mS[:, :, 3]'], {}), '(mS[:, :, 3])\n', (3599, 3612), True, 'import matplotlib.pyplot as plt\n'), ((3614, 3634), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (3625, 3634), True, 'import matplotlib.pyplot as plt\n'), ((3635, 3656), 'matplotlib.pyplot.plot', 
'plt.plot', (['mS[:, :, 4]'], {}), '(mS[:, :, 4])\n', (3643, 3656), True, 'import matplotlib.pyplot as plt\n'), ((3657, 3667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3665, 3667), True, 'import matplotlib.pyplot as plt\n'), ((3816, 3842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (3826, 3842), True, 'import matplotlib.pyplot as plt\n'), ((4319, 4329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4327, 4329), True, 'import matplotlib.pyplot as plt\n'), ((347, 464), 'pandas.read_excel', 'pd.read_excel', (['"""D:\\\\QRM_Program\\\\Research_Project\\\\MarketData.xlsx"""'], {'parse_dates': "['Quarter']", 'index_col': '"""Quarter"""'}), "('D:\\\\QRM_Program\\\\Research_Project\\\\MarketData.xlsx',\n parse_dates=['Quarter'], index_col='Quarter')\n", (360, 464), True, 'import pandas as pd\n'), ((835, 852), 'numpy.exp', 'np.exp', (['vTheta[1]'], {}), '(vTheta[1])\n', (841, 852), True, 'import numpy as np\n'), ((1539, 1612), 'scipy.optimize.minimize', 'opt.minimize', (['MJD_calibration', 'vTheta'], {'args': 'seriesI', 'method': '"""Nelder-Mead"""'}), "(MJD_calibration, vTheta, args=seriesI, method='Nelder-Mead')\n", (1551, 1612), True, 'import scipy.optimize as opt\n'), ((1908, 1936), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['log_corr'], {}), '(log_corr)\n', (1926, 1936), True, 'import numpy as np\n'), ((1959, 1996), 'numpy.zeros', 'np.zeros', (['[Nsteps + 1, Nsim, NAssets]'], {}), '([Nsteps + 1, Nsim, NAssets])\n', (1967, 1996), True, 'import numpy as np\n'), ((4306, 4318), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4316, 4318), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1408), 'numpy.log', 'np.log', (['dfFull'], {}), '(dfFull)\n', (1400, 1408), True, 'import numpy as np\n'), ((2077, 2131), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)'], {'size': '(Nsteps + 1, NAssets)'}), '(0.0, 1.0, size=(Nsteps + 1, NAssets))\n', (2093, 2131), True, 'import 
numpy as np\n'), ((2146, 2200), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)'], {'size': '(Nsteps + 1, NAssets)'}), '(0.0, 1.0, size=(Nsteps + 1, NAssets))\n', (2162, 2200), True, 'import numpy as np\n'), ((2219, 2287), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(Lambdas * Delta_t)', 'size': '(Nsteps + 1, NAssets)'}), '(lam=Lambdas * Delta_t, size=(Nsteps + 1, NAssets))\n', (2236, 2287), True, 'import numpy as np\n'), ((1199, 1219), 'numpy.math.factorial', 'np.math.factorial', (['k'], {}), '(k)\n', (1216, 1219), True, 'import numpy as np\n'), ((1254, 1279), 'numpy.exp', 'np.exp', (['(-Lambda * Delta_t)'], {}), '(-Lambda * Delta_t)\n', (1260, 1279), True, 'import numpy as np\n'), ((4069, 4103), 'numpy.sum', 'np.sum', (['mS[-12:, :, i - 1]'], {'axis': '(0)'}), '(mS[-12:, :, i - 1], axis=0)\n', (4075, 4103), True, 'import numpy as np\n'), ((1133, 1145), 'numpy.sqrt', 'np.sqrt', (['std'], {}), '(std)\n', (1140, 1145), True, 'import numpy as np\n'), ((1302, 1323), 'numpy.log', 'np.log', (['(xpdfs + ln_Pk)'], {}), '(xpdfs + ln_Pk)\n', (1308, 1323), True, 'import numpy as np\n'), ((2492, 2508), 'numpy.sqrt', 'np.sqrt', (['Delta_t'], {}), '(Delta_t)\n', (2499, 2508), True, 'import numpy as np\n'), ((2748, 2773), 'numpy.exp', 'np.exp', (['(expPar1 + expPar2)'], {}), '(expPar1 + expPar2)\n', (2754, 2773), True, 'import numpy as np\n'), ((2638, 2660), 'numpy.sqrt', 'np.sqrt', (['Poisson[i, :]'], {}), '(Poisson[i, :])\n', (2645, 2660), True, 'import numpy as np\n')] |
import numpy as np
class Detector:
    """Ensemble detector: every sub-network produces a scalar vote for the
    image and a merger network turns the stacked votes into one rounded
    integer decision."""

    def __init__(self, subnets, merger):
        self.subnets = subnets
        self.merger = merger

    def detect(self, img):
        """Return the merged, rounded-to-int ensemble response for ``img``."""
        votes = np.asarray([net.feedforward(img)[0][0] for net in self.subnets])
        merged = self.merger.feedforward(votes.reshape(-1, 1))[0][0]
        # Round half-up to the nearest integer label (matches int(x + 0.5)).
        return int(merged + 0.5)

    def evaluate(self, test_data):
        """Count how many ``(input, target)`` pairs are detected correctly,
        where the target value sits at ``y[0][0]``."""
        return sum(self.detect(x) == y[0][0] for (x, y) in test_data)
| [
"numpy.asarray"
] | [((307, 326), 'numpy.asarray', 'np.asarray', (['outputs'], {}), '(outputs)\n', (317, 326), True, 'import numpy as np\n')] |
"""
General facilities for input (and output).
In order to work with TREPR data, these data need to be imported into the
trepr package. Therefore, the module provides importers for specific file
formats. In case of TREPR spectroscopy, the measurement control software is
often lab-written and specific for a local setup. One exception is the
Bruker BES3T file format written by <NAME> and Xenon software that can
be used to record TREPR data in combination with a pulsed EPR spectrometer.
Another class implemented in this module is the
:class:`trepr.io.DatasetImporterFactory`, a prerequisite for recipe-driven
data analysis. This factory returns the correct dataset importer for a
specific dataset depending on the information provided (usually, a filename).
Importers for specific file formats
===================================
Currently, the following importers for specific file formats are available:
* :class:`SpeksimImporter`
The Speksim format was developed in Freiburg in the group of Prof. G.
Kothe and used afterwards in the group of Prof. <NAME>. The spectrometer
control software was developed by Prof. <NAME>.
One speciality of this file format is that each transient is stored in an
individual file. For each of these transients, a timestamp as well as the
microwave frequency are recorded as well, allowing to analyse frequency
drifts and irregularities in the data acquisition.
* :class:`TezImporter`
The tez file format is the internal format used by the MATLAB(r) trepr
toolbox developed by <NAME>. It vaguely resembles the OpenDocument
format used, *e.g.*, by OpenOffice and LibreOffice. In short, the metadata
are contained in an XML file, while the numerical data are stored as IEEE
754 standard binaries in separate files. The ASpecD dataset format (adf)
is similar in some respect.
tez files usually carry ".tez" as file extension.
* :class:`Fsc2Importer`
The fsc2 file format originates from the fsc2 spectrometer control
software developed by <NAME> at the FU Berlin and used in a number of
labs. For details of the software, `see the fsc2 homepage
<https://users.physik.fu-berlin.de/~jtt/fsc2.phtml>`_. As the actual
experiments are defined in the "Experiment Description Language" (EDL),
the importer necessarily makes a number of assumptions that work with
the particular set of data recorded at a given time at FU Berlin.
fsc2 files usually carry ".dat" as file extension, although they are bare
text files.
Implementing importers for additional file formats is rather
straight-forward. For details, see the documentation of the :mod:`aspecd.io`
module.
Module documentation
====================
"""
import collections
import datetime
import glob
import io
import os
import re
import shutil
from zipfile import ZipFile
import numpy as np
import xmltodict
import aspecd.annotation
import aspecd.io
import aspecd.infofile
import aspecd.metadata
import aspecd.plotting
import aspecd.utils
class DatasetImporterFactory(aspecd.io.DatasetImporterFactory):
    """Factory for creating importer objects based on the source provided.

    Often, data are available in different formats, and deciding which
    importer is appropriate for a given format can be quite involved. To
    free other classes from having to contain the relevant code, a factory
    can be used.

    Currently, the sole information provided to decide about the appropriate
    importer is the source (a string). A concrete importer object is
    returned by the method ``get_importer()``. If no source is provided,
    an exception will be raised.

    If the source string does not match any of the importers handled by this
    module, the standard importers from the ASpecD framework are checked.
    See the documentation of the :class:`aspecd.io.DatasetImporterFactory`
    base class for details.

    Attributes
    ----------
    supported_formats : :class:`dict`
        Mapping whose keys are file extensions and whose values are the base
        names of the corresponding importers (*i.e.*, without the suffix
        "Importer").

    data_format : :class:`str`
        Name of the format that has been detected.

    """

    def __init__(self):
        super().__init__()
        # The empty extension stands for Speksim data, which live in a
        # directory rather than in a single file.
        self.supported_formats = {
            "": "Speksim",
            ".tez": "Tez",
            ".dat": "Fsc2",
            ".DSC": "BES3T",
            ".DTA": "BES3T",
        }
        self.data_format = None

    def _get_importer(self):
        # A directory as source always means Speksim data.
        if os.path.isdir(self.source):
            return SpeksimImporter(source=self.source)
        self._detect_format()
        importer = None
        if self.data_format:
            class_name = ".".join(["trepr", "io", self.data_format + "Importer"])
            importer = aspecd.utils.object_from_class_name(class_name)
            importer.source = self.source
        return importer

    def _detect_format(self):
        """Set :attr:`data_format` from the source's (possibly implicit)
        file extension; leave it untouched if nothing matches."""
        _, extension = os.path.splitext(self.source)
        if extension:
            if extension in self.supported_formats:
                self.data_format = self.supported_formats[extension]
        else:
            # No extension given: probe the file system for each known one.
            for candidate in self.supported_formats:
                if candidate and os.path.isfile(self.source + candidate):
                    self.data_format = self.supported_formats[candidate]
class SpeksimImporter(aspecd.io.DatasetImporter):
    """Importer for data in Freiburg Speksim format including its metadata.

    Datasets in this format consist of several time traces, each of which is
    stored in a text file. In order to analyse the raw data, it is necessary
    to store the time traces all together in one dataset.

    The corresponding metadata are read from an external file in infofile
    format. For further information about the infofile format see:
    `<https://www.till-biskup.de/en/software/info/format>`_.

    Parameters
    ----------
    source : :class:`str`
        Path to the raw data.

    Attributes
    ----------
    dataset : :obj:`trepr.dataset.ExperimentalDataset`
        Entity containing data and metadata.

    Raises
    ------
    FileNotFoundError
        Raised if no infofile could be found.

    """
    def __init__(self, source=''):
        super().__init__(source=source)
        # public properties
        self.dataset = None
        # protected properties
        # Each time-trace file starts with five header lines of metadata.
        self._headerlines = 5
        # Flat accumulator; reshaped to 2D once all traces are read.
        self._data = np.array([])
        self._file_format = str()
        self._time_stamps = np.array([])
        self._mwfreq = np.array([])
        self._comment_line = str()
        self._time_unit = str()
        self._field_unit = str()
        self._intensity_unit = str()
        self._mwfreq_unit = str()
        self._time_axis = np.array([])
        self._field_axis = np.array([])
        self._infofile = aspecd.infofile.Infofile()
        self._header = list()
        self._format_no = int()
        # Time-axis parameters, parsed from the 4th header line.
        self._time_start = float()
        self._time_stop = float()
        self._time_points = int()
    def _import(self):
        """Execute all necessary methods and write the data to a dataset."""
        self._import_raw_data()
        self._hand_data_to_dataset()
        self._create_time_axis()
        self._ensure_field_axis_in_SI_unit()
        self._hand_axes_to_dataset()
        self._load_infofile()
        self._map_infofile()
        self._create_time_stamp_data()
        self._create_mw_freq_data()
    def _import_raw_data(self):
        """Import the time traces and cut off the header lines."""
        filenames = self._get_filenames()
        for filename in filenames:
            self._process_timetrace(filename)
        # One row per time trace (i.e. per magnetic field point).
        self._data = \
            np.reshape(self._data, [len(filenames), self._time_points])
    def _get_filenames(self):
        # Time traces carry a three-digit numeric extension; sorting
        # guarantees ascending field order.
        filenames = sorted(glob.glob(os.path.join(self.source,
                                               '*.[0-9][0-9][0-9]')))
        return filenames
    def _process_timetrace(self, filename):
        # Read one trace file: parse its header, then append the numeric
        # part to the flat data accumulator.
        with open(filename) as file:
            raw_data = file.read()
        lines = raw_data.splitlines()
        self._header = lines[0:self._headerlines]
        self._parse_header()
        numeric_data = self._process_numeric_data(raw_data)
        self._data = np.append(self._data, numeric_data)
    def _process_numeric_data(self, raw_data):
        # noinspection PyTypeChecker
        numeric_data = np.loadtxt(io.StringIO(raw_data),
                                  skiprows=self._headerlines)
        numeric_data = np.reshape(numeric_data, self._time_points)
        return numeric_data
    def _parse_header(self):
        """Execute the methods which parse the header lines."""
        self._parse_header_1st_line()
        self._parse_header_2nd_line()
        self._parse_header_3rd_line()
        self._parse_header_4th_line()
        self._parse_header_5th_line()
    def _parse_header_1st_line(self):
        """Parse the 1st header line and extract the time and date.

        Example::

            Source : transient; Time : Wed Jun 7 08:44:57 2017

        """
        entries = self._header[0].split(';')
        self._file_format = entries[0].split(':')[1].strip()
        time_stamp = entries[1].split(' : ')[1]
        time_stamp = datetime.datetime.strptime(time_stamp,
                                                '%a %b %d %H:%M:%S %Y')
        # noinspection PyTypeChecker
        self._time_stamps = np.append(self._time_stamps, time_stamp)
    def _parse_header_2nd_line(self):
        """Extract the field and frequency unit from the 2nd header line.

        Example::

            B0 = 4080.000000 Gauss, mw = 9.684967 GHz

        """
        def parse_line(line):
            # Match "<name> = <number> <unit>" and return (value, unit).
            matches = re.search('([A-Za-z0-9]*) = ([0-9.]*) ([A-Za-z]*)', line)
            return float(matches.group(2)), matches.group(3)
        entries = self._header[1].split(',')
        field, self._field_unit = parse_line(entries[0])
        mwfreq, self._mwfreq_unit = parse_line(entries[1])
        self._field_axis = np.append(self._field_axis, field)
        self._mwfreq = np.append(self._mwfreq, mwfreq)
    def _parse_header_3rd_line(self):
        """Parse the 3rd header line.

        Example::

            NDI-T2 sa64 20/2 42/25 523nm/1mJ

        """
        self._comment_line = self._header[2]
    def _parse_header_4th_line(self):
        """Extract format number and time information from 4th header line.

        Example::

            1 5000 -1.001e-06 8.997e-06 0 0

        """
        entries = self._header[3].split()[0:4]
        self._format_no = int(entries[0])
        self._time_points = int(entries[1])
        self._time_start = float(entries[2])
        self._time_stop = float(entries[3])
    def _parse_header_5th_line(self):
        """Extract the time- and intensity-unit from the 5th header line.

        Example::

            s V

        """
        self._time_unit, self._intensity_unit = self._header[4].split()
    def _create_time_axis(self):
        """Create the time axis using the start, end, and time points."""
        self._time_axis = \
            np.linspace(self._time_start,
                        self._time_stop,
                        num=self._time_points)
    def _load_infofile(self):
        """Import the infofile and parse it."""
        infofile_name = self._get_infofile_name()
        if not infofile_name:
            raise FileNotFoundError('Infofile not found.')
        self._infofile.filename = infofile_name[0]
        self._infofile.parse()
    def _get_infofile_name(self):
        # Returns a (possibly empty) list of matching infofile paths.
        return glob.glob(os.path.join(self.source, '*.info'))
    def _map_infofile(self):
        """Bring the metadata to a given format."""
        infofile_version = self._infofile.infofile_info['version']
        self._map_metadata(infofile_version)
        self._assign_comment_as_annotation()
    def _assign_comment_as_annotation(self):
        # Store the infofile COMMENT field as a dataset annotation.
        comment = aspecd.annotation.Comment()
        comment.comment = self._infofile.parameters['COMMENT']
        self.dataset.annotate(comment)
    def _map_metadata(self, infofile_version):
        """Bring the metadata into a unified format."""
        mapper = aspecd.metadata.MetadataMapper()
        mapper.version = infofile_version
        mapper.metadata = self._infofile.parameters
        mapper.recipe_filename = 'trepr@metadata_mapper.yaml'
        mapper.map()
        self.dataset.metadata.from_dict(mapper.metadata)
    def _hand_data_to_dataset(self):
        """Hand the data to the dataset structure."""
        self.dataset.data.data = self._data
    # noinspection PyPep8Naming
    def _ensure_field_axis_in_SI_unit(self):  # noqa: N802
        """Ensure that the field axis unit is in SI unit."""
        if self._field_unit == 'Gauss':
            # 10 Gauss = 1 mT
            self._field_unit = 'mT'
            self._field_axis = self._field_axis / 10
    def _hand_axes_to_dataset(self):
        """Hand the axes and intensity to the dataset structure."""
        self.dataset.data.axes[0].values = self._field_axis
        self.dataset.data.axes[0].unit = self._field_unit
        self.dataset.data.axes[0].quantity = 'magnetic field'
        self.dataset.data.axes[1].values = self._time_axis
        self.dataset.data.axes[1].unit = self._time_unit
        self.dataset.data.axes[1].quantity = 'time'
        self.dataset.data.axes[2].unit = self._intensity_unit
        self.dataset.data.axes[2].quantity = 'intensity'
    def _create_time_stamp_data(self):
        """Hand the time stamp data to the dataset structure."""
        self.dataset.time_stamp.data = self._time_stamps
        self.dataset.time_stamp.axes[0].values = self._field_axis
        self.dataset.time_stamp.axes[0].unit = self._field_unit
        self.dataset.time_stamp.axes[0].quantity = 'magnetic field'
        self.dataset.time_stamp.axes[1].quantity = 'date'
    def _create_mw_freq_data(self):
        """Hand the microwave frequency data to the dataset structure."""
        self.dataset.microwave_frequency.data = self._mwfreq
        self.dataset.microwave_frequency.axes[0].values = self._field_axis
        self.dataset.microwave_frequency.axes[0].unit = self._field_unit
        self.dataset.microwave_frequency.axes[0].quantity = 'magnetic field'
        self.dataset.microwave_frequency.axes[1].unit = self._mwfreq_unit
        self.dataset.microwave_frequency.axes[1].quantity = \
            'microwave frequency'
class TezImporter(aspecd.io.DatasetImporter):
    """Importer for MATLAB(r) trepr toolbox format.

    The MATLAB(r) trepr toolbox format is basically a ZIP archive consisting
    of a list of standard IEEE 754 binary files containing the data and an
    XML file containing the accompanying metadata in structured form,
    enriched with information necessary to directly convert them back into
    MATLAB structures (corresponding to a Python :class:`dict`).

    Parameters
    ----------
    source : :class:`str`
        Path to the raw data.

    Attributes
    ----------
    dataset : :obj:`trepr.dataset.ExperimentalDataset`
        Entity containing data and metadata.

    """

    def __init__(self, source=''):
        # Dirty fix: cut file extension, as the importer operates on the
        # base name and appends ".tez" itself when unpacking.
        if source.endswith(".tez"):
            source = source[:-4]
        super().__init__(source=source)
        # public properties
        self.tez_mapper_filename = 'trepr@tez_mapper.yaml'
        self.xml_dict = None
        self.dataset = None
        self.metadata_filename = ''
        self.load_infofile = True
        # private properties
        self._metadata = None
        self._infofile = aspecd.infofile.Infofile()
        self._root_dir = ''
        self._filename = ''
        self._tmpdir = ''
        self._raw_data_name = ''
        self._raw_data_shape_filename = ''

    def _import(self):
        """Unpack the archive, read data and metadata, clean up."""
        self._unpack_zip()
        self._get_dir_and_filenames()
        self._import_xml_data_to_dict()
        self._get_data_from_binary()
        self._parse_axes()
        if self.load_infofile and self._infofile_exists():
            self._load_infofile()
            self._map_infofile()
        self._get_metadata_from_xml()
        self._get_mw_frequencies()
        self._remove_tmp_directory()

    def _unpack_zip(self):
        """Extract the ".tez" ZIP archive into a "tmp" sibling directory."""
        self._root_dir, self._filename = os.path.split(self.source)
        self._tmpdir = os.path.join(self._root_dir, 'tmp')
        with ZipFile(self.source + '.tez', 'r') as zip_obj:
            zip_obj.extractall(self._tmpdir)

    def _get_dir_and_filenames(self):
        """Locate metadata XML and binary data files inside the archive."""
        # The archive contains a single top-level (hidden) directory.
        hidden_filename = os.listdir(os.path.join(self._root_dir, 'tmp'))[0]
        self.metadata_filename = os.path.join(self._root_dir, 'tmp',
                                              hidden_filename, 'struct.xml')
        self._raw_data_name = \
            os.path.join(self._root_dir, 'tmp', hidden_filename,
                         'binaryData', 'data')
        self._raw_data_shape_filename = os.path.join(self._raw_data_name +
                                                     '.dim')

    def _import_xml_data_to_dict(self):
        """Parse the metadata XML file into a nested dict."""
        with open(self.metadata_filename, 'r') as file:
            xml_data = file.read()
        self.xml_dict = xmltodict.parse(xml_data)

    def _get_data_from_binary(self):
        """Read the IEEE 754 double data and reshape it from the .dim file."""
        with open(self._raw_data_shape_filename, 'r') as f:
            shape = list([int(x) for x in f.read().split()])
        shape.reverse()  # Shape is given in reverse order in .dim file
        raw_data = np.fromfile(self._raw_data_name, dtype='<f8')
        raw_data = np.reshape(raw_data, shape).transpose()
        self.dataset.data.data = raw_data

    def _parse_axes(self):
        """Assign magnetic-field and time axes from the XML metadata."""
        if len(self.xml_dict['struct']['axes']['data']['measure']) > 3:
            raise NotImplementedError('No method to import more than 3 axes. '
                                      'This task is left to you.')
        for axis in self.xml_dict['struct']['axes']['data']['measure']:
            self._get_magnetic_field_axis(axis)
            self._get_time_axis(axis)

    def _get_magnetic_field_axis(self, axis):
        """Fill axis 0 if the given XML axis entry is the magnetic field."""
        if '#text' in axis.keys() and axis['#text'] == 'magnetic field':
            # XML axis IDs are one-based; convert to zero-based index.
            id_ = int(axis['@id']) - 1
            self.dataset.data.axes[0].quantity = 'magnetic field'
            self.dataset.data.axes[0].values = \
                self._get_values_from_xml_dict(id_=id_)
            assert int(self.xml_dict['struct']['axes']['data']['values'][
                id_]['@id']) == (id_ + 1), 'Axis-IDs do not match!'
            self.dataset.data.axes[0].unit = self.xml_dict['struct']['axes'][
                'data']['unit'][id_]['#text']

    def _get_time_axis(self, axis):
        """Fill axis 1 if the given XML axis entry is the time axis."""
        if '#text' in axis.keys() and axis['#text'] == 'time':
            id_ = int(axis['@id']) - 1
            self.dataset.data.axes[1].quantity = 'time'
            self.dataset.data.axes[1].values = \
                self._get_values_from_xml_dict(id_=id_)
            assert int(self.xml_dict['struct']['axes']['data']['values'][
                id_]['@id']) == (id_ + 1)
            self.dataset.data.axes[1].unit = self.xml_dict['struct']['axes'][
                'data']['unit'][id_]['#text']

    def _infofile_exists(self):
        """Return whether a matching infofile exists; warn otherwise."""
        if self._get_infofile_name() and os.path.exists(
                self._get_infofile_name()[0]):
            return True
        print('No infofile found for dataset %s, import continued without '
              'infofile.' % os.path.split(self.source)[1])
        return False

    def _get_infofile_name(self):
        return glob.glob(''.join([self.source.strip(), '.info']))

    def _load_infofile(self):
        """Import infofile and parse it."""
        infofile_name = self._get_infofile_name()
        self._infofile.filename = infofile_name[0]
        self._infofile.parse()

    def _map_infofile(self):
        """Bring the metadata to a given format."""
        infofile_version = self._infofile.infofile_info['version']
        self._map_metadata(infofile_version)
        self._assign_comment_as_annotation()

    def _map_metadata(self, infofile_version):
        """Bring the metadata into a unified format."""
        mapper = aspecd.metadata.MetadataMapper()
        mapper.version = infofile_version
        mapper.metadata = self._infofile.parameters
        mapper.recipe_filename = 'trepr@metadata_mapper.yaml'
        mapper.map()
        self._metadata = \
            aspecd.utils.convert_keys_to_variable_names(mapper.metadata)

    def _assign_comment_as_annotation(self):
        comment = aspecd.annotation.Comment()
        comment.comment = self._infofile.parameters['COMMENT']
        self.dataset.annotate(comment)

    def _get_values_from_xml_dict(self, id_=None):
        """Return the numeric values of the XML axis with the given index."""
        values = np.asarray([float(i) for i in
                             self.xml_dict['struct']['axes']['data'][
                                 'values'][id_]['#text'].split(' ') if i])
        return values

    def _get_metadata_from_xml(self):
        """Map XML metadata onto the dataset using the tez mapper recipe."""
        mapping = aspecd.utils.Yaml()
        mapping.read_stream(
            aspecd.utils.get_package_data(self.tez_mapper_filename).encode())
        metadata_dict = collections.OrderedDict()
        for key, subdict in mapping.dict.items():
            metadata_dict[key] = collections.OrderedDict()
            for key2, value in subdict.items():
                metadata_dict[key][key2] = \
                    self._cascade(self.xml_dict['struct'], value)
        self._metadata = self._fuse_with_existing_metadata(metadata_dict)
        self.dataset.metadata.from_dict(self._metadata)
        # Cause Copycat in UdS measurement program:
        self.dataset.metadata.bridge.attenuation.unit = 'dB'

    def _fuse_with_existing_metadata(self, metadata_dict):
        """Merge XML metadata into metadata already read from the infofile."""
        metadata_dict = \
            aspecd.utils.remove_empty_values_from_dict(metadata_dict)
        infofile_metadata = \
            aspecd.utils.copy_values_between_dicts(target=self._metadata,
                                                   source=metadata_dict)
        return infofile_metadata

    def _cascade(self, dict_, value):
        """Resolve a dotted key path in *dict_* and normalise the leaf.

        The leaf is converted into a value/unit dict for physical
        quantities, a bare value for plain entries, or '' if empty.
        """
        keys = value.split('.')
        return_value = dict_
        for key in keys:
            return_value = return_value[key]
        if self._get_physical_quantity(return_value):
            return_value = self._get_physical_quantity(return_value)
        elif self._get_value(return_value):
            return_value = self._get_value(return_value)
        else:
            return_value = ''
        return return_value

    @staticmethod
    def _get_value(dict_):
        """Return the "#text" entry of *dict_*, or :obj:`None`."""
        return_value = None
        if '#text' in dict_.keys():
            return_value = dict_['#text']
        return return_value

    @staticmethod
    def _get_physical_quantity(dict_):
        """Return a value/unit dict if *dict_* describes a physical quantity.

        Returns :obj:`None` if *dict_* lacks a "value" or "unit" entry or
        the value carries no text.
        """
        return_value = None
        # Fix: the former condition `'value' and 'unit' in dict_.keys()`
        # only tested for 'unit' (the literal 'value' is always truthy),
        # so dict_['value'] could raise KeyError. Test both keys.
        if 'value' in dict_ and 'unit' in dict_:
            if '#text' in dict_['value'].keys():
                return_value = {
                    'value': float(dict_['value']['#text']),
                    'unit': dict_['unit']['#text']
                }
        return return_value

    def _get_mw_frequencies(self):
        """Get the dataset with real frequencies of each magnetic field point.

        This is special for the trepr dataset but useful to track frequency
        drifts. In the UdS measurement program, the frequency is
        automatically written into the tez structure.
        """
        if self._xml_contains_mw_frequencies():
            self.dataset.microwave_frequency.data = \
                np.asarray([float(i) for i in self.xml_dict['struct'][
                    'parameters']['bridge']['MWfrequency']['values'][
                    '#text'].split(' ') if i])
            self.dataset.microwave_frequency.axes[0] = \
                self.dataset.data.axes[0]
            self.dataset.microwave_frequency.axes[1].unit = \
                self.dataset.metadata.bridge.mw_frequency.unit
            self.dataset.microwave_frequency.axes[1].quantity = \
                'microwave frequency'

    def _xml_contains_mw_frequencies(self):
        answer = False
        if '#text' in self.xml_dict['struct']['parameters']['bridge'][
                'MWfrequency']['values']:
            answer = True
        return answer

    def _remove_tmp_directory(self):
        """Delete the temporary extraction directory, if present."""
        if os.path.exists(self._tmpdir):
            shutil.rmtree(self._tmpdir)
class Fsc2Importer(aspecd.io.DatasetImporter):
    """
    Importer for data in Berlin fsc2 format.

    These data have been recorded using the flexible fsc2 spectrometer
    control software written by <NAME>, allowing to define user-specific
    experiments. For details, `see the fsc2 homepage
    <https://users.physik.fu-berlin.de/~jtt/fsc2.phtml>`_. As the actual
    experiments are defined in the "Experiment Description Language" (EDL),
    the importer necessarily makes a number of assumptions that work with
    the particular set of data recorded at a given time at FU Berlin.

    Key aspects of the data format are:

    * The data files are bare text files (ASCII)
    * The entire EDL program defining the experiment is contained in a header
    * The header usually starts with ``%``
    * The data are stored as ASCII numbers in one (very) long column.
    * At the bottom of the header is a series of key-value pairs with
      crucial metadata necessary to reshape the data and assign the axes.

    The following strategy has been applied to reading the data:

    * Read the header first, separately storing the key-value pairs at the end.
    * Read the data using :func:`numpy.loadtxt`.
    * Reshape the data according to the parameters read from the header.

    Only those parameters that can definitely be obtained from the header
    are stored within the metadata of the dataset. Note that using this
    format by the authors predates the development of the info file format,
    hence many parameters can only implicitly be guessed, as most of the
    time no corresponding record of metadata exists.

    .. note::
        While it may seem strange to store the data as one long column,
        this is actually a very robust way of storing data, as each
        individual trace gets written to the file immediately after
        obtaining the data from the transient recorder. Thus, even if
        something crashed during a measurement (blackout, ...), all data up
        to this event are usually retained.

    .. versionadded:: 0.2

    """

    def __init__(self, source=''):
        super().__init__(source=source)
        self._header = []
        self._parameters = dict()
        self._comment = []
        self._devices = []
        # Map EDL device identifiers to human-readable device names.
        self._device_list = {
            'tds520A': 'Tektronix TDS520A',
            'bh15_fc': 'Bruker BH15',
            'aeg_x_band': 'AEG Magnet Power Supply',
            'er035m_s': 'Bruker ER 035 M',
        }
        self._orig_source = ''

    def _import(self):
        # fsc2 files carry ".dat"; append it if the source lacks an
        # extension, restoring the original source afterwards.
        if not os.path.splitext(self.source)[1]:
            self._orig_source = self.source
            self.source += '.dat'
        self._read_header()
        self._load_and_assign_data()
        self._assign_field_axis()
        self._assign_time_axis()
        self._assign_intensity_axis()
        # Metadata can only be assigned after the axes
        self._assign_metadata()
        self._assign_comment()
        self.source = self._orig_source or self.source

    # pylint: disable=too-many-nested-blocks
    def _read_header(self):
        """
        fsc2 files start with a header containing the entire program.

        The header is usually marked with ``%``, and at the end, after the
        program, a series of key-value pairs are printed that are crucial to
        reshape the data and assign the axes.

        The list of parameters got extended over time, and the very end of
        the header consists of comments the user can enter immediately
        before starting the experiment.
        """
        in_header = True
        parameter_line = False
        with open(self.source, 'r', encoding="utf8") as file:
            while in_header:
                line = file.readline()
                if line.startswith('%'):
                    line = line.replace('%', '', 1).strip()
                    self._header.append(line)
                    # "Number of runs" marks the start of the key-value
                    # parameter section at the bottom of the header.
                    if line.startswith('Number of runs'):
                        parameter_line = True
                    if parameter_line:
                        if ' = ' in line:
                            key, value = line.split(' = ')
                            try:
                                # Convert to int/float; leave as string
                                # for values such as "5 mT".
                                if '.' in value:
                                    value = float(value)
                                else:
                                    value = int(value)
                            except ValueError:
                                pass
                            self._parameters[key.strip()] = value
                        elif line:
                            self._comment.append(line)
                else:
                    # First non-"%" line (or EOF) ends the header.
                    in_header = False
        self._get_list_of_active_devices()

    def _load_and_assign_data(self):
        """Load the long data column and reshape it into traces."""
        data = np.loadtxt(self.source, comments="% ")
        self.dataset.data.data = \
            data.reshape([-1, self._parameters['Number of points']])

    def _assign_field_axis(self):
        """Assign the magnetic-field axis (converted from Gauss to mT)."""
        field_start = float(self._parameters['Start field'].split(' ')[0]) / 10
        field_end = float(self._parameters['End field'].split(' ')[0]) / 10
        self.dataset.data.axes[0].values = \
            np.linspace(field_start, field_end, self.dataset.data.data.shape[0])
        self.dataset.data.axes[0].quantity = 'magnetic field'
        self.dataset.data.axes[0].unit = 'mT'

    def _assign_time_axis(self):
        """Assign the time axis, with t=0 at the trigger position."""
        trigger_position = self._parameters['Trigger position']
        number_of_points = self.dataset.data.data.shape[1]
        relative_trigger_position = trigger_position / number_of_points
        # Slice length is given in microseconds.
        slice_length = \
            float(self._parameters['Slice length'].split(' ')[0]) * 1e-6
        self.dataset.data.axes[1].values = np.linspace(
            -slice_length * relative_trigger_position,
            slice_length - (slice_length * relative_trigger_position),
            number_of_points
        )
        self.dataset.data.axes[1].quantity = 'time'
        self.dataset.data.axes[1].unit = 's'

    def _assign_intensity_axis(self):
        self.dataset.data.axes[2].quantity = 'intensity'
        self.dataset.data.axes[2].unit = 'V'

    def _assign_metadata(self):
        """
        Assign as many metadata as sensibly possible.

        Admittedly, this is currently pretty much hand-coding. For sure,
        there are more elegant ways to do it.
        """
        self._assign_transient_metadata()
        self._assign_recorder_metadata()
        self._assign_bridge_metadata()
        self._assign_temperature_control_metadata()
        self._assign_pump_metadata()
        self._assign_magnetic_field_metadata()
        # Needs to be done after assigning pump metadata
        self._assign_experiment_metadata()

    def _assign_transient_metadata(self):
        self.dataset.metadata.transient.points = \
            self._parameters['Number of points']
        self.dataset.metadata.transient.trigger_position = \
            self._parameters['Trigger position']
        value, _ = self._parameters['Slice length'].split()
        # Slice length is given in microseconds.
        self.dataset.metadata.transient.length.value = float(value) * 1e-6
        self.dataset.metadata.transient.length.unit = 's'

    def _assign_recorder_metadata(self):
        if 'Sensitivity' in self._parameters:
            self._assign_value_unit('Sensitivity',
                                    self.dataset.metadata.recorder.sensitivity)
        if 'Time base' in self._parameters:
            self._assign_value_unit('Time base',
                                    self.dataset.metadata.recorder.time_base)
        if 'Number of averages' in self._parameters:
            self.dataset.metadata.recorder.averages = \
                self._parameters['Number of averages']
        self.dataset.metadata.recorder.pretrigger.value = \
            np.abs(self.dataset.data.axes[1].values[0])
        self.dataset.metadata.recorder.pretrigger.unit = \
            self.dataset.data.axes[1].unit
        if 'tds520A' in self._devices:
            self.dataset.metadata.recorder.model = self._device_list['tds520A']

    def _assign_bridge_metadata(self):
        if 'MW frequency' in self._parameters:
            self._assign_value_unit('MW frequency',
                                    self.dataset.metadata.bridge.mw_frequency)
        if 'Attenuation' in self._parameters:
            self._assign_value_unit('Attenuation',
                                    self.dataset.metadata.bridge.attenuation)

    def _assign_temperature_control_metadata(self):
        if 'Temperature' in self._parameters:
            self._assign_value_unit(
                'Temperature',
                self.dataset.metadata.temperature_control.temperature)

    def _assign_pump_metadata(self):
        # Header format: "<wavelength> <unit> (<rate> <unit>)".
        if 'Laser wavelength' in self._parameters:
            wavelength, repetition_rate = \
                self._parameters['Laser wavelength'].split(' (')
            value, unit = wavelength.split()
            self.dataset.metadata.pump.wavelength.value = float(value)
            self.dataset.metadata.pump.wavelength.unit = unit
            value, unit = repetition_rate.split()
            self.dataset.metadata.pump.repetition_rate.value = float(value)
            self.dataset.metadata.pump.repetition_rate.unit = \
                unit.replace(')', '')

    def _assign_magnetic_field_metadata(self):
        if 'bh15_fc' in self._devices:
            self.dataset.metadata.magnetic_field.controller = \
                self._device_list['bh15_fc']
            self.dataset.metadata.magnetic_field.power_supply = \
                self._device_list['bh15_fc']
            self.dataset.metadata.magnetic_field.field_probe_model = \
                self._device_list['bh15_fc']
            self.dataset.metadata.magnetic_field.field_probe_type = 'Hall probe'
        if 'aeg_x_band' in self._devices:
            self.dataset.metadata.magnetic_field.controller = 'home-built'
            self.dataset.metadata.magnetic_field.power_supply = \
                self._device_list['aeg_x_band']
        if 'er035m_s' in self._devices:
            self.dataset.metadata.magnetic_field.field_probe_model = \
                self._device_list['er035m_s']
            self.dataset.metadata.magnetic_field.field_probe_type = \
                'NMR Gaussmeter'

    def _assign_experiment_metadata(self):
        self.dataset.metadata.experiment.runs = \
            self._parameters['Number of runs']
        self.dataset.metadata.experiment.shot_repetition_rate = \
            self.dataset.metadata.pump.repetition_rate

    def _assign_comment(self):
        """Attach the user comment from the header as a dataset annotation."""
        if self._comment:
            comment = aspecd.annotation.Comment()
            # Fix: set the "comment" attribute (as the other importers in
            # this module do); assigning to "content" clobbered the
            # annotation's content dict instead of setting the comment text.
            comment.comment = ' '.join(self._comment)
            self.dataset.annotate(comment)

    def _assign_value_unit(self, parameter='', metadata=None):
        """Split "<value> <unit>" parameter and assign to metadata object."""
        value, unit = self._parameters[parameter].split()
        metadata.value = float(value)
        # Strip trailing "/..." qualifiers from the unit (e.g. "dB/...").
        metadata.unit = unit.split('/')[0]

    def _get_list_of_active_devices(self):
        """Collect device identifiers from the header's DEVICES section."""
        in_devices = False
        for line in self._header:
            if 'DEVICES:' in line:
                in_devices = True
                continue
            if 'VARIABLES:' in line:
                break
            if in_devices and line and not line.startswith('//'):
                device = line.split()[0].replace(';', '').strip()
                self._devices.append(device)
class BES3TImporter(aspecd.io.DatasetImporter):
    """
    Importer for data in Bruker BES3T format.

    The Bruker BES3T format consists of at least two files, a data file with
    extension "DTA" and a descriptor file with extension "DSC". In case of
    multidimensional data, additional data files may be written (e.g.
    with extension ".YGF"), similarly to the case where the X axis is not
    equidistant (at least the BES3T specification allows this situation).

    This importer currently only supports a smaller subset of the
    specification, *e.g.* only data without additional axes data files and
    only real values. This may, however, change in the future.

    .. versionadded:: 0.2

    """
    def __init__(self, source=None):
        super().__init__(source=source)
        self._orig_source = ''
        # Key-value pairs read from the DSC descriptor file.
        self._dsc_keys = dict()
        self._mapper_filename = 'bes3t_dsc_keys.yaml'
    def _import(self):
        # Work on the extension-less base name, as both ".DSC" and ".DTA"
        # are read; the original source is restored afterwards.
        base_file, extension = os.path.splitext(self.source)
        if extension:
            self._orig_source = self.source
            self.source = base_file
        self._read_dsc_file()
        self._map_dsc_file()
        self._read_dta_file()
        self._assign_axes()
        self.source = self._orig_source or self.source
    def _read_dsc_file(self):  # noqa: MC0001
        # Parse the DSC descriptor: "*" starts a comment, "#" starts a
        # block; key-value pairs are only collected from the DESC and SPL
        # blocks, with numeric values converted to int/float where possible.
        with open(self.source + '.DSC', 'r', encoding='utf8') as file:
            file_contents = file.readlines()
        block = ''
        for line in file_contents:
            if '*' in line:
                line, _ = line.split('*', maxsplit=1)
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                block, _ = line[1:].split()
                continue
            if block and block in ['DESC', 'SPL']:
                try:
                    key, value = line.split(maxsplit=1)
                    value = value.replace("'", "")
                    try:
                        if '.' in value:
                            value = float(value)
                        else:
                            value = int(value)
                    except ValueError:
                        pass
                except ValueError:
                    # Line consists of a bare key without a value.
                    key = line.split(maxsplit=1)[0]
                    value = None
                self._dsc_keys[key] = value
    def _map_dsc_file(self):
        # Map DSC keys onto dataset metadata via the packaged YAML recipe.
        yaml_file = aspecd.utils.Yaml()
        yaml_file.read_stream(aspecd.utils.get_package_data(
            'trepr@' + self._mapper_filename).encode())
        metadata_dict = {}
        metadata_dict = self._traverse(yaml_file.dict, metadata_dict)
        self.dataset.metadata.from_dict(metadata_dict)
        self.dataset.label = self._dsc_keys['TITL']
    def _traverse(self, dict_, metadata_dict):
        # Recursively replace DSC key names in the mapping by their values.
        for key, value in dict_.items():
            if isinstance(value, dict):
                metadata_dict[key] = {}
                self._traverse(value, metadata_dict[key])
            elif value in self._dsc_keys.keys():
                metadata_dict[key] = self._dsc_keys[value]
        return metadata_dict
    def _read_dta_file(self):
        # Read the raw binary data; byte order and item format are taken
        # from the DSC keys BSEQ and IRFMT.
        filename = self.source + '.DTA'
        byte_order = '>' if self._dsc_keys['BSEQ'] == 'BIG' else '<'
        format_ = {
            'S': 'h',
            'I': 'i',
            'F': 'f',
            'D': 'd',
        }
        dtype = byte_order + format_[self._dsc_keys['IRFMT']]
        self.dataset.data.data = np.fromfile(filename, dtype=dtype)
        if 'YPTS' in self._dsc_keys and self._dsc_keys['YPTS']:
            self.dataset.data.data = \
                np.reshape(self.dataset.data.data,
                           (-1, self._dsc_keys['XPTS'])).T
            # Ensure the magnetic field ends up as the first dimension.
            if self._dsc_keys['XNAM'].lower() == "time":
                self.dataset.data.data = self.dataset.data.data.T
    def _assign_axes(self):
        # Construct equidistant axes from MIN/WID/PTS; swap x and y when
        # the DSC declares time as the X axis, so that axis 0 is always
        # the magnetic field. Gauss and ns are converted to SI-ish units.
        yaxis = None
        xaxis = np.linspace(self._dsc_keys['XMIN'],
                            self._dsc_keys['XMIN'] + self._dsc_keys['XWID'],
                            self._dsc_keys['XPTS'])
        if 'YTYP' in self._dsc_keys and self._dsc_keys['YTYP'] == 'IDX':
            yaxis = np.linspace(self._dsc_keys['YMIN'],
                                self._dsc_keys['YMIN']
                                + self._dsc_keys['YWID'],
                                self._dsc_keys['YPTS'])
        if 'XNAM' in self._dsc_keys \
                and self._dsc_keys['XNAM'].lower() == "time":
            self.dataset.data.axes[0].values = yaxis
            self.dataset.data.axes[0].unit = self._dsc_keys['YUNI']
            self.dataset.data.axes[1].values = xaxis
            self.dataset.data.axes[1].unit = self._dsc_keys['XUNI']
        else:
            self.dataset.data.axes[0].values = xaxis
            self.dataset.data.axes[0].unit = self._dsc_keys['XUNI']
            if yaxis is not None:
                self.dataset.data.axes[1].values = yaxis
                self.dataset.data.axes[1].unit = self._dsc_keys['YUNI']
        for axis in self.dataset.data.axes:
            if axis.unit == 'G':
                axis.values /= 10
                axis.unit = 'mT'
                axis.quantity = 'magnetic field'
            if axis.unit == 'ns':
                axis.values /= 1e9
                axis.unit = 's'
                axis.quantity = 'time'
        self.dataset.data.axes[-1].quantity = self._dsc_keys['IRNAM'].lower()
        self.dataset.data.axes[-1].unit = self._dsc_keys['IRUNI']
| [
"numpy.abs",
"os.path.isfile",
"shutil.rmtree",
"os.path.join",
"os.path.exists",
"numpy.append",
"numpy.reshape",
"numpy.linspace",
"numpy.loadtxt",
"re.search",
"io.StringIO",
"datetime.datetime.strptime",
"zipfile.ZipFile",
"os.path.isdir",
"numpy.fromfile",
"numpy.array",
"os.pat... | [((4628, 4654), 'os.path.isdir', 'os.path.isdir', (['self.source'], {}), '(self.source)\n', (4641, 4654), False, 'import os\n'), ((5044, 5073), 'os.path.splitext', 'os.path.splitext', (['self.source'], {}), '(self.source)\n', (5060, 5073), False, 'import os\n'), ((6752, 6764), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6760, 6764), True, 'import numpy as np\n'), ((6827, 6839), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6835, 6839), True, 'import numpy as np\n'), ((6863, 6875), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6871, 6875), True, 'import numpy as np\n'), ((7073, 7085), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7081, 7085), True, 'import numpy as np\n'), ((7113, 7125), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7121, 7125), True, 'import numpy as np\n'), ((8590, 8625), 'numpy.append', 'np.append', (['self._data', 'numeric_data'], {}), '(self._data, numeric_data)\n', (8599, 8625), True, 'import numpy as np\n'), ((8853, 8896), 'numpy.reshape', 'np.reshape', (['numeric_data', 'self._time_points'], {}), '(numeric_data, self._time_points)\n', (8863, 8896), True, 'import numpy as np\n'), ((9587, 9649), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['time_stamp', '"""%a %b %d %H:%M:%S %Y"""'], {}), "(time_stamp, '%a %b %d %H:%M:%S %Y')\n", (9613, 9649), False, 'import datetime\n'), ((9763, 9803), 'numpy.append', 'np.append', (['self._time_stamps', 'time_stamp'], {}), '(self._time_stamps, time_stamp)\n', (9772, 9803), True, 'import numpy as np\n'), ((10365, 10399), 'numpy.append', 'np.append', (['self._field_axis', 'field'], {}), '(self._field_axis, field)\n', (10374, 10399), True, 'import numpy as np\n'), ((10423, 10454), 'numpy.append', 'np.append', (['self._mwfreq', 'mwfreq'], {}), '(self._mwfreq, mwfreq)\n', (10432, 10454), True, 'import numpy as np\n'), ((11475, 11544), 'numpy.linspace', 'np.linspace', (['self._time_start', 'self._time_stop'], {'num': 'self._time_points'}), 
'(self._time_start, self._time_stop, num=self._time_points)\n', (11486, 11544), True, 'import numpy as np\n'), ((16646, 16672), 'os.path.split', 'os.path.split', (['self.source'], {}), '(self.source)\n', (16659, 16672), False, 'import os\n'), ((16696, 16731), 'os.path.join', 'os.path.join', (['self._root_dir', '"""tmp"""'], {}), "(self._root_dir, 'tmp')\n", (16708, 16731), False, 'import os\n'), ((16986, 17052), 'os.path.join', 'os.path.join', (['self._root_dir', '"""tmp"""', 'hidden_filename', '"""struct.xml"""'], {}), "(self._root_dir, 'tmp', hidden_filename, 'struct.xml')\n", (16998, 17052), False, 'import os\n'), ((17143, 17217), 'os.path.join', 'os.path.join', (['self._root_dir', '"""tmp"""', 'hidden_filename', '"""binaryData"""', '"""data"""'], {}), "(self._root_dir, 'tmp', hidden_filename, 'binaryData', 'data')\n", (17155, 17217), False, 'import os\n'), ((17283, 17325), 'os.path.join', 'os.path.join', (["(self._raw_data_name + '.dim')"], {}), "(self._raw_data_name + '.dim')\n", (17295, 17325), False, 'import os\n'), ((17535, 17560), 'xmltodict.parse', 'xmltodict.parse', (['xml_data'], {}), '(xml_data)\n', (17550, 17560), False, 'import xmltodict\n'), ((17811, 17856), 'numpy.fromfile', 'np.fromfile', (['self._raw_data_name'], {'dtype': '"""<f8"""'}), "(self._raw_data_name, dtype='<f8')\n", (17822, 17856), True, 'import numpy as np\n'), ((21461, 21486), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (21484, 21486), False, 'import collections\n'), ((24598, 24626), 'os.path.exists', 'os.path.exists', (['self._tmpdir'], {}), '(self._tmpdir)\n', (24612, 24626), False, 'import os\n'), ((29458, 29496), 'numpy.loadtxt', 'np.loadtxt', (['self.source'], {'comments': '"""% """'}), "(self.source, comments='% ')\n", (29468, 29496), True, 'import numpy as np\n'), ((29849, 29917), 'numpy.linspace', 'np.linspace', (['field_start', 'field_end', 'self.dataset.data.data.shape[0]'], {}), '(field_start, field_end, self.dataset.data.data.shape[0])\n', 
(29860, 29917), True, 'import numpy as np\n'), ((30396, 30530), 'numpy.linspace', 'np.linspace', (['(-slice_length * relative_trigger_position)', '(slice_length - slice_length * relative_trigger_position)', 'number_of_points'], {}), '(-slice_length * relative_trigger_position, slice_length - \n slice_length * relative_trigger_position, number_of_points)\n', (30407, 30530), True, 'import numpy as np\n'), ((32473, 32516), 'numpy.abs', 'np.abs', (['self.dataset.data.axes[1].values[0]'], {}), '(self.dataset.data.axes[1].values[0])\n', (32479, 32516), True, 'import numpy as np\n'), ((37039, 37068), 'os.path.splitext', 'os.path.splitext', (['self.source'], {}), '(self.source)\n', (37055, 37068), False, 'import os\n'), ((39543, 39577), 'numpy.fromfile', 'np.fromfile', (['filename'], {'dtype': 'dtype'}), '(filename, dtype=dtype)\n', (39554, 39577), True, 'import numpy as np\n'), ((39972, 40085), 'numpy.linspace', 'np.linspace', (["self._dsc_keys['XMIN']", "(self._dsc_keys['XMIN'] + self._dsc_keys['XWID'])", "self._dsc_keys['XPTS']"], {}), "(self._dsc_keys['XMIN'], self._dsc_keys['XMIN'] + self._dsc_keys\n ['XWID'], self._dsc_keys['XPTS'])\n", (39983, 40085), True, 'import numpy as np\n'), ((8745, 8766), 'io.StringIO', 'io.StringIO', (['raw_data'], {}), '(raw_data)\n', (8756, 8766), False, 'import io\n'), ((10057, 10114), 're.search', 're.search', (['"""([A-Za-z0-9]*) = ([0-9.]*) ([A-Za-z]*)"""', 'line'], {}), "('([A-Za-z0-9]*) = ([0-9.]*) ([A-Za-z]*)', line)\n", (10066, 10114), False, 'import re\n'), ((11953, 11988), 'os.path.join', 'os.path.join', (['self.source', '"""*.info"""'], {}), "(self.source, '*.info')\n", (11965, 11988), False, 'import os\n'), ((16745, 16779), 'zipfile.ZipFile', 'ZipFile', (["(self.source + '.tez')", '"""r"""'], {}), "(self.source + '.tez', 'r')\n", (16752, 16779), False, 'from zipfile import ZipFile\n'), ((21570, 21595), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (21593, 21595), False, 'import collections\n'), 
((24640, 24667), 'shutil.rmtree', 'shutil.rmtree', (['self._tmpdir'], {}), '(self._tmpdir)\n', (24653, 24667), False, 'import shutil\n'), ((40230, 40343), 'numpy.linspace', 'np.linspace', (["self._dsc_keys['YMIN']", "(self._dsc_keys['YMIN'] + self._dsc_keys['YWID'])", "self._dsc_keys['YPTS']"], {}), "(self._dsc_keys['YMIN'], self._dsc_keys['YMIN'] + self._dsc_keys\n ['YWID'], self._dsc_keys['YPTS'])\n", (40241, 40343), True, 'import numpy as np\n'), ((5387, 5426), 'os.path.isfile', 'os.path.isfile', (['(self.source + extension)'], {}), '(self.source + extension)\n', (5401, 5426), False, 'import os\n'), ((8151, 8197), 'os.path.join', 'os.path.join', (['self.source', '"""*.[0-9][0-9][0-9]"""'], {}), "(self.source, '*.[0-9][0-9][0-9]')\n", (8163, 8197), False, 'import os\n'), ((16913, 16948), 'os.path.join', 'os.path.join', (['self._root_dir', '"""tmp"""'], {}), "(self._root_dir, 'tmp')\n", (16925, 16948), False, 'import os\n'), ((17876, 17903), 'numpy.reshape', 'np.reshape', (['raw_data', 'shape'], {}), '(raw_data, shape)\n', (17886, 17903), True, 'import numpy as np\n'), ((27240, 27269), 'os.path.splitext', 'os.path.splitext', (['self.source'], {}), '(self.source)\n', (27256, 27269), False, 'import os\n'), ((39697, 39761), 'numpy.reshape', 'np.reshape', (['self.dataset.data.data', "(-1, self._dsc_keys['XPTS'])"], {}), "(self.dataset.data.data, (-1, self._dsc_keys['XPTS']))\n", (39707, 39761), True, 'import numpy as np\n'), ((19763, 19789), 'os.path.split', 'os.path.split', (['self.source'], {}), '(self.source)\n', (19776, 19789), False, 'import os\n')] |
# example module containing functions for using dask.delayed to lazy load gadget chunks
# and then compute stats across gadget chunks with dask.delayed.
#
# main method to call: selector_stats()
#
# some "features":
# * runs in parallel if a dask Client is running: reading and local by-chunk stats will
# be spread across the Client. Global cross-chunk stats are calculated by aggregation
# of local by-chunk stats.
# * works with selection objects
# * ONLY WORKS FOR ONE FIELD RIGHT NOW
#
# example usage:
# import yt
# from dask_chunking import gadget as ga
# ds = yt.load_sample("snapshot_033")
# ptf = {'PartType0': 'Mass'}
# sp = ds.sphere(ds.domain_center,(2,'code_length'))
# glob_stats, chunk_stats = ga.selector_stats(ds,ptf,sp.selector)
#
import yt
import h5py
from dask import dataframe as df, array as da, delayed, compute
import numpy as np
import dask
def load_single_chunk(self,data_file,ptf,selector):
    """Read one gadget HDF5 data file and return selected field data.

    Parameters
    ----------
    self : yt particle-IO handler (provides ``_get_smoothing_length``,
        ``var_mass``, ``_known_ptypes``, ``_element_names`` and ``ds``).
    data_file : yt data-file object with ``start``/``end`` row bounds,
        ``filename`` and per-type particle counts.
    ptf : dict mapping particle type name -> list of field names to read.
    selector : yt geometric selector; if it advertises ``is_all_data``
        everything in the file slice is kept without point-wise testing.

    Returns
    -------
    list of ``[(ptype, field), dask_array]`` pairs, one per non-empty field.
    """
    # row range of this file's slice
    si, ei = data_file.start, data_file.end
    f = h5py.File(data_file.filename, mode="r")
    chunk_data = []
    for ptype, field_list in sorted(ptf.items()):
        if data_file.total_particles[ptype] == 0:
            continue
        g = f[f"/{ptype}"]
        if getattr(selector, "is_all_data", False):
            # fast path: keep every particle, no coordinate read needed
            mask = slice(None, None, None)
            mask_sum = data_file.total_particles[ptype]
            hsmls = None
        else:
            coords = g["Coordinates"][si:ei].astype("float64")
            if ptype == "PartType0":
                hsmls = self._get_smoothing_length(
                    data_file, g["Coordinates"].dtype, g["Coordinates"].shape
                ).astype("float64")
            else:
                # NOTE(review): scalar sentinel; select_points accepts it,
                # but ``hsmls[mask]`` below would fail if a non-PartType0
                # type ever requested "smoothing_length" -- verify upstream.
                hsmls = 0.0
            mask = selector.select_points(
                coords[:, 0], coords[:, 1], coords[:, 2], hsmls
            )
            if mask is not None:
                mask_sum = mask.sum()
            del coords
        if mask is None:
            # selector matched nothing in this file for this ptype
            continue
        for field in field_list:
            if field in ("Mass", "Masses") and ptype not in self.var_mass:
                # constant-mass types: fill from the header Massarr table
                data = np.empty(mask_sum, dtype="float64")
                ind = self._known_ptypes.index(ptype)
                data[:] = self.ds["Massarr"][ind]
            elif field in self._element_names:
                rfield = "ElementAbundance/" + field
                data = g[rfield][si:ei][mask, ...]
            elif field.startswith("Metallicity_"):
                # per-species column selected by the trailing index
                col = int(field.rsplit("_", 1)[-1])
                data = g["Metallicity"][si:ei, col][mask]
            elif field.startswith("GFM_Metals_"):
                col = int(field.rsplit("_", 1)[-1])
                data = g["GFM_Metals"][si:ei, col][mask]
            elif field.startswith("Chemistry_"):
                col = int(field.rsplit("_", 1)[-1])
                data = g["ChemistryAbundances"][si:ei, col][mask]
            elif field == "smoothing_length":
                # This is for frontends which do not store
                # the smoothing length on-disk, so we do not
                # attempt to read them, but instead assume
                # that they are calculated in _get_smoothing_length.
                if hsmls is None:
                    hsmls = self._get_smoothing_length(
                        data_file,
                        g["Coordinates"].dtype,
                        g["Coordinates"].shape,
                    ).astype("float64")
                data = hsmls[mask]
            else:
                data = g[field][si:ei][mask, ...]
            if data.size > 0:
                # NOTE: we're actually SUBCHUNKING here!
                subchunk_size=100000
                if data.ndim > 1:
                    subchunk_shape = (subchunk_size,1) # dont chunk up multidim arrays like Coordinates
                else:
                    # NOTE(review): this is just the int, not a 1-tuple;
                    # da.from_array accepts both, so behavior is unchanged.
                    subchunk_shape = (subchunk_size)
                chunk_data.append([(ptype, field), da.from_array(data,chunks=subchunk_shape)])
    f.close()
    return chunk_data
def read_particle_fields_delayed(self,chunks, ptf, selector):
    """Build one dask-delayed read task per underlying data file.

    Parameters
    ----------
    self : yt particle-IO handler, forwarded to ``load_single_chunk``.
    chunks : iterable of yt chunk objects; each exposes ``objs`` whose
        members carry ``data_files``.
    ptf : dict mapping particle type -> field list, forwarded unchanged.
    selector : yt selector object, forwarded unchanged.

    Returns
    -------
    list of dask.delayed objects in deterministic (filename, start) order;
    computing one performs the actual file read.
    """
    # collect the unique set of data files referenced by all chunks
    data_files = set()
    for chunk in chunks:
        for obj in chunk.objs:
            data_files.update(obj.data_files)
    # BUGFIX: wrap the *function* with delayed and call the wrapper.
    # The previous code did delayed(load_single_chunk(...)), which ran the
    # read eagerly at graph-construction time and only wrapped the
    # already-computed result, defeating the module's lazy-loading purpose.
    all_chunks = []
    for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):
        all_chunks.append(delayed(load_single_chunk)(self, data_file, ptf, selector))
    return all_chunks
class MockSelector:
    """Stand-in selector that marks every particle as selected.

    ``is_all_data`` is probed (via getattr) by ``load_single_chunk``; True
    makes it take the select-everything fast path instead of calling a
    real geometric ``select_points``.
    """
    is_all_data = True
class MockChunkObject:
    """Minimal substitute for a yt chunk member: just exposes ``data_files``."""
    def __init__(self, data_file):
        # the delayed reader expects an iterable of data files
        self.data_files = [data_file]
class MockChunk:
    """Minimal substitute for a yt chunk: wraps one data file in ``objs``."""
    def __init__(self, data_file):
        self.objs = [MockChunkObject(data_file)]
@dask.delayed
def npmeth(chunk, meths=('min',)):
    """Evaluate array attributes/methods (e.g. min, max, size) on one chunk.

    Parameters
    ----------
    chunk : list as returned by ``load_single_chunk``; only the first
        entry's array (``chunk[0][1]``) is inspected.
    meths : iterable of attribute names. Callable attributes (``min``,
        ``max``...) are invoked; plain attributes (``size``) are read
        directly. Default is an immutable tuple, avoiding the shared
        mutable-default pitfall of the old ``['min']``.

    Returns
    -------
    dict mapping each *available* attribute name to its value; names the
    array does not provide are omitted.
    """
    stats = {}
    if chunk:
        arr = chunk[0][1]
        for meth in meths:
            if hasattr(arr, meth):
                attr = getattr(arr, meth)
                # BUGFIX: record per-name instead of dict(zip(meths, results));
                # the old zip silently mislabeled values whenever an attribute
                # was missing and the two sequences fell out of step.
                stats[meth] = attr() if callable(attr) else attr
    return stats
def selector_stats(ds,ptf,selector=None):
    """Compute per-chunk ("local") and cross-chunk ("global") statistics.

    Parameters
    ----------
    ds : yt dataset providing ``index.data_files`` and ``index.io``.
    ptf : dict mapping particle type -> field list (one field supported).
    selector : optional yt selector; when None a select-everything
        ``MockSelector`` is used.

    Returns
    -------
    (global_stats, chunk_stats) -- ``chunk_stats`` is a tuple of per-chunk
    dicts; ``global_stats`` aggregates min/max/mean/size across chunks.
    """
    if selector is None:
        selector = MockSelector()
    # assemble data_file "mock" chunks
    chunks = [MockChunk(data_file) for data_file in ds.index.data_files]
    # read chunks (delayed)
    print("building delayed read objects ")
    my_gen_delayed = read_particle_fields_delayed(ds.index.io, chunks, ptf, selector)
    # calculate chunk stats (delayed)
    print("building delayed stats")
    stat_meths = ['min','max','sum','size','std']
    stats = [npmeth(chunk,stat_meths) for chunk in my_gen_delayed]
    # actually read and compute the stats -- chunk data will not be stored, only the stats
    print("computing local stats")
    chunk_stats = dask.compute(*stats)
    # aggregate local stats into global values
    print("computing global stats")
    minvals = []
    maxvals = []
    total_sum = 0
    total_size = 0
    for chunk in chunk_stats:
        if 'sum' in chunk and 'size' in chunk:
            total_sum += chunk['sum']
            total_size += chunk['size']
        if 'min' in chunk:
            minvals.append(chunk['min'])
        if 'max' in chunk:
            maxvals.append(chunk['max'])
    # BUGFIX: the global maximum was previously computed as np.min(maxvals)
    global_stats = {'min': np.min(minvals),
                    'max': np.max(maxvals),
                    'mean': total_sum / total_size,
                    'size': total_size}
    return global_stats, chunk_stats
| [
"h5py.File",
"numpy.empty",
"numpy.min",
"dask.compute",
"dask.array.from_array"
] | [((1019, 1058), 'h5py.File', 'h5py.File', (['data_file.filename'], {'mode': '"""r"""'}), "(data_file.filename, mode='r')\n", (1028, 1058), False, 'import h5py\n'), ((6235, 6255), 'dask.compute', 'dask.compute', (['*stats'], {}), '(*stats)\n', (6247, 6255), False, 'import dask\n'), ((6794, 6809), 'numpy.min', 'np.min', (['minvals'], {}), '(minvals)\n', (6800, 6809), True, 'import numpy as np\n'), ((6839, 6854), 'numpy.min', 'np.min', (['maxvals'], {}), '(maxvals)\n', (6845, 6854), True, 'import numpy as np\n'), ((2121, 2156), 'numpy.empty', 'np.empty', (['mask_sum'], {'dtype': '"""float64"""'}), "(mask_sum, dtype='float64')\n", (2129, 2156), True, 'import numpy as np\n'), ((3951, 3993), 'dask.array.from_array', 'da.from_array', (['data'], {'chunks': 'subchunk_shape'}), '(data, chunks=subchunk_shape)\n', (3964, 3993), True, 'from dask import dataframe as df, array as da, delayed, compute\n')] |
import math
import numpy as np
from plyfile import PlyData, PlyElement
from random import *
def _gen_noise(noise_range):
r = np.random.randn()
x = noise_range * r
r = np.random.randn()
y = noise_range * r
r = np.random.randn()
z = noise_range * r
return x, y, z
def read_ply(file_name):
    """Read a PLY file and return its vertex and face element data.

    :param file_name: path to the .ply file
    :return: (vertex_data, face_data) tuple; exits the process if either
        element is missing from the file
    """
    ply = PlyData.read(file_name)
    vertices = None
    faces = None
    for elem in ply.elements:
        if elem.name in ('vertex', 'vertices'):
            vertices = elem.data
        elif elem.name in ('face', 'faces'):
            faces = elem.data
    if vertices is None or faces is None:
        print('no face data, exit')
        exit(-1)
    return vertices, faces
def _gen_noisy_data(vertex_data, face_data):
    """Sample a noisy point cloud over every face of the mesh.

    :param vertex_data: vertex records as returned by ``read_ply``
    :param face_data: face records; each row's first entry is the list of
        vertex indices for that face
    :return: list of (x, y, z) points
    """
    samples = []
    for face_row in face_data:
        samples.extend(_gen_noisy_face(vertex_data, face_row[0]))
    return samples
# 采样分辨率,越小,点云越密
_RESOLUTION = 0.0005
# _RESOLUTION = 0.005
def length_of_vector(triple):
    """Return the Euclidean length of a 3-component vector."""
    return math.sqrt(sum(component * component for component in triple))
# 计算向量叉积
def cross(v1, v2):
    """Return the cross product of two 3-vectors as a tuple."""
    ax, ay, az = v1[0], v1[1], v1[2]
    bx, by, bz = v2[0], v2[1], v2[2]
    return (
        ay * bz - az * by,
        az * bx - ax * bz,
        ax * by - ay * bx)
def _gen_noisy_face(vertex_data, face_vert_indices):
    """Sample noisy points uniformly over one (possibly polygonal) face.

    Polygons with more than 3 vertices are fan-triangulated recursively;
    degenerate faces (< 3 vertices) yield no points. The number of samples
    is proportional to the triangle's area (area / _RESOLUTION), and each
    accepted sample is jittered by ``_gen_noise``.
    """
    if len(face_vert_indices) < 3:
        return []
    if len(face_vert_indices) > 3:
        # fan triangulation: triangle (0,1,2) plus the polygon (0,2,3,...)
        vert_indices_res = list(face_vert_indices[0:1]) + list(face_vert_indices[2:])
        return _gen_noisy_face(vertex_data, face_vert_indices[:3]) \
               + _gen_noisy_face(vertex_data, vert_indices_res)
    points = []
    a = vertex_data[face_vert_indices[0]]
    b = vertex_data[face_vert_indices[1]]
    c = vertex_data[face_vert_indices[2]]
    ab = (b[0] - a[0], b[1] - a[1], b[2] - a[2])
    ac = (c[0] - a[0], c[1] - a[1], c[2] - a[2])
    # parametric triangle equation: p = OA + u*AB + v*AC, with u + v <= 1
    # |AB x AC| / 2 is the triangle area, so num ~ area / _RESOLUTION
    num = int(length_of_vector(cross(ab, ac)) * 0.5 / _RESOLUTION)
    r = Random()
    count = 0
    while count < num:
        # rejection sampling: (u, v) uniform in the unit square, keep
        # only pairs that fall inside the triangle (u + v <= 1)
        u = r.random()
        v = r.random()
        if u + v > 1:
            continue
            pass
        noise = _gen_noise(_RESOLUTION * 0.25)
        point = (
            a[0] + ab[0] * u + ac[0] * v + noise[0],
            a[1] + ab[1] * u + ac[1] * v + noise[1],
            a[2] + ab[2] * u + ac[2] * v + noise[2]
        )
        points.append(point)
        count += 1
        pass
    return points
def _write_ply(point_cloud, file_name):
    """Save a point cloud as an ASCII PLY file with float32 x/y/z vertices."""
    vertices = np.array(point_cloud, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    element = PlyElement.describe(vertices, 'vertex')
    PlyData([element], text=True).write(file_name)
if __name__ == '__main__':
    # Read a PLY mesh and generate a noisy point cloud from it.
    """
    读取一个 Ply 文件,生成带有噪声的点云
    """
    file_name = './cube.ply'
    # mesh geometry of the input model
    vertex_data, face_data = read_ply(file_name)
    # sample noisy points over every face
    pc = _gen_noisy_data(vertex_data, face_data)
    # './cube.ply' -> './cube.pc.ply'
    _write_ply(pc, file_name[:-3] + "pc.ply")
    pass
| [
"plyfile.PlyElement.describe",
"math.sqrt",
"numpy.random.randn",
"plyfile.PlyData",
"numpy.array",
"plyfile.PlyData.read"
] | [((131, 148), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (146, 148), True, 'import numpy as np\n'), ((181, 198), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (196, 198), True, 'import numpy as np\n'), ((231, 248), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (246, 248), True, 'import numpy as np\n'), ((411, 434), 'plyfile.PlyData.read', 'PlyData.read', (['file_name'], {}), '(file_name)\n', (423, 434), False, 'from plyfile import PlyData, PlyElement\n'), ((1201, 1286), 'math.sqrt', 'math.sqrt', (['(triple[0] * triple[0] + triple[1] * triple[1] + triple[2] * triple[2])'], {}), '(triple[0] * triple[0] + triple[1] * triple[1] + triple[2] * triple[2]\n )\n', (1210, 1286), False, 'import math\n'), ((2686, 2754), 'numpy.array', 'np.array', (['point_cloud'], {'dtype': "[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]"}), "(point_cloud, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])\n", (2694, 2754), True, 'import numpy as np\n'), ((2772, 2814), 'plyfile.PlyElement.describe', 'PlyElement.describe', (['pc_np_array', '"""vertex"""'], {}), "(pc_np_array, 'vertex')\n", (2791, 2814), False, 'from plyfile import PlyData, PlyElement\n'), ((2819, 2851), 'plyfile.PlyData', 'PlyData', (['[vertex_ele]'], {'text': '(True)'}), '([vertex_ele], text=True)\n', (2826, 2851), False, 'from plyfile import PlyData, PlyElement\n')] |
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
data = np.genfromtxt(path, delimiter = ",",skip_header=1 )
print ("\nData: \n\n",data)
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
# prepend the new record to the census data
census = np.concatenate((new_record,data))
print (census)
#Code starts here
# --------------
# Basic statistics of the age column (column 0)
age = np.array(census[:,0])
print(age)
max_age =np.max(age)
print(max_age)
min_age = np.min(age)
print(min_age)
age_mean = np.mean(age)
print (age_mean)
age_std = np.std(age)
print (age_std)
# --------------
# Count the records per race code (column 2, codes 0-4)
#race 0
race_0 = np.array(census[census[:,2]==0],dtype=int)
print(race_0)
len_0 = len(race_0)
print(len_0)
#race 1
race_1 =np.array(census[census[:,2]==1], dtype=int)
print(race_1)
len_1 =len(race_1)
print(len_1)
#race 2
race_2 = np.array(census[census[:,2]==2],dtype= int)
# print(race_2)
len_2=len(race_2)
print (len_2)
#race 3
race_3 = np.array(census[census[:,2]==3],dtype= int)
# print(race_3)
len_3 = len(race_3)
print (len_3)
#race 4
race_4 = np.array(census[census[:,2]==4],dtype= int)
# print(race_4)
len_4 = len(race_4)
print(len_4)
#Minority Race
minor_race= np.array([len_0,len_1,len_2,len_3,len_4])
print(minor_race)
# BUGFIX: derive the minority race from the computed counts instead of
# hard-coding 3 -- the previous value ignored the minor_race array entirely.
minority_race = int(minor_race.argmin())
print(minority_race)
# --------------
# Average weekly working hours (column 6) of citizens older than 60
senior_citizens = np.array(census[census[:,0]>60], dtype=int)
senior_citizens_len=len(senior_citizens)
working_hours_sum = sum(senior_citizens[:,6])
avg_working_hours = (working_hours_sum/senior_citizens_len)
print(avg_working_hours)
# print(working_hours_sum)
# print(senior_citizens_len)
# print(senior_citizens)
# --------------
# Average income class (column 7) split by education level (column 1)
high = np.array(census[census[:,1]>10],dtype=int)
low = np.array(census[census[:,1]<=10],dtype=int)
avg_pay_high = np.mean(high[:,7])
avg_pay_low = np.mean(low[:,7])
print(avg_pay_high)
print(avg_pay_low)
# print(high)
# print(low)
| [
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.mean",
"numpy.concatenate"
] | [((134, 183), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (147, 183), True, 'import numpy as np\n'), ((287, 321), 'numpy.concatenate', 'np.concatenate', (['(new_record, data)'], {}), '((new_record, data))\n', (301, 321), True, 'import numpy as np\n'), ((408, 430), 'numpy.array', 'np.array', (['census[:, 0]'], {}), '(census[:, 0])\n', (416, 430), True, 'import numpy as np\n'), ((453, 464), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (459, 464), True, 'import numpy as np\n'), ((492, 503), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (498, 503), True, 'import numpy as np\n'), ((532, 544), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (539, 544), True, 'import numpy as np\n'), ((574, 585), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (580, 585), True, 'import numpy as np\n'), ((659, 705), 'numpy.array', 'np.array', (['census[census[:, 2] == 0]'], {'dtype': 'int'}), '(census[census[:, 2] == 0], dtype=int)\n', (667, 705), True, 'import numpy as np\n'), ((772, 818), 'numpy.array', 'np.array', (['census[census[:, 2] == 1]'], {'dtype': 'int'}), '(census[census[:, 2] == 1], dtype=int)\n', (780, 818), True, 'import numpy as np\n'), ((886, 932), 'numpy.array', 'np.array', (['census[census[:, 2] == 2]'], {'dtype': 'int'}), '(census[census[:, 2] == 2], dtype=int)\n', (894, 932), True, 'import numpy as np\n'), ((1002, 1048), 'numpy.array', 'np.array', (['census[census[:, 2] == 3]'], {'dtype': 'int'}), '(census[census[:, 2] == 3], dtype=int)\n', (1010, 1048), True, 'import numpy as np\n'), ((1120, 1166), 'numpy.array', 'np.array', (['census[census[:, 2] == 4]'], {'dtype': 'int'}), '(census[census[:, 2] == 4], dtype=int)\n', (1128, 1166), True, 'import numpy as np\n'), ((1245, 1290), 'numpy.array', 'np.array', (['[len_0, len_1, len_2, len_3, len_4]'], {}), '([len_0, len_1, len_2, len_3, len_4])\n', (1253, 1290), True, 'import numpy as np\n'), ((1403, 1449), 
'numpy.array', 'np.array', (['census[census[:, 0] > 60]'], {'dtype': 'int'}), '(census[census[:, 0] > 60], dtype=int)\n', (1411, 1449), True, 'import numpy as np\n'), ((1754, 1800), 'numpy.array', 'np.array', (['census[census[:, 1] > 10]'], {'dtype': 'int'}), '(census[census[:, 1] > 10], dtype=int)\n', (1762, 1800), True, 'import numpy as np\n'), ((1804, 1851), 'numpy.array', 'np.array', (['census[census[:, 1] <= 10]'], {'dtype': 'int'}), '(census[census[:, 1] <= 10], dtype=int)\n', (1812, 1851), True, 'import numpy as np\n'), ((1864, 1883), 'numpy.mean', 'np.mean', (['high[:, 7]'], {}), '(high[:, 7])\n', (1871, 1883), True, 'import numpy as np\n'), ((1898, 1916), 'numpy.mean', 'np.mean', (['low[:, 7]'], {}), '(low[:, 7])\n', (1905, 1916), True, 'import numpy as np\n')] |
from __future__ import division
import unittest
import numpy as np
from scipy.signal import butter
from wyrm.types import Data
from wyrm.processing import lfilter, spectrum
from wyrm.processing import swapaxes
class TestLFilter(unittest.TestCase):
    """Tests for wyrm.processing.lfilter."""

    def setUp(self):
        # create some data: three sinusoids (2, 7 and 15 Hz) with
        # amplitudes 30, 10 and 2, duplicated onto two channels
        fs = 100
        dt = 5
        self.freqs = [2, 7, 15]
        amps = [30, 10, 2]
        t = np.linspace(0, dt, fs*dt)
        data = np.sum([a * np.sin(2*np.pi*t*f) for a, f in zip(amps, self.freqs)], axis=0)
        data = data[:, np.newaxis]
        data = np.concatenate([data, data], axis=1)
        channel = np.array(['ch1', 'ch2'])
        self.dat = Data(data, [t, channel], ['time', 'channel'], ['s', '#'])
        self.dat.fs = fs

    def test_bandpass(self):
        """Band pass filtering."""
        # bandpass around the middle frequency
        fn = self.dat.fs / 2
        b, a = butter(4, [6 / fn, 8 / fn], btype='band')
        ans = lfilter(self.dat, b, a)
        # check if the desired band is not damped
        dat = spectrum(ans)
        mask = dat.axes[0] == 7
        self.assertTrue((dat.data[mask] > 6.5).all())
        # check if the outer freqs are damped close to zero
        # BUGFIX: the original combined the comparisons with '&', which
        # yields an always-empty mask (no frequency is both <= 6 and > 8),
        # so the assertion was vacuously true; '|' selects everything
        # outside the passband as intended.
        mask = (dat.axes[0] <= 6) | (dat.axes[0] > 8)
        self.assertTrue((dat.data[mask] < .5).all())

    def test_lfilter_copy(self):
        """lfilter must not modify argument."""
        cpy = self.dat.copy()
        fn = self.dat.fs / 2
        b, a = butter(4, [6 / fn, 8 / fn], btype='band')
        lfilter(self.dat, b, a)
        self.assertEqual(cpy, self.dat)

    def test_lfilter_swapaxes(self):
        """lfilter must work with nonstandard timeaxis."""
        fn = self.dat.fs / 2
        b, a = butter(4, [6 / fn, 8 / fn], btype='band')
        dat = lfilter(swapaxes(self.dat, 0, 1), b, a, timeaxis=1)
        dat = swapaxes(dat, 0, 1)
        dat2 = lfilter(self.dat, b, a)
        self.assertEqual(dat, dat2)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"wyrm.processing.spectrum",
"wyrm.processing.lfilter",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"wyrm.processing.swapaxes",
"wyrm.types.Data",
"scipy.signal.butter",
"numpy.concatenate"
] | [((1982, 1997), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1995, 1997), False, 'import unittest\n'), ((405, 432), 'numpy.linspace', 'np.linspace', (['(0)', 'dt', '(fs * dt)'], {}), '(0, dt, fs * dt)\n', (416, 432), True, 'import numpy as np\n'), ((572, 608), 'numpy.concatenate', 'np.concatenate', (['[data, data]'], {'axis': '(1)'}), '([data, data], axis=1)\n', (586, 608), True, 'import numpy as np\n'), ((627, 651), 'numpy.array', 'np.array', (["['ch1', 'ch2']"], {}), "(['ch1', 'ch2'])\n", (635, 651), True, 'import numpy as np\n'), ((671, 728), 'wyrm.types.Data', 'Data', (['data', '[t, channel]', "['time', 'channel']", "['s', '#']"], {}), "(data, [t, channel], ['time', 'channel'], ['s', '#'])\n", (675, 728), False, 'from wyrm.types import Data\n'), ((910, 951), 'scipy.signal.butter', 'butter', (['(4)', '[6 / fn, 8 / fn]'], {'btype': '"""band"""'}), "(4, [6 / fn, 8 / fn], btype='band')\n", (916, 951), False, 'from scipy.signal import butter\n'), ((966, 989), 'wyrm.processing.lfilter', 'lfilter', (['self.dat', 'b', 'a'], {}), '(self.dat, b, a)\n', (973, 989), False, 'from wyrm.processing import lfilter, spectrum\n'), ((1054, 1067), 'wyrm.processing.spectrum', 'spectrum', (['ans'], {}), '(ans)\n', (1062, 1067), False, 'from wyrm.processing import lfilter, spectrum\n'), ((1477, 1518), 'scipy.signal.butter', 'butter', (['(4)', '[6 / fn, 8 / fn]'], {'btype': '"""band"""'}), "(4, [6 / fn, 8 / fn], btype='band')\n", (1483, 1518), False, 'from scipy.signal import butter\n'), ((1527, 1550), 'wyrm.processing.lfilter', 'lfilter', (['self.dat', 'b', 'a'], {}), '(self.dat, b, a)\n', (1534, 1550), False, 'from wyrm.processing import lfilter, spectrum\n'), ((1732, 1773), 'scipy.signal.butter', 'butter', (['(4)', '[6 / fn, 8 / fn]'], {'btype': '"""band"""'}), "(4, [6 / fn, 8 / fn], btype='band')\n", (1738, 1773), False, 'from scipy.signal import butter\n'), ((1854, 1873), 'wyrm.processing.swapaxes', 'swapaxes', (['dat', '(0)', '(1)'], {}), '(dat, 0, 1)\n', (1862, 
1873), False, 'from wyrm.processing import swapaxes\n'), ((1889, 1912), 'wyrm.processing.lfilter', 'lfilter', (['self.dat', 'b', 'a'], {}), '(self.dat, b, a)\n', (1896, 1912), False, 'from wyrm.processing import lfilter, spectrum\n'), ((1796, 1820), 'wyrm.processing.swapaxes', 'swapaxes', (['self.dat', '(0)', '(1)'], {}), '(self.dat, 0, 1)\n', (1804, 1820), False, 'from wyrm.processing import swapaxes\n'), ((458, 483), 'numpy.sin', 'np.sin', (['(2 * np.pi * t * f)'], {}), '(2 * np.pi * t * f)\n', (464, 483), True, 'import numpy as np\n')] |
"""
"""
import os
import numpy as np
from ..nfw_evolution import lgc_vs_lgt, u_lgc_vs_lgt, CONC_MIN
from ..nfw_evolution import CONC_PARAM_BOUNDS, DEFAULT_CONC_PARAMS
from ..nfw_evolution import get_bounded_params, get_unbounded_params
_THIS_DRNAME = os.path.dirname(os.path.abspath(__file__))
DDRN = os.path.join(_THIS_DRNAME, "testing_data")
def test_bounded_params():
    """Round-tripping defaults through unbounded space must recover them."""
    defaults = np.array(list(DEFAULT_CONC_PARAMS.values()))
    roundtrip = get_bounded_params(get_unbounded_params(defaults))
    assert np.allclose(defaults, roundtrip, atol=0.01)
def test_default_params_are_within_bounds():
    """Each default concentration parameter must lie strictly inside its bounds."""
    for name, bounds in CONC_PARAM_BOUNDS.items():
        lo, hi = bounds[0], bounds[1]
        assert lo < DEFAULT_CONC_PARAMS[name] < hi
def test_unbounded_params():
    """Unbounded -> bounded -> unbounded must be the identity for random draws."""
    for seed in range(10):
        rng = np.random.RandomState(seed)
        u_params = rng.uniform(-5, 5, 4)
        recovered = get_unbounded_params(get_bounded_params(u_params))
        assert np.allclose(u_params, recovered, atol=0.01)
def test_consistency_u_lgc_vs_lgt():
    """Bounded and unbounded concentration histories must agree."""
    lgt_grid = np.linspace(-1, 1.14, 50)
    for seed in range(10):
        rng = np.random.RandomState(seed)
        u_params = rng.uniform(-5, 5, 4)
        bounded = get_bounded_params(u_params)
        assert np.allclose(
            lgc_vs_lgt(lgt_grid, *bounded),
            u_lgc_vs_lgt(lgt_grid, *u_params),
            atol=0.01,
        )
def test_lgc_vs_lgt_behaves_reasonably_at_defaults():
    """With default parameters, log-concentration stays within sane limits."""
    lgt_grid = np.linspace(-1, 1.14, 50)
    defaults = np.array(list(DEFAULT_CONC_PARAMS.values()))
    lgconc = lgc_vs_lgt(lgt_grid, *defaults)
    # never below the enforced concentration floor, never absurdly large
    assert np.all(lgconc >= np.log10(CONC_MIN))
    assert np.all(lgconc < 2.0)
def test_agreement_with_hard_coded_data():
    """The two ASCII data files testing_data/tarr.txt
    and testing_data/lgconc_at_tarr.txt contain tabulations of the correct values of
    the lgc_vs_lgt function for the parameter values stored in the header of
    testing_data/lgconc_at_tarr.txt. This unit test enforces agreement between the
    diffprof source code and that tabulation.
    """
    tarr = np.loadtxt(os.path.join(DDRN, "tarr.txt"))
    lgtarr = np.log10(tarr)
    lgc_correct = np.loadtxt(os.path.join(DDRN, "lgconc_at_tarr.txt"))
    # the parameter values live on the second header line, after a label token
    with open(os.path.join(DDRN, "lgconc_at_tarr.txt"), "r") as fh:
        fh.readline()
        header_tokens = fh.readline().strip().split()
    params = [float(tok) for tok in header_tokens[1:]]
    lgc = lgc_vs_lgt(lgtarr, *params)
    assert np.allclose(lgc, lgc_correct, atol=0.01)
| [
"os.path.abspath",
"numpy.allclose",
"numpy.random.RandomState",
"numpy.linspace",
"numpy.log10",
"os.path.join",
"numpy.all"
] | [((302, 344), 'os.path.join', 'os.path.join', (['_THIS_DRNAME', '"""testing_data"""'], {}), "(_THIS_DRNAME, 'testing_data')\n", (314, 344), False, 'import os\n'), ((268, 293), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (283, 293), False, 'import os\n'), ((505, 534), 'numpy.allclose', 'np.allclose', (['p', 'p2'], {'atol': '(0.01)'}), '(p, p2, atol=0.01)\n', (516, 534), True, 'import numpy as np\n'), ((1025, 1050), 'numpy.linspace', 'np.linspace', (['(-1)', '(1.14)', '(50)'], {}), '(-1, 1.14, 50)\n', (1036, 1050), True, 'import numpy as np\n'), ((1408, 1433), 'numpy.linspace', 'np.linspace', (['(-1)', '(1.14)', '(50)'], {}), '(-1, 1.14, 50)\n', (1419, 1433), True, 'import numpy as np\n'), ((1576, 1593), 'numpy.all', 'np.all', (['(lgc < 2.0)'], {}), '(lgc < 2.0)\n', (1582, 1593), True, 'import numpy as np\n'), ((2059, 2073), 'numpy.log10', 'np.log10', (['tarr'], {}), '(tarr)\n', (2067, 2073), True, 'import numpy as np\n'), ((2374, 2414), 'numpy.allclose', 'np.allclose', (['lgc', 'lgc_correct'], {'atol': '(0.01)'}), '(lgc, lgc_correct, atol=0.01)\n', (2385, 2414), True, 'import numpy as np\n'), ((789, 817), 'numpy.random.RandomState', 'np.random.RandomState', (['itest'], {}), '(itest)\n', (810, 817), True, 'import numpy as np\n'), ((941, 972), 'numpy.allclose', 'np.allclose', (['up', 'up2'], {'atol': '(0.01)'}), '(up, up2, atol=0.01)\n', (952, 972), True, 'import numpy as np\n'), ((1113, 1141), 'numpy.random.RandomState', 'np.random.RandomState', (['itest'], {}), '(itest)\n', (1134, 1141), True, 'import numpy as np\n'), ((1305, 1338), 'numpy.allclose', 'np.allclose', (['lgc', 'lgc2'], {'atol': '(0.01)'}), '(lgc, lgc2, atol=0.01)\n', (1316, 1338), True, 'import numpy as np\n'), ((2014, 2044), 'os.path.join', 'os.path.join', (['DDRN', '"""tarr.txt"""'], {}), "(DDRN, 'tarr.txt')\n", (2026, 2044), False, 'import os\n'), ((2103, 2143), 'os.path.join', 'os.path.join', (['DDRN', '"""lgconc_at_tarr.txt"""'], {}), "(DDRN, 
'lgconc_at_tarr.txt')\n", (2115, 2143), False, 'import os\n'), ((1545, 1563), 'numpy.log10', 'np.log10', (['CONC_MIN'], {}), '(CONC_MIN)\n', (1553, 1563), True, 'import numpy as np\n'), ((2159, 2199), 'os.path.join', 'os.path.join', (['DDRN', '"""lgconc_at_tarr.txt"""'], {}), "(DDRN, 'lgconc_at_tarr.txt')\n", (2171, 2199), False, 'import os\n')] |
import os
import numpy as np
class DeepDIVADatasetAdapter(object):
    """
    Creates a directory & file based training environment that natively works with DeepDIVA CNN implementation.
    Symlinks are used to reference files in self.root directory.
    """
    def __init__(self, input_dir):
        # root directory holding the train/val/test subfolders
        self.root = input_dir

    def read_folder_dataset(self, subfolder="train"):
        """
        :param subfolder: string. subfolder to scan for files/images
        :return: 2D ndarray. [[file_path, label]...]
        """
        base = os.path.join(self.root, subfolder)
        pairs = []
        # each subdirectory name is a class label; its files are samples
        for label in os.listdir(base):
            label_dir = os.path.join(base, label)
            for entry in os.listdir(label_dir):
                pairs.append([os.path.join(label_dir, entry), label])
        # reshape keeps the (0, 2) shape even for an empty dataset
        return np.array(pairs).reshape(len(pairs), 2)

    def create_symlink_dataset(self, dataset, output_dir, subfolder='train'):
        """
        :param dataset: 2D ndarray. [[file_path, label]...]
        :param output_dir: string, root path for symlinks
        :param subfolder: string: train, val, test
        """
        for source_path, label in dataset:
            target_dir = os.path.join(output_dir, subfolder, label)
            os.makedirs(target_dir, exist_ok=True)
            os.symlink(source_path, os.path.join(target_dir, os.path.basename(source_path)))

    def copy_symlink(self, output_dir, subfolder='train'):
        """Symlink every file of ``subfolder`` under ``self.root`` into ``output_dir``."""
        self.create_symlink_dataset(self.read_folder_dataset(subfolder), output_dir, subfolder)
| [
"os.makedirs",
"os.path.basename",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((610, 644), 'os.path.join', 'os.path.join', (['self.root', 'subfolder'], {}), '(self.root, subfolder)\n', (622, 644), False, 'import os\n'), ((687, 711), 'os.listdir', 'os.listdir', (['dataset_root'], {}), '(dataset_root)\n', (697, 711), False, 'import os\n'), ((738, 771), 'os.path.join', 'os.path.join', (['dataset_root', 'label'], {}), '(dataset_root, label)\n', (750, 771), False, 'import os\n'), ((792, 814), 'os.listdir', 'os.listdir', (['label_path'], {}), '(label_path)\n', (802, 814), False, 'import os\n'), ((1357, 1399), 'os.path.join', 'os.path.join', (['output_dir', 'subfolder', 'label'], {}), '(output_dir, subfolder, label)\n', (1369, 1399), False, 'import os\n'), ((1423, 1453), 'os.path.basename', 'os.path.basename', (['picture_path'], {}), '(picture_path)\n', (1439, 1453), False, 'import os\n'), ((1466, 1503), 'os.makedirs', 'os.makedirs', (['label_dir'], {'exist_ok': '(True)'}), '(label_dir, exist_ok=True)\n', (1477, 1503), False, 'import os\n'), ((969, 986), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (977, 986), True, 'import numpy as np\n'), ((1541, 1574), 'os.path.join', 'os.path.join', (['label_dir', 'filename'], {}), '(label_dir, filename)\n', (1553, 1574), False, 'import os\n'), ((880, 913), 'os.path.join', 'os.path.join', (['label_path', 'picture'], {}), '(label_path, picture)\n', (892, 913), False, 'import os\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = "<NAME> (Galarius)"
"""
Генерация условной карты местности,
двух маршрутов и равномерно распределённых
точек, относящихся к одному из двух маршрутов.
Визуализация условной карты местности.
"""
import sys # exit()
import numpy as np # матрицы и вектора
# графики
from pylab import plt
class Map(object):
"""
Условная карта местности с двумя маршрутами.
Позволяет сгенерировать равномерно распределённые
точки, относящиеся к одному из двух маршрутов.
Предоставляет визуализацию условной карты местности.
"""
    def __init__(self, width, height):
        """Create a map of the given size with randomly placed waypoints.

        :param width: map width
        :param height: map height
        """
        # map size
        self.width = width
        self.height = height
        # start point: left edge, random y strictly inside the map
        self.o1 = np.array([0, np.random.randint(self.height-2) + 1], dtype=float)
        # intermediate waypoint of the first route (trajectory 0), bottom edge
        self.a = np.array([np.random.randint(self.width-4) + 2, 0], dtype=float)
        # intermediate waypoint of the second route (trajectory 1), top edge
        self.b = np.array([np.random.randint(self.width-4) + 2, self.height], dtype=float)
        # goal point: right edge, random y strictly inside the map
        self.o2 = np.array([self.width, np.random.randint(self.height-2)+1], dtype=float)
def dataset(self, trajectory, npoints, uniform = True):
# Triangle Point Picking
data = []
if trajectory == 0:
data = Map.distrInTriangle(self.o1, self.a, self.o2, npoints * 2, uniform)
p = np.array([0, 0], dtype = float)
data.extend(Map.distrInTriangle(self.o1, p, self.a, npoints, uniform))
p = np.array([self.width, 0], dtype = float)
data.extend(Map.distrInTriangle(self.a, p, self.o2, npoints, uniform))
else:
data = Map.distrInTriangle(self.o1, self.b, self.o2, npoints * 2, uniform)
p = np.array([0, self.height], dtype = float)
data.extend(Map.distrInTriangle(p, self.o1, self.b, npoints, uniform))
p = np.array([self.width, self.height], dtype = float)
data.extend(Map.distrInTriangle(self.b, self.o2, p, npoints, uniform))
return np.array(data)
def plotMap(self, fname=None):
fig, ax = plt.subplots()
ax.plot([self.o1[0], self.a[0], self.o2[0]], [self.o1[1], self.a[1], self.o2[1]], 'r', label='Trajectory 0')
ax.plot([self.o1[0], self.b[0], self.o2[0]], [self.o1[1], self.b[1], self.o2[1]], 'b--', label='Trajectory 1')
legend = ax.legend(loc='best', framealpha=0.5)
plt.title("Map")
plt.grid(True)
if fname:
plt.savefig(fname)
plt.show()
def plot(self, good, bad, dataset0, dataset1, fname=None):
fig, ax = plt.subplots()
ax.plot([self.o1[0], self.a[0], self.o2[0]], [self.o1[1], self.a[1], self.o2[1]], 'r', label='Trajectory 0')
ax.plot([self.o1[0], self.b[0], self.o2[0]], [self.o1[1], self.b[1], self.o2[1]], 'b--', label='Trajectory 1')
if dataset0.any():
ax.plot(dataset0[:,0], dataset0[:,1], 'ro', label='Train Dataset 0')
if dataset1.any():
ax.plot(dataset1[:,0], dataset1[:,1], 'b*', label='Train Dataset 1')
if good.any():
ax.plot(good[:,0], good[:,1], 'go', markersize=10, label='Correct prediction')
if bad.any():
ax.plot(bad[:,0], bad[:,1], 'black', linestyle='none', marker='D', markersize=10, label='Incorrect prediction')
legend = ax.legend(loc='best', framealpha=0.5)
plt.title("Map")
plt.grid(True)
if fname:
plt.savefig(fname)
plt.show()
@staticmethod
def triangleArea(p0, p1, p2):
"""
Вычисление площади треугольника
:param p0,p1,p2 - координаты треугольника
"""
return 0.5 * (-p1[1] * p2[0] + p0[1] * (-p1[0] + p2[0]) + p0[0] * (p1[1] - p2[1]) + p1[0] * p2[1])
@staticmethod
def insideTriangle(p, p0, p1, p2, area):
"""
Проверка нахождения точки внутри треугольника
при помощи барицентрических координат.
:param p - координаты точки для проверки
:param p0,p1,p2 - координаты треугольника
"""
s = 1.0 / (2.0 * area) * (p0[1] * p2[0] - p0[0] * p2[1] + (p2[1] - p0[1]) * p[0] + (p0[0] - p2[0]) * p[1])
t = 1.0 / (2.0 * area) * (p0[0] * p1[1] - p0[1] * p1[0] + (p0[1] - p1[1]) * p[0] + (p1[0] - p0[0]) * p[1])
return s > 0 and t > 0 and 1-s-t > 0
@staticmethod
def distrInTriangle(p0, p1, p2, npoints, uniform = True):
"""
Генерация точек внутри треугольника
методом Triangle Point Picking
:param p0,p1,p2 - координаты треугольника
:param npoints - количество точек
:uniform - равномерное распределение
"""
data = []
v0 = p0
v1 = p1 - v0
v2 = p2 - v0
area = Map.triangleArea(p0, p1, p2)
if uniform:
npoints *= 2
for i in xrange(npoints):
a1 = np.random.random()
a2 = np.random.random()
if uniform:
x = a1 * v1 + a2 * v2 + v0
if Map.insideTriangle(x, p0, p1, p2, area):
data.append(x)
else:
x = a1 * v1 + (1 - a1) * a2 * v2 + v0
data.append(x)
return data
| [
"pylab.plt.savefig",
"pylab.plt.show",
"pylab.plt.title",
"pylab.plt.grid",
"numpy.random.random",
"numpy.array",
"numpy.random.randint",
"pylab.plt.subplots"
] | [((2096, 2110), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2104, 2110), True, 'import numpy as np\n'), ((2165, 2179), 'pylab.plt.subplots', 'plt.subplots', ([], {}), '()\n', (2177, 2179), False, 'from pylab import plt\n'), ((2479, 2495), 'pylab.plt.title', 'plt.title', (['"""Map"""'], {}), "('Map')\n", (2488, 2495), False, 'from pylab import plt\n'), ((2504, 2518), 'pylab.plt.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2512, 2518), False, 'from pylab import plt\n'), ((2576, 2586), 'pylab.plt.show', 'plt.show', ([], {}), '()\n', (2584, 2586), False, 'from pylab import plt\n'), ((2669, 2683), 'pylab.plt.subplots', 'plt.subplots', ([], {}), '()\n', (2681, 2683), False, 'from pylab import plt\n'), ((3459, 3475), 'pylab.plt.title', 'plt.title', (['"""Map"""'], {}), "('Map')\n", (3468, 3475), False, 'from pylab import plt\n'), ((3484, 3498), 'pylab.plt.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3492, 3498), False, 'from pylab import plt\n'), ((3556, 3566), 'pylab.plt.show', 'plt.show', ([], {}), '()\n', (3564, 3566), False, 'from pylab import plt\n'), ((1434, 1463), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'float'}), '([0, 0], dtype=float)\n', (1442, 1463), True, 'import numpy as np\n'), ((1565, 1603), 'numpy.array', 'np.array', (['[self.width, 0]'], {'dtype': 'float'}), '([self.width, 0], dtype=float)\n', (1573, 1603), True, 'import numpy as np\n'), ((1806, 1845), 'numpy.array', 'np.array', (['[0, self.height]'], {'dtype': 'float'}), '([0, self.height], dtype=float)\n', (1814, 1845), True, 'import numpy as np\n'), ((1947, 1995), 'numpy.array', 'np.array', (['[self.width, self.height]'], {'dtype': 'float'}), '([self.width, self.height], dtype=float)\n', (1955, 1995), True, 'import numpy as np\n'), ((2549, 2567), 'pylab.plt.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (2560, 2567), False, 'from pylab import plt\n'), ((3529, 3547), 'pylab.plt.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (3540, 3547), False, 'from 
pylab import plt\n'), ((4947, 4965), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4963, 4965), True, 'import numpy as np\n'), ((4983, 5001), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4999, 5001), True, 'import numpy as np\n'), ((780, 814), 'numpy.random.randint', 'np.random.randint', (['(self.height - 2)'], {}), '(self.height - 2)\n', (797, 814), True, 'import numpy as np\n'), ((900, 933), 'numpy.random.randint', 'np.random.randint', (['(self.width - 4)'], {}), '(self.width - 4)\n', (917, 933), True, 'import numpy as np\n'), ((1022, 1055), 'numpy.random.randint', 'np.random.randint', (['(self.width - 4)'], {}), '(self.width - 4)\n', (1039, 1055), True, 'import numpy as np\n'), ((1141, 1175), 'numpy.random.randint', 'np.random.randint', (['(self.height - 2)'], {}), '(self.height - 2)\n', (1158, 1175), True, 'import numpy as np\n')] |
"""
Created on Mon Jun 24 10:52:25 2019
Reads a wav file with SDR IQ capture of FM stations located in :
https://mega.nz/#F!3UUUnSiD!WLhWZ3ff4f4Pi7Ko_zcodQ
Also generates IQ stream sampled at 2.4Msps to simulate a similar spectrum
sinusoids, this might be useful in an early stage to use a known signal.
@author: f.divruno
"""
#!/usr/bin/env python3
import wave
import numpy as np
import matplotlib.pyplot as plt
# ------------ PARAMETERS
N = 5000 #number of samples to read
nAverages = 10 # number of averages for the spectrum estimate
folder = "C:\\Users\\F.Divruno\\Downloads\\" # change this to your folder.
filename = "17-22-08_89100kHz.wav"
CenterFrequency = 89100e3 # Centre freq of the recording is the number at the end of the filename.
# ------------
#Read an IQ recording of FM stations:
wav_in = wave.open(folder+ filename, "r")
sampleFreq = 2.4e6 # sample freq of the SDR to acquire this signals
timeMax = N/sampleFreq # duration of the loaded signals
t = np.linspace(0,timeMax,N)
# Read the file one frame at a time.
# NOTE(review): byte 0 of each frame is taken as I and byte 1 as Q, which
# assumes a 2-channel 8-bit wav recording -- confirm against the capture format.
I = np.zeros(N)
Q = np.zeros(N)
for n in range(N):
    aux = wav_in.readframes(1)
    I[n] = aux[0]
    Q[n] = aux[1]
# Plot the spectrum of the recording: V = |FFT(I) - j*FFT(Q)|, with the
# frequency axis shifted so 0 Hz maps to the SDR centre frequency.
I_fft = np.fft.fftshift(np.fft.fft(I))
Q_fft = np.fft.fftshift(np.fft.fft(Q))
V = abs(I_fft-1j*Q_fft)
freq = np.fft.fftshift(np.fft.fftfreq(N,d=1/sampleFreq) + CenterFrequency)
plt.figure()
plt.subplot(2,1,1)
plt.plot(freq/1e6,20*np.log10(V))
plt.xlabel('MHz')
plt.ylabel('dB')
plt.title('Recording')
#test signal generated with tone signals: 20 random-phase tones (1 kHz apart)
#around each synthetic station to mimic an FM station's occupied bandwidth.
I = np.zeros(N)
Q = np.zeros(N)
foStation = np.array([88, 88.4, 88.6, 88.8 ,89.4, 89.6, 89.8, 90, 90.2])*1e6
numStations = len(foStation)
for k in range(numStations):
    # NOTE(review): the baseband offset is halved here; with the freq axis
    # built as fftfreq + CenterFrequency the plotted stations land at half
    # their nominal offset -- confirm whether the /2 is intentional.
    fcent = (foStation[k] - CenterFrequency)/2
    for i in range(20):
        fc = fcent-10*1e3 + 1e3*i
        phase = np.random.random(1)*2*np.pi
        I += np.sin(2*np.pi*fc*t+phase)*np.sin(2*np.pi*fc*t)
        Q += np.sin(2*np.pi*fc*t+phase)*np.cos(2*np.pi*fc*t)
I_fft = np.fft.fftshift(np.fft.fft(I))
Q_fft = np.fft.fftshift(np.fft.fft(Q))
V = abs(I_fft-1j*Q_fft)
plt.subplot(2,1,2)
plt.plot(freq/1e6,20*np.log10(V),'g')
plt.xlabel('MHz')
plt.ylabel('dB')
plt.title('syntethized')
#%%Average the IQ recording of FM stations:
# Re-read the file from the start and average nAverages consecutive
# N-sample spectra to reduce the variance of the estimate.
wav_in.rewind()
V = np.zeros(N)
for k in range(nAverages):
    for n in range(N):
        aux = wav_in.readframes(1)
        I[n] = aux[0]
        Q[n] = aux[1]
    I_fft = np.fft.fftshift(np.fft.fft(I))
    Q_fft = np.fft.fftshift(np.fft.fft(Q))
    V += abs(I_fft-1j*Q_fft)
V /= nAverages
plt.figure()
plt.plot(freq/1e6,20*np.log10(V))
plt.title('Averaged %d times'%nAverages)
plt.xlabel('MHz')
plt.ylabel('dB')
| [
"matplotlib.pyplot.title",
"wave.open",
"matplotlib.pyplot.subplot",
"numpy.fft.fft",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.fft.fftfreq",
"numpy.array",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.random.random",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.p... | [((818, 851), 'wave.open', 'wave.open', (['(folder + filename)', '"""r"""'], {}), "(folder + filename, 'r')\n", (827, 851), False, 'import wave\n'), ((980, 1006), 'numpy.linspace', 'np.linspace', (['(0)', 'timeMax', 'N'], {}), '(0, timeMax, N)\n', (991, 1006), True, 'import numpy as np\n'), ((1026, 1037), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1034, 1037), True, 'import numpy as np\n'), ((1042, 1053), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1050, 1053), True, 'import numpy as np\n'), ((1364, 1376), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1374, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1397), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1388, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MHz"""'], {}), "('MHz')\n", (1440, 1447), True, 'import matplotlib.pyplot as plt\n'), ((1448, 1464), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dB"""'], {}), "('dB')\n", (1458, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1487), 'matplotlib.pyplot.title', 'plt.title', (['"""Recording"""'], {}), "('Recording')\n", (1474, 1487), True, 'import matplotlib.pyplot as plt\n'), ((1536, 1547), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1544, 1547), True, 'import numpy as np\n'), ((1552, 1563), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1560, 1563), True, 'import numpy as np\n'), ((2075, 2095), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2086, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2132, 2149), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MHz"""'], {}), "('MHz')\n", (2142, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2166), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dB"""'], {}), "('dB')\n", (2160, 2166), True, 'import matplotlib.pyplot as plt\n'), ((2167, 2191), 'matplotlib.pyplot.title', 
'plt.title', (['"""syntethized"""'], {}), "('syntethized')\n", (2176, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2270), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2267, 2270), True, 'import numpy as np\n'), ((2539, 2551), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2549, 2551), True, 'import matplotlib.pyplot as plt\n'), ((2586, 2628), 'matplotlib.pyplot.title', 'plt.title', (["('Averaged %d times' % nAverages)"], {}), "('Averaged %d times' % nAverages)\n", (2595, 2628), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MHz"""'], {}), "('MHz')\n", (2637, 2644), True, 'import matplotlib.pyplot as plt\n'), ((2645, 2661), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dB"""'], {}), "('dB')\n", (2655, 2661), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1222), 'numpy.fft.fft', 'np.fft.fft', (['I'], {}), '(I)\n', (1219, 1222), True, 'import numpy as np\n'), ((1248, 1261), 'numpy.fft.fft', 'np.fft.fft', (['Q'], {}), '(Q)\n', (1258, 1261), True, 'import numpy as np\n'), ((1576, 1636), 'numpy.array', 'np.array', (['[88, 88.4, 88.6, 88.8, 89.4, 89.6, 89.8, 90, 90.2]'], {}), '([88, 88.4, 88.6, 88.8, 89.4, 89.6, 89.8, 90, 90.2])\n', (1584, 1636), True, 'import numpy as np\n'), ((1996, 2009), 'numpy.fft.fft', 'np.fft.fft', (['I'], {}), '(I)\n', (2006, 2009), True, 'import numpy as np\n'), ((2035, 2048), 'numpy.fft.fft', 'np.fft.fft', (['Q'], {}), '(Q)\n', (2045, 2048), True, 'import numpy as np\n'), ((1311, 1346), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['N'], {'d': '(1 / sampleFreq)'}), '(N, d=1 / sampleFreq)\n', (1325, 1346), True, 'import numpy as np\n'), ((1417, 1428), 'numpy.log10', 'np.log10', (['V'], {}), '(V)\n', (1425, 1428), True, 'import numpy as np\n'), ((2115, 2126), 'numpy.log10', 'np.log10', (['V'], {}), '(V)\n', (2123, 2126), True, 'import numpy as np\n'), ((2434, 2447), 'numpy.fft.fft', 'np.fft.fft', (['I'], {}), '(I)\n', (2444, 2447), True, 'import numpy as 
np\n'), ((2477, 2490), 'numpy.fft.fft', 'np.fft.fft', (['Q'], {}), '(Q)\n', (2487, 2490), True, 'import numpy as np\n'), ((2573, 2584), 'numpy.log10', 'np.log10', (['V'], {}), '(V)\n', (2581, 2584), True, 'import numpy as np\n'), ((1861, 1895), 'numpy.sin', 'np.sin', (['(2 * np.pi * fc * t + phase)'], {}), '(2 * np.pi * fc * t + phase)\n', (1867, 1895), True, 'import numpy as np\n'), ((1888, 1914), 'numpy.sin', 'np.sin', (['(2 * np.pi * fc * t)'], {}), '(2 * np.pi * fc * t)\n', (1894, 1914), True, 'import numpy as np\n'), ((1922, 1956), 'numpy.sin', 'np.sin', (['(2 * np.pi * fc * t + phase)'], {}), '(2 * np.pi * fc * t + phase)\n', (1928, 1956), True, 'import numpy as np\n'), ((1949, 1975), 'numpy.cos', 'np.cos', (['(2 * np.pi * fc * t)'], {}), '(2 * np.pi * fc * t)\n', (1955, 1975), True, 'import numpy as np\n'), ((1820, 1839), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (1836, 1839), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/funnel_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iPLM5TwcMcTe"
# In this notebook, we explore the "funnel of hell". This refers to a posterior in which
# the mean and variance of a variable are highly correlated, and have a funnel
# shape. (The term "funnel of hell" is from [this blog post](https://twiecki.io/blog/2014/03/17/bayesian-glms-3/) by <NAME>.)
#
# We illustrate this using a hierarchical Bayesian model for inferring Gaussian means, fit to synthetic data, similar to 8 schools (except we vary the same size and fix the variance). This code is based on [this notebook](http://bebi103.caltech.edu.s3-website-us-east-1.amazonaws.com/2017/tutorials/aux8_mcmc_tips.html) from <NAME>.
# + id="-sWa3BStE4ov"
# %matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# + id="1UEFiUi-qZA1" colab={"base_uri": "https://localhost:8080/"} outputId="1a20ff5d-68e6-4f60-81e0-1456bfa83b5f"
# !pip install -U pymc3>=3.8
import pymc3 as pm
print(pm.__version__)
import arviz as az
print(az.__version__)
# + id="SS-lUcY9ovUd"
import math
import pickle
import numpy as np
import pandas as pd
import scipy.stats as st
import theano.tensor as tt
import theano
# + id="H4iJ8eTAr3yF" colab={"base_uri": "https://localhost:8080/"} outputId="23291ee5-7822-41fb-d3ca-c829cd0891f5"
np.random.seed(0)
# Specify parameters for random data
mu_val = 8
tau_val = 3
sigma_val = 10
n_groups = 10
# Generate number of replicates for each repeat
n = np.random.randint(low=3, high=10, size=n_groups, dtype=int)
print(n)
print(sum(n))
# + id="oyyDYNGfsmUa" colab={"base_uri": "https://localhost:8080/"} outputId="f8d2cf60-fbbd-4a29-fcd6-747cd2e18870"
# Generate data set
mus = np.zeros(n_groups)
x = np.array([])
for i in range(n_groups):
mus[i] = np.random.normal(mu_val, tau_val)
samples = np.random.normal(mus[i], sigma_val, size=n[i])
x = np.append(x, samples)
print(x.shape)
group_ind = np.concatenate([[i]*n_val for i, n_val in enumerate(n)])
# + id="Vz-gdn-zuCcx" colab={"base_uri": "https://localhost:8080/", "height": 692} outputId="19b32b08-cffc-4800-9667-5ff22df6f387"
with pm.Model() as centered_model:
# Hyperpriors
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=2.5)
log_tau = pm.Deterministic('log_tau', tt.log(tau))
# Prior on theta
theta = pm.Normal('theta', mu=mu, sd=tau, shape=n_groups)
# Likelihood
x_obs = pm.Normal('x_obs',
mu=theta[group_ind],
sd=sigma_val,
observed=x)
np.random.seed(0)
with centered_model:
centered_trace = pm.sample(10000, chains=2)
pm.summary(centered_trace).round(2)
# + id="UMLPIRMPsgej" colab={"base_uri": "https://localhost:8080/", "height": 963} outputId="3227aaef-1030-490f-8605-5744d27f269c"
with pm.Model() as noncentered_model:
# Hyperpriors
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=2.5)
log_tau = pm.Deterministic('log_tau', tt.log(tau))
# Prior on theta
#theta = pm.Normal('theta', mu=mu, sd=tau, shape=n_trials)
var_theta = pm.Normal('var_theta', mu=0, sd=1, shape=n_groups)
theta = pm.Deterministic('theta', mu + var_theta * tau)
# Likelihood
x_obs = pm.Normal('x_obs',
mu=theta[group_ind],
sd=sigma_val,
observed=x)
np.random.seed(0)
with noncentered_model:
noncentered_trace = pm.sample(1000, chains=2)
pm.summary(noncentered_trace).round(2)
# + id="XqQQUavXvFWT" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="88b33782-8b68-4057-e1c9-b582e6db8cc1"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['tau'], name='tau')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='τ');
axs[0].axhline(0.01)
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['tau'], name='tau')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel='µ', ylabel='τ');
axs[1].axhline(0.01)
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
# + id="--jgSNVBLadC" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="6cf32ae5-ee7b-4abe-bf8f-b51450bb02d1"
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['tau'], name='tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('centered')
plt.show()
# + id="tEfEJ8JuLX43" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="4869fb30-3d07-4e0c-a6da-03c1014923b3"
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['tau'], name='tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('noncentered')
plt.show()
# + id="1-FQqDkTFEqy" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="b9804230-dc6c-4586-9a5a-1ad38a9cab82"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['log_tau'], name='log_tau')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='log(τ)');
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['log_tau'], name='log_tau')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel='µ', ylabel='log(τ)');
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
# + id="5QqP9pOLHJR5" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="34dfd8db-fc63-44bb-c203-5b2c64cf9d3c"
#https://seaborn.pydata.org/generated/seaborn.jointplot.html
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['log_tau'], name='log_tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('centered')
plt.show()
# + id="7jK4o4idIw_u" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="784cde75-c370-457f-e4df-5bb51595246a"
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['log_tau'], name='log_tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('noncentered')
plt.show()
# + id="KNam0ZuYYhxw" colab={"base_uri": "https://localhost:8080/", "height": 581} outputId="6a73f609-35a5-433f-bb22-09509881998e"
az.plot_forest([centered_trace, noncentered_trace], model_names=['centered', 'noncentered'],
var_names="theta",
combined=True, hdi_prob=0.95);
# + id="sizu9bNdT4K0"
| [
"pymc3.sample",
"numpy.random.seed",
"matplotlib.pyplot.suptitle",
"arviz.plot_forest",
"pymc3.Deterministic",
"pymc3.Normal",
"pymc3.HalfCauchy",
"numpy.random.randint",
"numpy.random.normal",
"theano.tensor.log",
"numpy.append",
"matplotlib.pyplot.subplots",
"pymc3.Model",
"matplotlib.py... | [((1929, 1946), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1943, 1946), True, 'import numpy as np\n'), ((2089, 2148), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(3)', 'high': '(10)', 'size': 'n_groups', 'dtype': 'int'}), '(low=3, high=10, size=n_groups, dtype=int)\n', (2106, 2148), True, 'import numpy as np\n'), ((2315, 2333), 'numpy.zeros', 'np.zeros', (['n_groups'], {}), '(n_groups)\n', (2323, 2333), True, 'import numpy as np\n'), ((2338, 2350), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2346, 2350), True, 'import numpy as np\n'), ((3169, 3186), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3183, 3186), True, 'import numpy as np\n'), ((4009, 4026), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4023, 4026), True, 'import numpy as np\n'), ((4290, 4337), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=2, sharex=True, sharey=True)\n', (4302, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4384), 'pandas.Series', 'pd.Series', (["centered_trace['mu']"], {'name': '"""mu"""'}), "(centered_trace['mu'], name='mu')\n", (4351, 4384), True, 'import pandas as pd\n'), ((4390, 4434), 'pandas.Series', 'pd.Series', (["centered_trace['tau']"], {'name': '"""tau"""'}), "(centered_trace['tau'], name='tau')\n", (4399, 4434), True, 'import pandas as pd\n'), ((4539, 4584), 'pandas.Series', 'pd.Series', (["noncentered_trace['mu']"], {'name': '"""mu"""'}), "(noncentered_trace['mu'], name='mu')\n", (4548, 4584), True, 'import pandas as pd\n'), ((4590, 4637), 'pandas.Series', 'pd.Series', (["noncentered_trace['tau']"], {'name': '"""tau"""'}), "(noncentered_trace['tau'], name='tau')\n", (4599, 4637), True, 'import pandas as pd\n'), ((4927, 4969), 'pandas.Series', 'pd.Series', (["centered_trace['mu']"], {'name': '"""mu"""'}), "(centered_trace['mu'], name='mu')\n", (4936, 4969), True, 'import pandas as pd\n'), 
((4975, 5019), 'pandas.Series', 'pd.Series', (["centered_trace['tau']"], {'name': '"""tau"""'}), "(centered_trace['tau'], name='tau')\n", (4984, 5019), True, 'import pandas as pd\n'), ((5024, 5065), 'seaborn.jointplot', 'sns.jointplot', (['x', 'y'], {'xlim': 'xlim', 'ylim': 'ylim'}), '(x, y, xlim=xlim, ylim=ylim)\n', (5037, 5065), True, 'import seaborn as sns\n'), ((5066, 5090), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""centered"""'], {}), "('centered')\n", (5078, 5090), True, 'import matplotlib.pyplot as plt\n'), ((5091, 5101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5099, 5101), True, 'import matplotlib.pyplot as plt\n'), ((5238, 5283), 'pandas.Series', 'pd.Series', (["noncentered_trace['mu']"], {'name': '"""mu"""'}), "(noncentered_trace['mu'], name='mu')\n", (5247, 5283), True, 'import pandas as pd\n'), ((5289, 5336), 'pandas.Series', 'pd.Series', (["noncentered_trace['tau']"], {'name': '"""tau"""'}), "(noncentered_trace['tau'], name='tau')\n", (5298, 5336), True, 'import pandas as pd\n'), ((5341, 5382), 'seaborn.jointplot', 'sns.jointplot', (['x', 'y'], {'xlim': 'xlim', 'ylim': 'ylim'}), '(x, y, xlim=xlim, ylim=ylim)\n', (5354, 5382), True, 'import seaborn as sns\n'), ((5383, 5410), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""noncentered"""'], {}), "('noncentered')\n", (5395, 5410), True, 'import matplotlib.pyplot as plt\n'), ((5411, 5421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5419, 5421), True, 'import matplotlib.pyplot as plt\n'), ((5565, 5612), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=2, sharex=True, sharey=True)\n', (5577, 5612), True, 'import matplotlib.pyplot as plt\n'), ((5617, 5659), 'pandas.Series', 'pd.Series', (["centered_trace['mu']"], {'name': '"""mu"""'}), "(centered_trace['mu'], name='mu')\n", (5626, 5659), True, 'import pandas as pd\n'), ((5665, 5717), 'pandas.Series', 'pd.Series', (["centered_trace['log_tau']"], 
{'name': '"""log_tau"""'}), "(centered_trace['log_tau'], name='log_tau')\n", (5674, 5717), True, 'import pandas as pd\n'), ((5806, 5851), 'pandas.Series', 'pd.Series', (["noncentered_trace['mu']"], {'name': '"""mu"""'}), "(noncentered_trace['mu'], name='mu')\n", (5815, 5851), True, 'import pandas as pd\n'), ((5857, 5912), 'pandas.Series', 'pd.Series', (["noncentered_trace['log_tau']"], {'name': '"""log_tau"""'}), "(noncentered_trace['log_tau'], name='log_tau')\n", (5866, 5912), True, 'import pandas as pd\n'), ((6248, 6290), 'pandas.Series', 'pd.Series', (["centered_trace['mu']"], {'name': '"""mu"""'}), "(centered_trace['mu'], name='mu')\n", (6257, 6290), True, 'import pandas as pd\n'), ((6296, 6348), 'pandas.Series', 'pd.Series', (["centered_trace['log_tau']"], {'name': '"""log_tau"""'}), "(centered_trace['log_tau'], name='log_tau')\n", (6305, 6348), True, 'import pandas as pd\n'), ((6353, 6394), 'seaborn.jointplot', 'sns.jointplot', (['x', 'y'], {'xlim': 'xlim', 'ylim': 'ylim'}), '(x, y, xlim=xlim, ylim=ylim)\n', (6366, 6394), True, 'import seaborn as sns\n'), ((6395, 6419), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""centered"""'], {}), "('centered')\n", (6407, 6419), True, 'import matplotlib.pyplot as plt\n'), ((6420, 6430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6428, 6430), True, 'import matplotlib.pyplot as plt\n'), ((6567, 6612), 'pandas.Series', 'pd.Series', (["noncentered_trace['mu']"], {'name': '"""mu"""'}), "(noncentered_trace['mu'], name='mu')\n", (6576, 6612), True, 'import pandas as pd\n'), ((6618, 6673), 'pandas.Series', 'pd.Series', (["noncentered_trace['log_tau']"], {'name': '"""log_tau"""'}), "(noncentered_trace['log_tau'], name='log_tau')\n", (6627, 6673), True, 'import pandas as pd\n'), ((6678, 6719), 'seaborn.jointplot', 'sns.jointplot', (['x', 'y'], {'xlim': 'xlim', 'ylim': 'ylim'}), '(x, y, xlim=xlim, ylim=ylim)\n', (6691, 6719), True, 'import seaborn as sns\n'), ((6720, 6747), 'matplotlib.pyplot.suptitle', 
'plt.suptitle', (['"""noncentered"""'], {}), "('noncentered')\n", (6732, 6747), True, 'import matplotlib.pyplot as plt\n'), ((6748, 6758), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6756, 6758), True, 'import matplotlib.pyplot as plt\n'), ((6891, 7036), 'arviz.plot_forest', 'az.plot_forest', (['[centered_trace, noncentered_trace]'], {'model_names': "['centered', 'noncentered']", 'var_names': '"""theta"""', 'combined': '(True)', 'hdi_prob': '(0.95)'}), "([centered_trace, noncentered_trace], model_names=['centered',\n 'noncentered'], var_names='theta', combined=True, hdi_prob=0.95)\n", (6905, 7036), True, 'import arviz as az\n'), ((2388, 2421), 'numpy.random.normal', 'np.random.normal', (['mu_val', 'tau_val'], {}), '(mu_val, tau_val)\n', (2404, 2421), True, 'import numpy as np\n'), ((2434, 2480), 'numpy.random.normal', 'np.random.normal', (['mus[i]', 'sigma_val'], {'size': 'n[i]'}), '(mus[i], sigma_val, size=n[i])\n', (2450, 2480), True, 'import numpy as np\n'), ((2487, 2508), 'numpy.append', 'np.append', (['x', 'samples'], {}), '(x, samples)\n', (2496, 2508), True, 'import numpy as np\n'), ((2733, 2743), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (2741, 2743), True, 'import pymc3 as pm\n'), ((2790, 2817), 'pymc3.Normal', 'pm.Normal', (['"""mu"""'], {'mu': '(0)', 'sd': '(5)'}), "('mu', mu=0, sd=5)\n", (2799, 2817), True, 'import pymc3 as pm\n'), ((2828, 2858), 'pymc3.HalfCauchy', 'pm.HalfCauchy', (['"""tau"""'], {'beta': '(2.5)'}), "('tau', beta=2.5)\n", (2841, 2858), True, 'import pymc3 as pm\n'), ((2948, 2997), 'pymc3.Normal', 'pm.Normal', (['"""theta"""'], {'mu': 'mu', 'sd': 'tau', 'shape': 'n_groups'}), "('theta', mu=mu, sd=tau, shape=n_groups)\n", (2957, 2997), True, 'import pymc3 as pm\n'), ((3032, 3097), 'pymc3.Normal', 'pm.Normal', (['"""x_obs"""'], {'mu': 'theta[group_ind]', 'sd': 'sigma_val', 'observed': 'x'}), "('x_obs', mu=theta[group_ind], sd=sigma_val, observed=x)\n", (3041, 3097), True, 'import pymc3 as pm\n'), ((3229, 3255), 
'pymc3.sample', 'pm.sample', (['(10000)'], {'chains': '(2)'}), '(10000, chains=2)\n', (3238, 3255), True, 'import pymc3 as pm\n'), ((3435, 3445), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (3443, 3445), True, 'import pymc3 as pm\n'), ((3495, 3522), 'pymc3.Normal', 'pm.Normal', (['"""mu"""'], {'mu': '(0)', 'sd': '(5)'}), "('mu', mu=0, sd=5)\n", (3504, 3522), True, 'import pymc3 as pm\n'), ((3533, 3563), 'pymc3.HalfCauchy', 'pm.HalfCauchy', (['"""tau"""'], {'beta': '(2.5)'}), "('tau', beta=2.5)\n", (3546, 3563), True, 'import pymc3 as pm\n'), ((3724, 3774), 'pymc3.Normal', 'pm.Normal', (['"""var_theta"""'], {'mu': '(0)', 'sd': '(1)', 'shape': 'n_groups'}), "('var_theta', mu=0, sd=1, shape=n_groups)\n", (3733, 3774), True, 'import pymc3 as pm\n'), ((3787, 3834), 'pymc3.Deterministic', 'pm.Deterministic', (['"""theta"""', '(mu + var_theta * tau)'], {}), "('theta', mu + var_theta * tau)\n", (3803, 3834), True, 'import pymc3 as pm\n'), ((3869, 3934), 'pymc3.Normal', 'pm.Normal', (['"""x_obs"""'], {'mu': 'theta[group_ind]', 'sd': 'sigma_val', 'observed': 'x'}), "('x_obs', mu=theta[group_ind], sd=sigma_val, observed=x)\n", (3878, 3934), True, 'import pymc3 as pm\n'), ((4075, 4100), 'pymc3.sample', 'pm.sample', (['(1000)'], {'chains': '(2)'}), '(1000, chains=2)\n', (4084, 4100), True, 'import pymc3 as pm\n'), ((2901, 2912), 'theano.tensor.log', 'tt.log', (['tau'], {}), '(tau)\n', (2907, 2912), True, 'import theano.tensor as tt\n'), ((3261, 3287), 'pymc3.summary', 'pm.summary', (['centered_trace'], {}), '(centered_trace)\n', (3271, 3287), True, 'import pymc3 as pm\n'), ((3606, 3617), 'theano.tensor.log', 'tt.log', (['tau'], {}), '(tau)\n', (3612, 3617), True, 'import theano.tensor as tt\n'), ((4106, 4135), 'pymc3.summary', 'pm.summary', (['noncentered_trace'], {}), '(noncentered_trace)\n', (4116, 4135), True, 'import pymc3 as pm\n')] |
import json
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import random
from bokeh.io import output_file, output_notebook, save
from bokeh.plotting import figure, show
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.layouts import row, column, gridplot
from bokeh.models.widgets import Tabs, Panel
from bokeh.plotting import reset_output
json_file = 'PriorityLog.json' #### add file path to json file
csv_file = 'battery_log.csv' #### add file path to desired csv file
out_html = 'filename_brian.html' #### add html file path for final result
always_on = ['EEG','H'] #### sensors in node_struct.csv which should always be on
with open(json_file) as data_file:
j2 = json.load(data_file)
dfa0 = pd.DataFrame()
for n in range(len(j2["PriorityLog"])):
ind = []
l_battery = []
val = j2["PriorityLog"][n]['CurrentPriorityTable']
for i in val:
ind.append(i)
l_battery.append(val[i]['battery'])
ts = j2["PriorityLog"][n]['TimeStamp']
dfa = pd.DataFrame({'TS':[ts for i in ind], 'Sensor':ind, 'Battery_Level':l_battery})
dfa0 = dfa.append(dfa0).copy()
dfa0['Battery_Level'] = dfa0['Battery_Level'].astype(float).round(2)
dfa0 = dfa0.drop_duplicates()
dfa0.to_csv(csv_file, index = False)
df = pd.read_csv(csv_file)
df['TS'] = df['TS'].str.slice(0,19).astype('datetime64[s]')
sensors = list(np.unique(df['Sensor']))
cs = ''
figures = ''
for i in sensors:
cs = cs + "\n\n\n" + "dfs_"+i+" = df[df['Sensor'] == '"+i+"'] \nsource = ColumnDataSource(dfs_"+i+") \np_"+i+" = figure(x_axis_type='datetime', plot_width=500, plot_height=250, title = '"+i+"s Remaining Power') \np_"+i+".line('TS', 'Battery_Level', source=source, color = random.choice(['firebrick','green','blue','orange'])) \np_"+i+".sizing_mode = 'scale_width'; \np_"+i+".toolbar.logo = None; \np_"+i+".toolbar_location = None"
figures = figures + "'p_"+i+"', "
s_fig_list = "fig_list = list(["+figures[:-2]+"])"
exec(cs)
exec(s_fig_list)
s = ''
for i in range(len(fig_list)):
if i%2 == 0:
try:
s = s + "row("+fig_list[i]+","+fig_list[i+1]+"),"
except:
s = s + "row("+fig_list[i]+"),"
struct_s = "save(column(p,"+s[:-1]+"))"
output_file(out_html, mode='inline')
ns = pd.read_csv("node_struct.csv")
val = j2["PriorityLog"][-1]['CurrentPriorityTable']
active_sensor = next(iter(val))
ns['Flag'] = np.where((ns['Label'] == active_sensor) | (ns['Label'].isin(always_on)), 'green', 'navy')
ns['Size'] = np.where(ns['Flag'] == 'green', 30, 15)
source = ColumnDataSource(ns)
p = figure(plot_width = 1000, plot_height = 400, title = 'Topology Multi-Hop')
p.circle('X','Y', color='Flag' , size = 'Size', alpha=0.5, source = source)
labels = LabelSet(x='X', y='Y', text='Label', level='glyph',source=source)
p.toolbar.logo = None
p.toolbar_location = None
p.axis.visible = False
p.add_layout(labels)
exec(struct_s) | [
"pandas.DataFrame",
"bokeh.models.ColumnDataSource",
"bokeh.plotting.figure",
"json.load",
"pandas.read_csv",
"bokeh.io.output_file",
"numpy.where",
"numpy.unique",
"bokeh.models.LabelSet"
] | [((772, 786), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (784, 786), True, 'import pandas as pd\n'), ((1349, 1370), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (1360, 1370), True, 'import pandas as pd\n'), ((2319, 2355), 'bokeh.io.output_file', 'output_file', (['out_html'], {'mode': '"""inline"""'}), "(out_html, mode='inline')\n", (2330, 2355), False, 'from bokeh.io import output_file, output_notebook, save\n'), ((2364, 2394), 'pandas.read_csv', 'pd.read_csv', (['"""node_struct.csv"""'], {}), "('node_struct.csv')\n", (2375, 2394), True, 'import pandas as pd\n'), ((2600, 2639), 'numpy.where', 'np.where', (["(ns['Flag'] == 'green')", '(30)', '(15)'], {}), "(ns['Flag'] == 'green', 30, 15)\n", (2608, 2639), True, 'import numpy as np\n'), ((2652, 2672), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['ns'], {}), '(ns)\n', (2668, 2672), False, 'from bokeh.models import ColumnDataSource, LabelSet\n'), ((2678, 2746), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(1000)', 'plot_height': '(400)', 'title': '"""Topology Multi-Hop"""'}), "(plot_width=1000, plot_height=400, title='Topology Multi-Hop')\n", (2684, 2746), False, 'from bokeh.plotting import figure, show\n'), ((2840, 2906), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""X"""', 'y': '"""Y"""', 'text': '"""Label"""', 'level': '"""glyph"""', 'source': 'source'}), "(x='X', y='Y', text='Label', level='glyph', source=source)\n", (2848, 2906), False, 'from bokeh.models import ColumnDataSource, LabelSet\n'), ((741, 761), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (750, 761), False, 'import json\n'), ((1066, 1152), 'pandas.DataFrame', 'pd.DataFrame', (["{'TS': [ts for i in ind], 'Sensor': ind, 'Battery_Level': l_battery}"], {}), "({'TS': [ts for i in ind], 'Sensor': ind, 'Battery_Level':\n l_battery})\n", (1078, 1152), True, 'import pandas as pd\n'), ((1450, 1473), 'numpy.unique', 'np.unique', (["df['Sensor']"], {}), "(df['Sensor'])\n", (1459, 
1473), True, 'import numpy as np\n')] |
import numpy as np
import shapely.geometry as shgeo
from .transforms import bbox2type
from .utils import get_bbox_type
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
assert mode in ['iou', 'iof']
assert get_bbox_type(bboxes1) != 'notype'
assert get_bbox_type(bboxes2) != 'notype'
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
if is_aligned:
assert rows == cols
if rows * cols == 0:
return np.zeros((rows, 1), dtype=np.float32) \
if is_aligned else np.zeros((rows, cols), dtype=np.float32)
hbboxes1 = bbox2type(bboxes1, 'hbb')
hbboxes2 = bbox2type(bboxes2, 'hbb')
if not is_aligned:
hbboxes1 = hbboxes1[:, None, :]
lt = np.maximum(hbboxes1[..., :2], hbboxes2[..., :2])
rb = np.minimum(hbboxes1[..., 2:], hbboxes2[..., 2:])
wh = np.clip(rb - lt, 0, np.inf)
h_overlaps = wh[..., 0] * wh[..., 1]
if get_bbox_type(bboxes1) == 'hbb' and get_bbox_type(bboxes2) == 'hbb':
overlaps = h_overlaps
areas1 = (hbboxes1[..., 2] - hbboxes1[..., 0]) * (
hbboxes1[..., 3] - hbboxes1[..., 1])
if mode == 'iou':
areas2 = (hbboxes2[..., 2] - hbboxes2[..., 0]) * (
hbboxes2[..., 3] - hbboxes2[..., 1])
unions = areas1 + areas2 - overlaps
else:
unions = areas1
else:
polys1 = bbox2type(bboxes1, 'poly')
polys2 = bbox2type(bboxes2, 'poly')
sg_polys1 = [shgeo.Polygon(p) for p in polys1.reshape(rows, -1, 2)]
sg_polys2 = [shgeo.Polygon(p) for p in polys2.reshape(cols, -1, 2)]
overlaps = np.zeros(h_overlaps.shape)
for p in zip(*np.nonzero(h_overlaps)):
overlaps[p] = sg_polys1[p[0]].intersection(sg_polys2[p[-1]]).area
if mode == 'iou':
unions = np.zeros(h_overlaps.shape, dtype=np.float32)
for p in zip(*np.nonzero(h_overlaps)):
unions[p] = sg_polys1[p[0]].union(sg_polys2[p[-1]]).area
else:
unions = np.array([p.area for p in sg_polys1], dtype=np.float32)
if not is_aligned:
unions = unions[..., None]
unions = np.clip(unions, eps, np.inf)
outputs = overlaps / unions
if outputs.ndim == 1:
outputs = outputs[..., None]
return outputs
def bbox_areas(bboxes):
bbox_type = get_bbox_type(bboxes)
assert bbox_type != 'notype'
if bbox_type == 'hbb':
areas = (bboxes[..., 2] - bboxes[..., 0]) * (
bboxes[..., 3] - bboxes[..., 1])
if bbox_type == 'obb':
areas = bboxes[..., 2] * bboxes[..., 3]
if bbox_type == 'poly':
areas = np.zeros(bboxes.shape[:-1], dtype=np.float32)
bboxes = bboxes.reshape(*bboxes.shape[:-1], 4, 2)
for i in range(4):
areas += 0.5 * (bboxes[..., i, 0] * bboxes[..., (i+1)%4, 1] -
bboxes[..., (i+1)%4, 0] * bboxes[..., i, 1])
areas = np.abs(areas)
return areas
def bbox_nms(bboxes, scores, iou_thr=0.5, score_thr=0.01):
assert get_bbox_type(bboxes) != 'notype'
order = scores.argsort()[::-1]
order = order[scores[order] > score_thr]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
keep_bbox = bboxes[[i]]
other_bboxes = bboxes[order[1:]]
ious = bbox_overlaps(keep_bbox, other_bboxes)
idx = np.where(ious <= iou_thr)[1]
order = order[idx + 1]
return np.array(keep, dtype=np.int64)
def bbox_area_nms(bboxes, iou_thr=0.5):
assert get_bbox_type(bboxes) != 'notype'
areas = bbox_areas(bboxes)
order = areas.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
keep_bbox = bboxes[[i]]
other_bboxes = bboxes[order[1:]]
ious = bbox_overlaps(keep_bbox, other_bboxes)
idx = np.where(ious <= iou_thr)[1]
order = order[idx + 1]
return np.array(keep, dtype=np.int64)
| [
"numpy.minimum",
"numpy.maximum",
"numpy.abs",
"shapely.geometry.Polygon",
"numpy.zeros",
"numpy.clip",
"numpy.nonzero",
"numpy.where",
"numpy.array"
] | [((740, 788), 'numpy.maximum', 'np.maximum', (['hbboxes1[..., :2]', 'hbboxes2[..., :2]'], {}), '(hbboxes1[..., :2], hbboxes2[..., :2])\n', (750, 788), True, 'import numpy as np\n'), ((798, 846), 'numpy.minimum', 'np.minimum', (['hbboxes1[..., 2:]', 'hbboxes2[..., 2:]'], {}), '(hbboxes1[..., 2:], hbboxes2[..., 2:])\n', (808, 846), True, 'import numpy as np\n'), ((856, 883), 'numpy.clip', 'np.clip', (['(rb - lt)', '(0)', 'np.inf'], {}), '(rb - lt, 0, np.inf)\n', (863, 883), True, 'import numpy as np\n'), ((2193, 2221), 'numpy.clip', 'np.clip', (['unions', 'eps', 'np.inf'], {}), '(unions, eps, np.inf)\n', (2200, 2221), True, 'import numpy as np\n'), ((3492, 3522), 'numpy.array', 'np.array', (['keep'], {'dtype': 'np.int64'}), '(keep, dtype=np.int64)\n', (3500, 3522), True, 'import numpy as np\n'), ((3975, 4005), 'numpy.array', 'np.array', (['keep'], {'dtype': 'np.int64'}), '(keep, dtype=np.int64)\n', (3983, 4005), True, 'import numpy as np\n'), ((1645, 1671), 'numpy.zeros', 'np.zeros', (['h_overlaps.shape'], {}), '(h_overlaps.shape)\n', (1653, 1671), True, 'import numpy as np\n'), ((2681, 2726), 'numpy.zeros', 'np.zeros', (['bboxes.shape[:-1]'], {'dtype': 'np.float32'}), '(bboxes.shape[:-1], dtype=np.float32)\n', (2689, 2726), True, 'import numpy as np\n'), ((2975, 2988), 'numpy.abs', 'np.abs', (['areas'], {}), '(areas)\n', (2981, 2988), True, 'import numpy as np\n'), ((469, 506), 'numpy.zeros', 'np.zeros', (['(rows, 1)'], {'dtype': 'np.float32'}), '((rows, 1), dtype=np.float32)\n', (477, 506), True, 'import numpy as np\n'), ((544, 584), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {'dtype': 'np.float32'}), '((rows, cols), dtype=np.float32)\n', (552, 584), True, 'import numpy as np\n'), ((1494, 1510), 'shapely.geometry.Polygon', 'shgeo.Polygon', (['p'], {}), '(p)\n', (1507, 1510), True, 'import shapely.geometry as shgeo\n'), ((1570, 1586), 'shapely.geometry.Polygon', 'shgeo.Polygon', (['p'], {}), '(p)\n', (1583, 1586), True, 'import shapely.geometry as shgeo\n'), 
((1845, 1889), 'numpy.zeros', 'np.zeros', (['h_overlaps.shape'], {'dtype': 'np.float32'}), '(h_overlaps.shape, dtype=np.float32)\n', (1853, 1889), True, 'import numpy as np\n'), ((2049, 2104), 'numpy.array', 'np.array', (['[p.area for p in sg_polys1]'], {'dtype': 'np.float32'}), '([p.area for p in sg_polys1], dtype=np.float32)\n', (2057, 2104), True, 'import numpy as np\n'), ((3420, 3445), 'numpy.where', 'np.where', (['(ious <= iou_thr)'], {}), '(ious <= iou_thr)\n', (3428, 3445), True, 'import numpy as np\n'), ((3903, 3928), 'numpy.where', 'np.where', (['(ious <= iou_thr)'], {}), '(ious <= iou_thr)\n', (3911, 3928), True, 'import numpy as np\n'), ((1694, 1716), 'numpy.nonzero', 'np.nonzero', (['h_overlaps'], {}), '(h_overlaps)\n', (1704, 1716), True, 'import numpy as np\n'), ((1916, 1938), 'numpy.nonzero', 'np.nonzero', (['h_overlaps'], {}), '(h_overlaps)\n', (1926, 1938), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import collections
import glob
import os
import os.path as osp
import shutil
import numpy as np
here = osp.dirname(osp.abspath(__file__))
def main():
dataset_dir = osp.join(here, 'dataset_data/20180204')
splits_dir = osp.join(here, 'dataset_data/20180204_splits')
if osp.exists(splits_dir):
print('splits_dir already exists: %s' % splits_dir)
return
os.makedirs(splits_dir)
shutil.copy(
osp.join(dataset_dir, 'class_names.txt'),
osp.join(splits_dir, 'class_names.txt'))
object_freq_by_video = {class_id: 0 for class_id in range(1, 41)}
object_freq_by_frame = {class_id: 0 for class_id in range(1, 41)}
videos = []
for video_id in sorted(os.listdir(dataset_dir)):
video_dir = osp.join(dataset_dir, video_id)
if not osp.isdir(video_dir):
continue
npz_files = sorted(glob.glob(osp.join(video_dir, '*.npz')))
n_frames = 0
class_ids_in_video = set()
for npz_file in npz_files:
lbl_cls = np.load(npz_file)['lbl_cls']
class_ids = np.unique(lbl_cls)
keep = ~np.isin(class_ids, [-1, 0])
class_ids = class_ids[keep]
for class_id in class_ids:
object_freq_by_frame[class_id] += 1
class_ids_in_video.add(class_id)
n_frames += 1
for class_id in class_ids_in_video:
object_freq_by_video[class_id] += 1
videos.append(dict(
id=video_id,
dir=video_dir,
class_ids=list(sorted(class_ids_in_video)),
n_objects=len(class_ids_in_video),
n_frames=n_frames,
))
print('# of videos: %d' % len(videos))
# RANSAC to split Train/Test
ratio_train = 0.66
n_train = int(ratio_train * len(videos))
while True:
p = np.random.permutation(len(videos))
indices_train = p[:n_train]
indices_test = p[n_train:]
videos_train = [videos[i] for i in indices_train]
videos_test = [videos[i] for i in indices_test]
class_ids = []
for video in videos_train:
class_ids.extend(video['class_ids'])
count = collections.Counter(class_ids)
count_values_unique = set(count.values())
if not count_values_unique.issubset({3, 4, 5}):
continue
mean_count_ideal = 7 * ratio_train
mean_count = 1. * sum(count.values()) / len(count)
if abs(mean_count - mean_count_ideal) > 0.1:
continue
break
print('Mean Count (Ideal): %f' % mean_count_ideal)
print('Mean Count: %f' % mean_count)
# print(count_values_unique)
# print(count.values())
print('Videos Train: %s' % sorted([v['id'] for v in videos_train]))
print('Videos Test: %s' % sorted([v['id'] for v in videos_test]))
split_dir = osp.join(splits_dir, 'train')
os.makedirs(split_dir)
for video in videos_train:
shutil.copytree(
video['dir'], osp.join(split_dir, osp.basename(video['dir'])))
split_dir = osp.join(splits_dir, 'test')
os.makedirs(split_dir)
for video in videos_test:
shutil.copytree(
video['dir'], osp.join(split_dir, osp.basename(video['dir'])))
print('Splitted dataset: %s' % splits_dir)
if __name__ == '__main__':
main()
| [
"numpy.isin",
"os.path.abspath",
"numpy.load",
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"os.path.exists",
"collections.Counter",
"os.path.join",
"os.listdir",
"numpy.unique"
] | [((141, 162), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (152, 162), True, 'import os.path as osp\n'), ((196, 235), 'os.path.join', 'osp.join', (['here', '"""dataset_data/20180204"""'], {}), "(here, 'dataset_data/20180204')\n", (204, 235), True, 'import os.path as osp\n'), ((253, 299), 'os.path.join', 'osp.join', (['here', '"""dataset_data/20180204_splits"""'], {}), "(here, 'dataset_data/20180204_splits')\n", (261, 299), True, 'import os.path as osp\n'), ((308, 330), 'os.path.exists', 'osp.exists', (['splits_dir'], {}), '(splits_dir)\n', (318, 330), True, 'import os.path as osp\n'), ((411, 434), 'os.makedirs', 'os.makedirs', (['splits_dir'], {}), '(splits_dir)\n', (422, 434), False, 'import os\n'), ((2888, 2917), 'os.path.join', 'osp.join', (['splits_dir', '"""train"""'], {}), "(splits_dir, 'train')\n", (2896, 2917), True, 'import os.path as osp\n'), ((2922, 2944), 'os.makedirs', 'os.makedirs', (['split_dir'], {}), '(split_dir)\n', (2933, 2944), False, 'import os\n'), ((3092, 3120), 'os.path.join', 'osp.join', (['splits_dir', '"""test"""'], {}), "(splits_dir, 'test')\n", (3100, 3120), True, 'import os.path as osp\n'), ((3125, 3147), 'os.makedirs', 'os.makedirs', (['split_dir'], {}), '(split_dir)\n', (3136, 3147), False, 'import os\n'), ((460, 500), 'os.path.join', 'osp.join', (['dataset_dir', '"""class_names.txt"""'], {}), "(dataset_dir, 'class_names.txt')\n", (468, 500), True, 'import os.path as osp\n'), ((510, 549), 'os.path.join', 'osp.join', (['splits_dir', '"""class_names.txt"""'], {}), "(splits_dir, 'class_names.txt')\n", (518, 549), True, 'import os.path as osp\n'), ((735, 758), 'os.listdir', 'os.listdir', (['dataset_dir'], {}), '(dataset_dir)\n', (745, 758), False, 'import os\n'), ((781, 812), 'os.path.join', 'osp.join', (['dataset_dir', 'video_id'], {}), '(dataset_dir, video_id)\n', (789, 812), True, 'import os.path as osp\n'), ((2220, 2250), 'collections.Counter', 'collections.Counter', (['class_ids'], {}), '(class_ids)\n', 
(2239, 2250), False, 'import collections\n'), ((828, 848), 'os.path.isdir', 'osp.isdir', (['video_dir'], {}), '(video_dir)\n', (837, 848), True, 'import os.path as osp\n'), ((1108, 1126), 'numpy.unique', 'np.unique', (['lbl_cls'], {}), '(lbl_cls)\n', (1117, 1126), True, 'import numpy as np\n'), ((909, 937), 'os.path.join', 'osp.join', (['video_dir', '"""*.npz"""'], {}), "(video_dir, '*.npz')\n", (917, 937), True, 'import os.path as osp\n'), ((1054, 1071), 'numpy.load', 'np.load', (['npz_file'], {}), '(npz_file)\n', (1061, 1071), True, 'import numpy as np\n'), ((1147, 1174), 'numpy.isin', 'np.isin', (['class_ids', '[-1, 0]'], {}), '(class_ids, [-1, 0])\n', (1154, 1174), True, 'import numpy as np\n'), ((3047, 3073), 'os.path.basename', 'osp.basename', (["video['dir']"], {}), "(video['dir'])\n", (3059, 3073), True, 'import os.path as osp\n'), ((3249, 3275), 'os.path.basename', 'osp.basename', (["video['dir']"], {}), "(video['dir'])\n", (3261, 3275), True, 'import os.path as osp\n')] |
import sys
import pygame
import numpy as np
from pygame.locals import *
from env.color import Colors
from env.pixel import Pixel
from env.snake import Snake
from env.apple import Apple
from env.environment import Environment
from env.config import *
class SnakeGame(object):
def __init__(self, is_tick=False):
pygame.init()
global screen, FPS
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
FPS = pygame.time.Clock()
self.is_tick = is_tick
self._build_enviroment()
def _build_enviroment(self):
self.environment = Environment(SCREEN_WIDTH, SCREEN_HEIGHT, PIXEL_SIZE)
self.snake = Snake()
self.apple = Apple()
self.apple.reposition(self.snake)
self.score = 0
@property
def observation_shape(self):
return np.shape(self.environment.pixels)
def new_round(self):
self._build_enviroment()
feedback = Feedback(
observation=np.copy(self.environment.pixels),
reward=0,
game_over=False
)
return feedback
def step(self, action):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if self.is_tick:
FPS.tick(10)
if action == MOVE_ON:
self.snake.move()
elif action == TURN_LEFT:
self.snake.turn_left()
elif action == TURN_RIGHT:
self.snake.turn_right()
eat_apple = self.eat_apple(self.snake, self.apple)
game_over = self.game_is_over(self.snake)
if game_over is False:
self.render()
reward = 1 if eat_apple is True else 0
if game_over:
reward = -1
feedback = Feedback(
observation=np.copy(self.environment.pixels),
reward=reward,
game_over=game_over
)
return feedback
@property
def actions_num(self):
return len(SNAKE_ACTIONS)
@property
def current_score(self):
return self.score
def draw_node(self, x, y, px):
rect = pygame.Rect(x * PIXEL_SIZE, y * PIXEL_SIZE, PIXEL_SIZE, PIXEL_SIZE)
if px == Pixel.WALL:
pygame.draw.rect(screen, Colors.WALL, rect)
elif px == Pixel.APPLE:
pygame.draw.rect(screen, Colors.APPLE, rect)
elif px == Pixel.SNAKE_HEAD:
pygame.draw.rect(screen, Colors.SNAKE_HEAD, rect)
elif px == Pixel.SNAKE_BODY:
pygame.draw.rect(screen, Colors.SNAKE_BODY, rect)
def draw_environment(self, environment):
screen.fill((Colors.BLANK))
w, h = environment.shape
for i in range(w):
for j in range(h):
self.draw_node(i, j, environment.read_pixel(i, j))
def render(self):
self.update_enviroment(self.snake,self.apple, self.environment)
self.draw_environment(self.environment)
pygame.display.update()
def update_enviroment(self, snake, apple, environment):
environment.reset()
for px in snake.body:
environment.write_pixel(Pixel(px.x, px.y), Pixel.SNAKE_BODY)
environment.write_pixel(snake.head, Pixel.SNAKE_HEAD)
environment.write_pixel(Pixel(apple.location.x, apple.location.y), Pixel.APPLE)
def eat_apple(self, snake, apple):
if snake.head.x == apple.location.x and snake.head.y == apple.location.y:
snake.growup()
apple.reposition(snake)
self.score += 1
return True
return False
def game_is_over(self, snake):
if snake.head.x * PIXEL_SIZE < WALL_THICKNESS * PIXEL_SIZE or snake.head.x * PIXEL_SIZE >= SCREEN_WIDTH - PIXEL_SIZE or snake.head.y * PIXEL_SIZE < WALL_THICKNESS * PIXEL_SIZE or snake.head.y * PIXEL_SIZE >= SCREEN_HEIGHT - PIXEL_SIZE:
return True
else:
for part in snake.body[1:]:
if part == snake.head:
return True
return False
def gameOver(self):
screen.fill((0, 0, 0))
fontObj = pygame.font.Font('freesansbold.ttf', 20)
textSurfaceObj1 = fontObj.render('Game over!', True, (255, 0, 0))
textRectObj1 = textSurfaceObj1.get_rect()
textRectObj1.center = (SCREEN_WIDTH / 3, SCREEN_HEIGHT / 3)
screen.blit(textSurfaceObj1, textRectObj1)
textSurfaceObj2 = fontObj.render('Score: %s' % self.score, True, (255, 0, 0))
textRectObj2 = textSurfaceObj2.get_rect()
textRectObj2.center = (SCREEN_WIDTH*2/3, SCREEN_HEIGHT*2/3)
screen.blit(textSurfaceObj2, textRectObj2)
pygame.display.update()
over = True
while(over):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
def destroy(self):
pygame.quit()
sys.exit()
class Feedback(object):
def __init__(self, observation, reward, game_over):
self.observation = observation,
self.reward = reward,
self.game_over = game_over | [
"pygame.quit",
"numpy.copy",
"pygame.event.get",
"pygame.display.set_mode",
"env.apple.Apple",
"pygame.Rect",
"pygame.draw.rect",
"pygame.init",
"env.snake.Snake",
"numpy.shape",
"pygame.display.update",
"env.pixel.Pixel",
"pygame.font.Font",
"sys.exit",
"pygame.time.Clock",
"env.envir... | [((324, 337), 'pygame.init', 'pygame.init', ([], {}), '()\n', (335, 337), False, 'import pygame\n'), ((382, 436), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(SCREEN_WIDTH, SCREEN_HEIGHT)'], {}), '((SCREEN_WIDTH, SCREEN_HEIGHT))\n', (405, 436), False, 'import pygame\n'), ((451, 470), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (468, 470), False, 'import pygame\n'), ((597, 649), 'env.environment.Environment', 'Environment', (['SCREEN_WIDTH', 'SCREEN_HEIGHT', 'PIXEL_SIZE'], {}), '(SCREEN_WIDTH, SCREEN_HEIGHT, PIXEL_SIZE)\n', (608, 649), False, 'from env.environment import Environment\n'), ((671, 678), 'env.snake.Snake', 'Snake', ([], {}), '()\n', (676, 678), False, 'from env.snake import Snake\n'), ((700, 707), 'env.apple.Apple', 'Apple', ([], {}), '()\n', (705, 707), False, 'from env.apple import Apple\n'), ((840, 873), 'numpy.shape', 'np.shape', (['self.environment.pixels'], {}), '(self.environment.pixels)\n', (848, 873), True, 'import numpy as np\n'), ((1154, 1172), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1170, 1172), False, 'import pygame\n'), ((2183, 2250), 'pygame.Rect', 'pygame.Rect', (['(x * PIXEL_SIZE)', '(y * PIXEL_SIZE)', 'PIXEL_SIZE', 'PIXEL_SIZE'], {}), '(x * PIXEL_SIZE, y * PIXEL_SIZE, PIXEL_SIZE, PIXEL_SIZE)\n', (2194, 2250), False, 'import pygame\n'), ((3018, 3041), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3039, 3041), False, 'import pygame\n'), ((4170, 4210), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(20)'], {}), "('freesansbold.ttf', 20)\n", (4186, 4210), False, 'import pygame\n'), ((4719, 4742), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4740, 4742), False, 'import pygame\n'), ((4967, 4980), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4978, 4980), False, 'import pygame\n'), ((4989, 4999), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4997, 4999), False, 'import sys\n'), ((2292, 2335), 'pygame.draw.rect', 
'pygame.draw.rect', (['screen', 'Colors.WALL', 'rect'], {}), '(screen, Colors.WALL, rect)\n', (2308, 2335), False, 'import pygame\n'), ((3328, 3369), 'env.pixel.Pixel', 'Pixel', (['apple.location.x', 'apple.location.y'], {}), '(apple.location.x, apple.location.y)\n', (3333, 3369), False, 'from env.pixel import Pixel\n'), ((4810, 4828), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4826, 4828), False, 'import pygame\n'), ((986, 1018), 'numpy.copy', 'np.copy', (['self.environment.pixels'], {}), '(self.environment.pixels)\n', (993, 1018), True, 'import numpy as np\n'), ((1225, 1238), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1236, 1238), False, 'import pygame\n'), ((1255, 1265), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1263, 1265), False, 'import sys\n'), ((1850, 1882), 'numpy.copy', 'np.copy', (['self.environment.pixels'], {}), '(self.environment.pixels)\n', (1857, 1882), True, 'import numpy as np\n'), ((2380, 2424), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'Colors.APPLE', 'rect'], {}), '(screen, Colors.APPLE, rect)\n', (2396, 2424), False, 'import pygame\n'), ((3197, 3214), 'env.pixel.Pixel', 'Pixel', (['px.x', 'px.y'], {}), '(px.x, px.y)\n', (3202, 3214), False, 'from env.pixel import Pixel\n'), ((2474, 2523), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'Colors.SNAKE_HEAD', 'rect'], {}), '(screen, Colors.SNAKE_HEAD, rect)\n', (2490, 2523), False, 'import pygame\n'), ((4889, 4902), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4900, 4902), False, 'import pygame\n'), ((4923, 4933), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4931, 4933), False, 'import sys\n'), ((2573, 2622), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'Colors.SNAKE_BODY', 'rect'], {}), '(screen, Colors.SNAKE_BODY, rect)\n', (2589, 2622), False, 'import pygame\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
data = np.loadtxt("datos.dat")
fig = plt.figure(figsize = (15,7))
plt.subplot(1,2,1)
x = np.arange(0,1,0.01)
y = np.arange(0,1,0.01)
# ax = Axes3D(fig)
# ax.plot_trisurf(x,y, data)
plt.plot(x, data[0:100,0]/100)
plt.subplot(1,2,2)
plt.plot(x, data[0:100,0], label = "Inicial")
plt.plot(x, data[0:100,3], label = "Final")
plt.xlim(0,1)
plt.ylim(-1,1)
plt.savefig("fig.png") | [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.loadtxt",
"matplotlib.pyplot.savefig"
] | [((99, 122), 'numpy.loadtxt', 'np.loadtxt', (['"""datos.dat"""'], {}), "('datos.dat')\n", (109, 122), True, 'import numpy as np\n'), ((130, 157), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 7)'}), '(figsize=(15, 7))\n', (140, 157), True, 'import matplotlib.pyplot as plt\n'), ((160, 180), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (171, 180), True, 'import matplotlib.pyplot as plt\n'), ((184, 205), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (193, 205), True, 'import numpy as np\n'), ((208, 229), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (217, 229), True, 'import numpy as np\n'), ((277, 310), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(data[0:100, 0] / 100)'], {}), '(x, data[0:100, 0] / 100)\n', (285, 310), True, 'import matplotlib.pyplot as plt\n'), ((310, 330), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (321, 330), True, 'import matplotlib.pyplot as plt\n'), ((329, 373), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'data[0:100, 0]'], {'label': '"""Inicial"""'}), "(x, data[0:100, 0], label='Inicial')\n", (337, 373), True, 'import matplotlib.pyplot as plt\n'), ((375, 417), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'data[0:100, 3]'], {'label': '"""Final"""'}), "(x, data[0:100, 3], label='Final')\n", (383, 417), True, 'import matplotlib.pyplot as plt\n'), ((420, 434), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (428, 434), True, 'import matplotlib.pyplot as plt\n'), ((434, 449), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (442, 449), True, 'import matplotlib.pyplot as plt\n'), ((450, 472), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig.png"""'], {}), "('fig.png')\n", (461, 472), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import numpy as np
DELIM = ':'
class HandModel(object):
def __init__(self, parents, base_relatives, inverse_base_absolutes, triangles, base_positions, weights, nbones):
self.nbones = nbones
self.parents = parents
self.base_relatives = base_relatives
self.inverse_base_absolutes = inverse_base_absolutes
self.base_positions = base_positions
self.weights = weights
self.triangles = triangles
self.is_mirrored = False
class HandData(object):
def __init__(self, model, correspondences, points):
self.model = model
self.correspondences = correspondences
self.points = points
def load_model(path):
# Read in triangle info.
triangles = np.loadtxt(os.path.join(
path, 'triangles.txt'), int, delimiter=DELIM)
# Process bones file.
bones_path = os.path.join(path, 'bones.txt')
# Grab bone names.
bone_names = [line.split(DELIM)[0] for line in open(bones_path)]
# Grab bone parent indices.
parents = np.loadtxt(bones_path, int, usecols=[
1], delimiter=DELIM).flatten()
# Grab relative transforms.
relative_transforms = np.loadtxt(bones_path, usecols=range(
2, 2 + 16), delimiter=DELIM).reshape(len(parents), 4, 4)
def to_floats(atoms):
return [float(atom) for atom in atoms]
vertices_path = os.path.join(path, 'vertices.txt')
n_bones = len(bone_names)
# Find number of vertices.
with open(vertices_path) as handle:
n_verts = len(handle.readlines())
# Read in vertex info.
positions = np.zeros((n_verts, 3))
weights = np.zeros((n_verts, n_bones))
with open(vertices_path) as handle:
for i_vert, line in enumerate(handle):
atoms = line.split(DELIM)
positions[i_vert] = to_floats(atoms[:3])
for i in range(int(atoms[8])):
i_bone = int(atoms[9 + i * 2])
weights[i_vert, i_bone] = float(atoms[9 + i * 2 + 1])
# Grab absolute invers transforms.
inverse_absolute_transforms = np.loadtxt(bones_path, usecols=range(
2 + 16, 2 + 16 + 16), delimiter=DELIM).reshape(len(parents), 4, 4)
n_vertices = positions.shape[0]
homogeneous_base_positions = np.ones((n_vertices, 4))
homogeneous_base_positions[:, :3] = positions
result = HandModel(parents, relative_transforms, inverse_absolute_transforms,
triangles, homogeneous_base_positions, weights, n_bones)
return result
def read_hand_instance(model_dir, fn, read_us):
model = load_model(model_dir)
fid = open(fn, "r")
line = fid.readline()
line = line.split()
npts = int(line[0])
ntheta = int(line[1])
lines = [fid.readline().split() for i in range(npts)]
correspondences = np.array([int(line[0]) for line in lines])
points = np.array([[float(line[i])
for i in range(1, len(line))] for line in lines])
if read_us:
us = np.array([[float(elem) for elem in fid.readline().split()]
for i_pt in range(npts)])
params = np.array([float(fid.readline()) for i in range(ntheta)])
fid.close()
data = HandData(model, correspondences, points)
if read_us:
return params, us, data
else:
return params, data
def write_J(fn, J):
fid = open(fn, "w")
print("%i %i" % (J.shape[0], J.shape[1]), file=fid)
line = ""
for row in J:
for elem in row:
line += ("%f " % elem)
line += "\n"
print(line, file=fid)
fid.close()
| [
"numpy.zeros",
"numpy.loadtxt",
"os.path.join",
"numpy.ones"
] | [((950, 981), 'os.path.join', 'os.path.join', (['path', '"""bones.txt"""'], {}), "(path, 'bones.txt')\n", (962, 981), False, 'import os\n'), ((1473, 1507), 'os.path.join', 'os.path.join', (['path', '"""vertices.txt"""'], {}), "(path, 'vertices.txt')\n", (1485, 1507), False, 'import os\n'), ((1697, 1719), 'numpy.zeros', 'np.zeros', (['(n_verts, 3)'], {}), '((n_verts, 3))\n', (1705, 1719), True, 'import numpy as np\n'), ((1734, 1762), 'numpy.zeros', 'np.zeros', (['(n_verts, n_bones)'], {}), '((n_verts, n_bones))\n', (1742, 1762), True, 'import numpy as np\n'), ((2361, 2385), 'numpy.ones', 'np.ones', (['(n_vertices, 4)'], {}), '((n_vertices, 4))\n', (2368, 2385), True, 'import numpy as np\n'), ((838, 873), 'os.path.join', 'os.path.join', (['path', '"""triangles.txt"""'], {}), "(path, 'triangles.txt')\n", (850, 873), False, 'import os\n'), ((1122, 1179), 'numpy.loadtxt', 'np.loadtxt', (['bones_path', 'int'], {'usecols': '[1]', 'delimiter': 'DELIM'}), '(bones_path, int, usecols=[1], delimiter=DELIM)\n', (1132, 1179), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.audio.transforms as audio
from mindspore import log as logger
def count_unequal_element(data_expected, data_me, rtol, atol):
assert data_expected.shape == data_me.shape
total_count = len(data_expected.flatten())
error = np.abs(data_expected - data_me)
greater = np.greater(error, atol + np.abs(data_expected) * rtol)
loss_count = np.count_nonzero(greater)
assert (loss_count / total_count) < rtol, \
"\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}". \
format(data_expected[greater], data_me[greater], error[greater])
def test_func_biquad_eager():
""" mindspore eager mode normal testcase:biquad op"""
# Original waveform
waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[0.0100, 0.0388, 0.1923],
[0.0400, 0.1252, 0.6530]], dtype=np.float64)
biquad_op = audio.Biquad(0.01, 0.02, 0.13, 1, 0.12, 0.3)
# Filtered waveform by biquad
output = biquad_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_func_biquad_pipeline():
""" mindspore pipeline mode normal testcase:biquad op"""
# Original waveform
waveform = np.array([[3.2, 2.1, 1.3], [6.2, 5.3, 6]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[1.0000, 1.0000, 0.5844],
[1.0000, 1.0000, 1.0000]], dtype=np.float64)
dataset = ds.NumpySlicesDataset(waveform, ["audio"], shuffle=False)
biquad_op = audio.Biquad(1, 0.02, 0.13, 1, 0.12, 0.3)
# Filtered waveform by biquad
dataset = dataset.map(input_columns=["audio"], operations=biquad_op, num_parallel_workers=8)
i = 0
for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
count_unequal_element(expect_waveform[i, :],
item['audio'], 0.0001, 0.0001)
i += 1
def test_biquad_invalid_input():
def test_invalid_input(test_name, b0, b1, b2, a0, a1, a2, error, error_msg):
logger.info("Test Biquad with bad input: {0}".format(test_name))
with pytest.raises(error) as error_info:
audio.Biquad(b0, b1, b2, a0, a1, a2)
assert error_msg in str(error_info.value)
test_invalid_input("invalid b0 parameter type as a String", "0.01", 0.02, 0.13, 1, 0.12, 0.3, TypeError,
"Argument b0 with value 0.01 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid b0 parameter value", 441324343243242342345300, 0.02, 0.13, 1, 0.12, 0.3, ValueError,
"Input b0 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid b1 parameter type as a String", 0.01, "0.02", 0.13, 0, 0.12, 0.3, TypeError,
"Argument b1 with value 0.02 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid b1 parameter value", 0.01, 441324343243242342345300, 0.13, 1, 0.12, 0.3, ValueError,
"Input b1 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid b2 parameter type as a String", 0.01, 0.02, "0.13", 0, 0.12, 0.3, TypeError,
"Argument b2 with value 0.13 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid b2 parameter value", 0.01, 0.02, 441324343243242342345300, 1, 0.12, 0.3, ValueError,
"Input b2 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid a0 parameter type as a String", 0.01, 0.02, 0.13, '1', 0.12, 0.3, TypeError,
"Argument a0 with value 1 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid a0 parameter value", 0.01, 0.02, 0.13, 0, 0.12, 0.3, ValueError,
"Input a0 is not within the required interval of [-16777216, 0) and (0, 16777216].")
test_invalid_input("invalid a0 parameter value", 0.01, 0.02, 0.13, 441324343243242342345300, 0.12, 0.3, ValueError,
"Input a0 is not within the required interval of [-16777216, 0) and (0, 16777216].")
test_invalid_input("invalid a1 parameter type as a String", 0.01, 0.02, 0.13, 1, '0.12', 0.3, TypeError,
"Argument a1 with value 0.12 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid a1 parameter value", 0.01, 0.02, 0.13, 1, 441324343243242342345300, 0.3, ValueError,
"Input a1 is not within the required interval of [-16777216, 16777216].")
test_invalid_input("invalid a2 parameter type as a String", 0.01, 0.02, 0.13, 1, 0.12, '0.3', TypeError,
"Argument a2 with value 0.3 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid a1 parameter value", 0.01, 0.02, 0.13, 1, 0.12, 441324343243242342345300, ValueError,
"Input a2 is not within the required interval of [-16777216, 16777216].")
if __name__ == '__main__':
test_func_biquad_eager()
test_func_biquad_pipeline()
test_biquad_invalid_input()
| [
"mindspore.dataset.audio.transforms.Biquad",
"numpy.count_nonzero",
"numpy.abs",
"pytest.raises",
"numpy.array",
"mindspore.dataset.NumpySlicesDataset"
] | [((993, 1024), 'numpy.abs', 'np.abs', (['(data_expected - data_me)'], {}), '(data_expected - data_me)\n', (999, 1024), True, 'import numpy as np\n'), ((1111, 1136), 'numpy.count_nonzero', 'np.count_nonzero', (['greater'], {}), '(greater)\n', (1127, 1136), True, 'import numpy as np\n'), ((1453, 1503), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'np.float64'}), '([[1, 2, 3], [4, 5, 6]], dtype=np.float64)\n', (1461, 1503), True, 'import numpy as np\n'), ((1548, 1623), 'numpy.array', 'np.array', (['[[0.01, 0.0388, 0.1923], [0.04, 0.1252, 0.653]]'], {'dtype': 'np.float64'}), '([[0.01, 0.0388, 0.1923], [0.04, 0.1252, 0.653]], dtype=np.float64)\n', (1556, 1623), True, 'import numpy as np\n'), ((1677, 1721), 'mindspore.dataset.audio.transforms.Biquad', 'audio.Biquad', (['(0.01)', '(0.02)', '(0.13)', '(1)', '(0.12)', '(0.3)'], {}), '(0.01, 0.02, 0.13, 1, 0.12, 0.3)\n', (1689, 1721), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((1991, 2051), 'numpy.array', 'np.array', (['[[3.2, 2.1, 1.3], [6.2, 5.3, 6]]'], {'dtype': 'np.float64'}), '([[3.2, 2.1, 1.3], [6.2, 5.3, 6]], dtype=np.float64)\n', (1999, 2051), True, 'import numpy as np\n'), ((2096, 2161), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.5844], [1.0, 1.0, 1.0]]'], {'dtype': 'np.float64'}), '([[1.0, 1.0, 0.5844], [1.0, 1.0, 1.0]], dtype=np.float64)\n', (2104, 2161), True, 'import numpy as np\n'), ((2223, 2280), 'mindspore.dataset.NumpySlicesDataset', 'ds.NumpySlicesDataset', (['waveform', "['audio']"], {'shuffle': '(False)'}), "(waveform, ['audio'], shuffle=False)\n", (2244, 2280), True, 'import mindspore.dataset as ds\n'), ((2297, 2338), 'mindspore.dataset.audio.transforms.Biquad', 'audio.Biquad', (['(1)', '(0.02)', '(0.13)', '(1)', '(0.12)', '(0.3)'], {}), '(1, 0.02, 0.13, 1, 0.12, 0.3)\n', (2309, 2338), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((2890, 2910), 'pytest.raises', 'pytest.raises', (['error'], {}), '(error)\n', (2903, 2910), False, 'import 
pytest\n'), ((2938, 2974), 'mindspore.dataset.audio.transforms.Biquad', 'audio.Biquad', (['b0', 'b1', 'b2', 'a0', 'a1', 'a2'], {}), '(b0, b1, b2, a0, a1, a2)\n', (2950, 2974), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((1064, 1085), 'numpy.abs', 'np.abs', (['data_expected'], {}), '(data_expected)\n', (1070, 1085), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_raises
from scipy.sparse.linalg import utils
def test_make_system_bad_shape():
assert_raises(ValueError, utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4))
| [
"numpy.zeros"
] | [((184, 200), 'numpy.zeros', 'np.zeros', (['(5, 3)'], {}), '((5, 3))\n', (192, 200), True, 'import numpy as np\n'), ((207, 218), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (215, 218), True, 'import numpy as np\n'), ((220, 231), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (228, 231), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from utils import load_train, load_valid
from run_knn import run_knn
trainData = load_train()
validData = load_valid()
kRange = [1,3,5,7,9]
results = []
for k in kRange:
temp = run_knn(k, trainData[0],trainData[1],validData[0])
results.append(temp)
def classificationRate(validSet, trainResult):
return np.sum(validSet == trainResult)/len(validSet)
classificationRateResults = [classificationRate(validData[1], i) for i in results]
fig, graph = plt.subplots()
graph.plot(kRange, classificationRateResults, 'x')
graph.plot(kRange, classificationRateResults)
graph.set(xlabel='k value', ylabel='classification rate',
title='classification rate as a function of k')
graph.grid()
fig.savefig("q2_1.png")
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.sum",
"utils.load_train",
"run_knn.run_knn",
"matplotlib.pyplot.subplots",
"utils.load_valid"
] | [((133, 145), 'utils.load_train', 'load_train', ([], {}), '()\n', (143, 145), False, 'from utils import load_train, load_valid\n'), ((158, 170), 'utils.load_valid', 'load_valid', ([], {}), '()\n', (168, 170), False, 'from utils import load_train, load_valid\n'), ((517, 531), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (529, 531), True, 'import matplotlib.pyplot as plt\n'), ((779, 789), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (787, 789), True, 'import matplotlib.pyplot as plt\n'), ((234, 286), 'run_knn.run_knn', 'run_knn', (['k', 'trainData[0]', 'trainData[1]', 'validData[0]'], {}), '(k, trainData[0], trainData[1], validData[0])\n', (241, 286), False, 'from run_knn import run_knn\n'), ((373, 404), 'numpy.sum', 'np.sum', (['(validSet == trainResult)'], {}), '(validSet == trainResult)\n', (379, 404), True, 'import numpy as np\n')] |
# SIMULATE KDD1998
import sys
sys.path.insert(0, './src')
from shared_functions import *
from net_designs import *
import os
from scipy import stats as sc
import pandas as ps
import numpy as np
import random
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.pgmlearner import PGMLearner
from libpgm.tablecpdfactorization import TableCPDFactorization
import h5py
def sample_granular(net, bins, n_samples):
sampled_data = net.randomsample(n=n_samples)
granularize(sampled_data,bins)
return ps.DataFrame(sampled_data)
def granularize(sampled_data,bins):
for j in bins:
na_level = len(bins[j])
for i in xrange(len(sampled_data)):
ind = sampled_data[i][j]
if ind == na_level - 1:
sampled_data[i][j] = None
else:
sampled_data[i][j] = round((np.random.uniform() * (bins[j][ind+1] - bins[j][ind]) +
bins[j][ind]))
def random_policy(data):
return np.random.randint(0,12,data.shape[0])
def binom(x):
return np.random.binomial(8,x)/8.0
def propagate(data, classifier, regressor, policy, threshold=0.275, periods=12, orig_actions=None):
# Initializing arrays to hold output
customers = np.zeros((periods+1,data.shape[0],data.shape[1]), dtype = np.float32)
customers[0] = data
actions = np.zeros((periods,data.shape[0]), dtype = np.float32)
donations = np.zeros((periods,data.shape[0]), dtype = np.float32)
for t in xrange(periods):
# SELECTING ACTIONS
if isinstance(orig_actions, (np.ndarray)):
actions[t] = orig_actions[t]
else:
actions[t] = policy(customers[t])
inp = np.append(customers[t],actions[t].reshape((data.shape[0],1)),axis = 1).astype(np.float32)
# PROPAGATING CUSTOMERS
donation_prob = classifier.predict_proba(inp,verbose=0)[:,1]
donations_occurred = 1*(np.random.binomial(8,threshold,donation_prob.shape[0])/8.0 < np.apply_along_axis(binom, 0, donation_prob))
donations[t] = np.rint(regressor.predict(inp,verbose=0).squeeze() * donations_occurred).astype(np.float32)
# UPDATING CUSTOMER STATE
# Recency
customers[t+1,:,0] = (customers[t,:,0] + 1)*(donations_occurred == 0)
# Frequency
customers[t+1,:,1] = customers[t,:,1] + donations_occurred
# Avg. Past Donation
customers[t+1,:,2] = (customers[t,:,2] * customers[t,:,1] + donations[t]) / (customers[t+1,:,1] + 1*(customers[t+1,:,1]==0))
# Avg. Interaction Recency
customers[t+1,:,3] = (customers[t,:,3] + 1)*(actions[t] == 0) # Null action 0
# Avg. Interaction Frequency
customers[t+1,:,4] = customers[t,:,4] + (actions[t] != 0)
customers[t+1,:,5:] = customers[t,:,5:]
return customers, actions, donations
# LOAD MODELS
print('Loading models')
net_start_life = load("./results/kdd98_init_snapshot_start.p")
start_bins = load("./results/kdd98_init_snapshot_start_bins.p")
regressor = KDDRegressor()
regressor.load_weights("./results/kdd98_propagation_regressor_best.h5")
classifier = KDDClassifier()
classifier.load_weights("./results/kdd98_propagation_classifier_best.h5")
RANDOM_SEED = 999
# SIMULATION
test_samples = 1000
np.random.seed(RANDOM_SEED)
# SIMULATE INITIAL SNAPSHOT
sampled_data = sample_granular(net=net_start_life, bins=start_bins, n_samples=test_samples)
sampled_data = sampled_data.fillna(0)
sampled_data = sampled_data[['r0', 'f0', 'm0', 'ir0', 'if0', 'gender', 'age', 'income', 'zip_region']].values
# SIMULATE THROUGH TIME - STATES, ACTIONS, DONATIONS
S, A, D = propagate(sampled_data, classifier, regressor, random_policy, threshold=0.275, periods=18)
# SAVE DATA
print('Saving data')
h5f = h5py.File('./results/kdd98_simulation_results.h5', 'w')
h5f.create_dataset('S', data=S)
h5f.create_dataset('A', data=A)
h5f.create_dataset('D', data=D)
h5f.close()
# LOAD DATA
#h5f = h5py.File('./results/kdd98_simulation_results.h5','r')
#S = h5f['S'][:]
#A = h5f['A'][:]
#D = h5f['D'][:]
#h5f.close()
| [
"pandas.DataFrame",
"numpy.random.uniform",
"h5py.File",
"numpy.random.seed",
"numpy.random.binomial",
"numpy.zeros",
"sys.path.insert",
"numpy.apply_along_axis",
"numpy.random.randint"
] | [((32, 59), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./src"""'], {}), "(0, './src')\n", (47, 59), False, 'import sys\n'), ((3471, 3498), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (3485, 3498), True, 'import numpy as np\n'), ((3963, 4018), 'h5py.File', 'h5py.File', (['"""./results/kdd98_simulation_results.h5"""', '"""w"""'], {}), "('./results/kdd98_simulation_results.h5', 'w')\n", (3972, 4018), False, 'import h5py\n'), ((620, 646), 'pandas.DataFrame', 'ps.DataFrame', (['sampled_data'], {}), '(sampled_data)\n', (632, 646), True, 'import pandas as ps\n'), ((1099, 1138), 'numpy.random.randint', 'np.random.randint', (['(0)', '(12)', 'data.shape[0]'], {}), '(0, 12, data.shape[0])\n', (1116, 1138), True, 'import numpy as np\n'), ((1354, 1425), 'numpy.zeros', 'np.zeros', (['(periods + 1, data.shape[0], data.shape[1])'], {'dtype': 'np.float32'}), '((periods + 1, data.shape[0], data.shape[1]), dtype=np.float32)\n', (1362, 1425), True, 'import numpy as np\n'), ((1462, 1514), 'numpy.zeros', 'np.zeros', (['(periods, data.shape[0])'], {'dtype': 'np.float32'}), '((periods, data.shape[0]), dtype=np.float32)\n', (1470, 1514), True, 'import numpy as np\n'), ((1532, 1584), 'numpy.zeros', 'np.zeros', (['(periods, data.shape[0])'], {'dtype': 'np.float32'}), '((periods, data.shape[0]), dtype=np.float32)\n', (1540, 1584), True, 'import numpy as np\n'), ((1163, 1187), 'numpy.random.binomial', 'np.random.binomial', (['(8)', 'x'], {}), '(8, x)\n', (1181, 1187), True, 'import numpy as np\n'), ((2135, 2179), 'numpy.apply_along_axis', 'np.apply_along_axis', (['binom', '(0)', 'donation_prob'], {}), '(binom, 0, donation_prob)\n', (2154, 2179), True, 'import numpy as np\n'), ((2074, 2130), 'numpy.random.binomial', 'np.random.binomial', (['(8)', 'threshold', 'donation_prob.shape[0]'], {}), '(8, threshold, donation_prob.shape[0])\n', (2092, 2130), True, 'import numpy as np\n'), ((956, 975), 'numpy.random.uniform', 'np.random.uniform', ([], {}), 
'()\n', (973, 975), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import socket
import os
import time
def load_model():
model = None
return model
# the input img is an np.array(uint8)
# maybe you need to change img from 0-255 to 0-1
def get_label(model, img):
# label = model(img)
label = 1
return str(label)
def recv_img(sock, count):
buf = b''
while count:
newbuf=sock.recv(count)
if not newbuf:
return None
buf += newbuf
count -= len(newbuf)
return buf
class Wire:
def __init__(self, ipconf=('', 7878)):
self.model = load_model()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(ipconf)
self.sock.listen(100)
print('waiting...')
self.conn, _ = self.sock.accept()
self.conn.send(str.encode('Connencted Sucessfully').ljust(32))
def process(self):
try:
l = self.conn.recv(16).decode('utf-8')
except:
print('Connected Fail')
return
stringData = recv_img(self.conn, int(l))
print('receive an image')
img = np.frombuffer(stringData, np.uint8)
decimg = cv2.imdecode(img, cv2.IMREAD_COLOR)
label = get_label(self.model, decimg)
self.conn.send(label.encode('utf-8').ljust(16))
if __name__ == '__main__':
wire = Wire()
while 1:
wire.process()
| [
"numpy.frombuffer",
"socket.socket",
"cv2.imdecode"
] | [((612, 661), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (625, 661), False, 'import socket\n'), ((1133, 1168), 'numpy.frombuffer', 'np.frombuffer', (['stringData', 'np.uint8'], {}), '(stringData, np.uint8)\n', (1146, 1168), True, 'import numpy as np\n'), ((1186, 1221), 'cv2.imdecode', 'cv2.imdecode', (['img', 'cv2.IMREAD_COLOR'], {}), '(img, cv2.IMREAD_COLOR)\n', (1198, 1221), False, 'import cv2\n')] |
import codecs
import numpy as np
import matplotlib.pyplot as plt
import re
import ast
##content=codecs.open('Cluster2 Tweets.txt',"r",encoding="utf-8")
##regex = r"\w*crocodile\w*"
##c1=c2=c3=c4=c5=c6=c7=c8=c9=c10=c11=c12=0
##for i in content:
## matches = re.finditer(regex,i)
##
## for match in matches:
## if match:
## #print(i)
## if i.count('2015-12-01'):
## x = ast.literal_eval(i)
## temp=re.findall('2015-12-01 00|2015-12-01 01|2015-12-01 02|2015-12-01 03',x[3])
## if temp:
## c1+=1
## temp=re.findall('2015-12-01 04|2015-12-01 05|2015-12-01 06|2015-12-01 07',x[3])
## if temp:
## c2+=1
## temp=re.findall('2015-12-01 08|2015-12-01 09|2015-12-01 10|2015-12-01 11',x[3])
## if temp:
## c3+=1
## temp=re.findall('2015-12-01 12|2015-12-01 13|2015-12-01 14|2015-12-01 15',x[3])
## if temp:
## c4+=1
## temp=re.findall('2015-12-01 16|2015-12-01 17|2015-12-01 18|2015-12-01 19',x[3])
## if temp:
## c5+=1
## temp=re.findall('2015-12-01 20|2015-12-01 21|2015-12-01 22|2015-12-01 23',x[3])
## if temp:
## c6+=1
## if i.count('2015-12-02'):
## x = ast.literal_eval(i)
## temp=re.findall('2015-12-02 00|2015-12-02 01|2015-12-02 02|2015-12-02 03',x[3])
## if temp:
## c7+=1
## temp=re.findall('2015-12-02 04|2015-12-02 05|2015-12-02 06|2015-12-02 07',x[3])
## if temp:
## c8+=1
## temp=re.findall('2015-12-02 08|2015-12-02 09|2015-12-02 10|2015-12-02 11',x[3])
## if temp:
## c9+=1
## temp=re.findall('2015-12-02 12|2015-12-02 13|2015-12-02 14|2015-12-02 15',x[3])
## if temp:
## c10+=1
## temp=re.findall('2015-12-02 16|2015-12-02 17|2015-12-02 18|2015-12-02 19',x[3])
## if temp:
## c11+=1
## temp=re.findall('2015-12-02 20|2015-12-02 21|2015-12-02 22|2015-12-02 23',x[3])
## if temp:
## c12+=1
##
##print(c1)
##print(c2)
##print(c3)
##print(c4)
##print(c5)
##print(c6)
##print(c7)
##print(c8)
##print(c9)
##print(c10)
##print(c11)
##print(c12)
##print("==============================================================")
##
##fig, ax=plt.subplots()
##index=np.arange(13)
##bar_width=0.15
##opacity=0.9
##
##belief0=(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12)
##y0=[1,2,3,4,5,6,7,8,9,10,11,12]
##
##
##content=codecs.open('Cluster2 Tweets.txt',"r",encoding="utf-8")
##regex = r"\w*chembarambakkam\w*"
##c1=c2=c3=c4=c5=c6=c7=c8=c9=c10=c11=c12=0
##for i in content:
## matches = re.finditer(regex,i)
##
## for match in matches:
## if match:
## #print(i)
## if i.count('2015-12-01'):
## x = ast.literal_eval(i)
## temp=re.findall('2015-12-01 00|2015-12-01 01|2015-12-01 02|2015-12-01 03',x[3])
## if temp:
## c1+=1
## temp=re.findall('2015-12-01 04|2015-12-01 05|2015-12-01 06|2015-12-01 07',x[3])
## if temp:
## c2+=1
## temp=re.findall('2015-12-01 08|2015-12-01 09|2015-12-01 10|2015-12-01 11',x[3])
## if temp:
## c3+=1
## temp=re.findall('2015-12-01 12|2015-12-01 13|2015-12-01 14|2015-12-01 15',x[3])
## if temp:
## c4+=1
## temp=re.findall('2015-12-01 16|2015-12-01 17|2015-12-01 18|2015-12-01 19',x[3])
## if temp:
## c5+=1
## temp=re.findall('2015-12-01 20|2015-12-01 21|2015-12-01 22|2015-12-01 23',x[3])
## if temp:
## c6+=1
## if i.count('2015-12-02'):
## x = ast.literal_eval(i)
## temp=re.findall('2015-12-02 00|2015-12-02 01|2015-12-02 02|2015-12-02 03',x[3])
## if temp:
## c7+=1
## temp=re.findall('2015-12-02 04|2015-12-02 05|2015-12-02 06|2015-12-02 07',x[3])
## if temp:
## c8+=1
## temp=re.findall('2015-12-02 08|2015-12-02 09|2015-12-02 10|2015-12-02 11',x[3])
## if temp:
## c9+=1
## temp=re.findall('2015-12-02 12|2015-12-02 13|2015-12-02 14|2015-12-02 15',x[3])
## if temp:
## c10+=1
## temp=re.findall('2015-12-02 16|2015-12-02 17|2015-12-02 18|2015-12-02 19',x[3])
## if temp:
## c11+=1
## temp=re.findall('2015-12-02 20|2015-12-02 21|2015-12-02 22|2015-12-02 23',x[3])
## if temp:
## c12+=1
##
##print(c1)
##print(c2)
##print(c3)
##print(c4)
##print(c5)
##print(c6)
##print(c7)
##print(c8)
##print(c9)
##print(c10)
##print(c11)
##print(c12)
##print("==============================================================")
##
##fig, ax=plt.subplots()
##index=np.arange(13)
##bar_width=0.15
##opacity=0.9
##
##belief1=(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12)
##y1=[1,2,3,4,5,6,7,8,9,10,11,12]
##plt.plot(y,belief)
##plt.xlabel('Time in hrs')
##plt.ylabel('No. of tweets')
##plt.xticks(index+(6.6*bar_width),('0-3','4-7','8-11','12-15','16-19','20-23','0-3','4-7','8-11','12-15','16-19','20-23'))
##plt.plot(y,belief,'bo',linestyle='-')
content=codecs.open('Cluster3 Tweets.txt',"r",encoding="utf-8")
regex = r"\w*little girl\w*"
c1=c2=c3=c4=c5=c6=c7=c8=c9=c10=c11=c12=0
for i in content:
matches = re.finditer(regex,i)
for match in matches:
if match:
#print(i)
if i.count('2015-12-04'):
x = ast.literal_eval(i)
temp=re.findall('2015-12-04 00|2015-12-04 01|2015-12-04 02|2015-12-04 03',x[3])
if temp:
c1+=1
temp=re.findall('2015-12-04 04|2015-12-04 05|2015-12-04 06|2015-12-04 07',x[3])
if temp:
c2+=1
temp=re.findall('2015-12-04 08|2015-12-04 09|2015-12-04 10|2015-12-04 11',x[3])
if temp:
c3+=1
temp=re.findall('2015-12-04 12|2015-12-04 13|2015-12-04 14|2015-12-04 15',x[3])
if temp:
c4+=1
temp=re.findall('2015-12-04 16|2015-12-04 17|2015-12-04 18|2015-12-04 19',x[3])
if temp:
c5+=1
temp=re.findall('2015-12-04 20|2015-12-04 21|2015-12-04 22|2015-12-04 23',x[3])
if temp:
c6+=1
if i.count('2015-12-05'):
x = ast.literal_eval(i)
temp=re.findall('2015-12-05 00|2015-12-05 01|2015-12-05 02|2015-12-05 03',x[3])
if temp:
c7+=1
temp=re.findall('2015-12-05 04|2015-12-05 05|2015-12-05 06|2015-12-05 07',x[3])
if temp:
c8+=1
temp=re.findall('2015-12-05 08|2015-12-05 09|2015-12-05 10|2015-12-05 11',x[3])
if temp:
c9+=1
temp=re.findall('2015-12-05 12|2015-12-05 13|2015-12-05 14|2015-12-05 15',x[3])
if temp:
c10+=1
temp=re.findall('2015-12-05 16|2015-12-05 17|2015-12-05 18|2015-12-05 19',x[3])
if temp:
c11+=1
temp=re.findall('2015-12-05 20|2015-12-05 21|2015-12-05 22|2015-12-05 23',x[3])
if temp:
c12+=1
print(c1)
print(c2)
print(c3)
print(c4)
print(c5)
print(c6)
print(c7)
print(c8)
print(c9)
print(c10)
print(c11)
print(c12)
print("==============================================================")
fig, ax=plt.subplots()
index=np.arange(13)
bar_width=0.15
opacity=0.9
belief2=(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12)
y2=[1,2,3,4,5,6,7,8,9,10,11,12]
##plt.plot(y,belief)
##plt.xlabel('Time in hrs')
##plt.ylabel('No. of tweets')
##plt.xticks(index+(6.6*bar_width),('0-3','4-7','8-11','12-15','16-19','20-23','0-3','4-7','8-11','12-15','16-19','20-23'))
##plt.plot(y,belief,'bo',linestyle='-')
content=codecs.open('Cluster5 Tweets.txt',"r",encoding="utf-8")
regex = r"\w*passport\w*"
c1=c2=c3=c4=c5=c6=c7=c8=c9=c10=c11=c12=0
for i in content:
matches = re.finditer(regex,i)
for match in matches:
if match:
#print(i)
if i.count('2015-12-07'):
x = ast.literal_eval(i)
temp=re.findall('2015-12-07 00|2015-12-07 01|2015-12-07 02|2015-12-07 03',x[3])
if temp:
c1+=1
temp=re.findall('2015-12-07 04|2015-12-07 05|2015-12-07 06|2015-12-07 07',x[3])
if temp:
c2+=1
temp=re.findall('2015-12-07 08|2015-12-07 09|2015-12-07 10|2015-12-07 11',x[3])
if temp:
c3+=1
temp=re.findall('2015-12-07 12|2015-12-07 13|2015-12-07 14|2015-12-07 15',x[3])
if temp:
c4+=1
temp=re.findall('2015-12-07 16|2015-12-07 17|2015-12-07 18|2015-12-07 19',x[3])
if temp:
c5+=1
temp=re.findall('2015-12-07 20|2015-12-07 21|2015-12-07 22|2015-12-07 23',x[3])
if temp:
c6+=1
if i.count('2015-12-08'):
x = ast.literal_eval(i)
temp=re.findall('2015-12-08 00|2015-12-08 01|2015-12-08 02|2015-12-08 03',x[3])
if temp:
c7+=1
temp=re.findall('2015-12-08 04|2015-12-08 05|2015-12-08 06|2015-12-08 07',x[3])
if temp:
c8+=1
temp=re.findall('2015-12-08 08|2015-12-08 09|2015-12-08 10|2015-12-08 11',x[3])
if temp:
c9+=1
temp=re.findall('2015-12-08 12|2015-12-08 13|2015-12-08 14|2015-12-08 15',x[3])
if temp:
c10+=1
temp=re.findall('2015-12-08 16|2015-12-08 17|2015-12-08 18|2015-12-08 19',x[3])
if temp:
c11+=1
temp=re.findall('2015-12-08 20|2015-12-08 21|2015-12-08 22|2015-12-08 23',x[3])
if temp:
c12+=1
print(c1)
print(c2)
print(c3)
print(c4)
print(c5)
print(c6)
print(c7)
print(c8)
print(c9)
print(c10)
print(c11)
print(c12)
print("==============================================================")
fig, ax=plt.subplots()
index=np.arange(13)
bar_width=0.15
opacity=0.9
belief3=(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12)
y3=[1,2,3,4,5,6,7,8,9,10,11,12]
#plt.plot(y0,belief0,y1,belief1,y2,belief2,y3,belief3)
plt.xlabel('Time in hrs')
plt.ylabel('No. of tweets')
plt.xticks(index+(6.6*bar_width),('0-3','4-7','8-11','12-15','16-19','20-23','0-3','4-7','8-11','12-15','16-19','20-23'))
plt.plot(y2,belief2,'bo',linestyle='-',label='Little girl lost')
plt.plot(y3,belief3,'yo',linestyle='-',label='Passport Damaged')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.show",
"codecs.open",
"matplotlib.pyplot.plot",
"re.finditer",
"matplotlib.pyplot.legend",
"re.findall",
"numpy.arange",
"matplotlib.pyplot.xticks",
"ast.literal_eval",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((5683, 5740), 'codecs.open', 'codecs.open', (['"""Cluster3 Tweets.txt"""', '"""r"""'], {'encoding': '"""utf-8"""'}), "('Cluster3 Tweets.txt', 'r', encoding='utf-8')\n", (5694, 5740), False, 'import codecs\n'), ((8058, 8072), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8070, 8072), True, 'import matplotlib.pyplot as plt\n'), ((8079, 8092), 'numpy.arange', 'np.arange', (['(13)'], {}), '(13)\n', (8088, 8092), True, 'import numpy as np\n'), ((8455, 8512), 'codecs.open', 'codecs.open', (['"""Cluster5 Tweets.txt"""', '"""r"""'], {'encoding': '"""utf-8"""'}), "('Cluster5 Tweets.txt', 'r', encoding='utf-8')\n", (8466, 8512), False, 'import codecs\n'), ((10826, 10840), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10838, 10840), True, 'import matplotlib.pyplot as plt\n'), ((10847, 10860), 'numpy.arange', 'np.arange', (['(13)'], {}), '(13)\n', (10856, 10860), True, 'import numpy as np\n'), ((11025, 11050), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time in hrs"""'], {}), "('Time in hrs')\n", (11035, 11050), True, 'import matplotlib.pyplot as plt\n'), ((11051, 11078), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""No. of tweets"""'], {}), "('No. 
of tweets')\n", (11061, 11078), True, 'import matplotlib.pyplot as plt\n'), ((11079, 11218), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(index + 6.6 * bar_width)', "('0-3', '4-7', '8-11', '12-15', '16-19', '20-23', '0-3', '4-7', '8-11',\n '12-15', '16-19', '20-23')"], {}), "(index + 6.6 * bar_width, ('0-3', '4-7', '8-11', '12-15', '16-19',\n '20-23', '0-3', '4-7', '8-11', '12-15', '16-19', '20-23'))\n", (11089, 11218), True, 'import matplotlib.pyplot as plt\n'), ((11201, 11269), 'matplotlib.pyplot.plot', 'plt.plot', (['y2', 'belief2', '"""bo"""'], {'linestyle': '"""-"""', 'label': '"""Little girl lost"""'}), "(y2, belief2, 'bo', linestyle='-', label='Little girl lost')\n", (11209, 11269), True, 'import matplotlib.pyplot as plt\n'), ((11266, 11334), 'matplotlib.pyplot.plot', 'plt.plot', (['y3', 'belief3', '"""yo"""'], {'linestyle': '"""-"""', 'label': '"""Passport Damaged"""'}), "(y3, belief3, 'yo', linestyle='-', label='Passport Damaged')\n", (11274, 11334), True, 'import matplotlib.pyplot as plt\n'), ((11331, 11343), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11341, 11343), True, 'import matplotlib.pyplot as plt\n'), ((11344, 11354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11352, 11354), True, 'import matplotlib.pyplot as plt\n'), ((5841, 5862), 're.finditer', 're.finditer', (['regex', 'i'], {}), '(regex, i)\n', (5852, 5862), False, 'import re\n'), ((8610, 8631), 're.finditer', 're.finditer', (['regex', 'i'], {}), '(regex, i)\n', (8621, 8631), False, 'import re\n'), ((5987, 6006), 'ast.literal_eval', 'ast.literal_eval', (['i'], {}), '(i)\n', (6003, 6006), False, 'import ast\n'), ((6028, 6103), 're.findall', 're.findall', (['"""2015-12-04 00|2015-12-04 01|2015-12-04 02|2015-12-04 03"""', 'x[3]'], {}), "('2015-12-04 00|2015-12-04 01|2015-12-04 02|2015-12-04 03', x[3])\n", (6038, 6103), False, 'import re\n'), ((6175, 6250), 're.findall', 're.findall', (['"""2015-12-04 04|2015-12-04 05|2015-12-04 06|2015-12-04 07"""', 'x[3]'], {}), 
"('2015-12-04 04|2015-12-04 05|2015-12-04 06|2015-12-04 07', x[3])\n", (6185, 6250), False, 'import re\n'), ((6322, 6397), 're.findall', 're.findall', (['"""2015-12-04 08|2015-12-04 09|2015-12-04 10|2015-12-04 11"""', 'x[3]'], {}), "('2015-12-04 08|2015-12-04 09|2015-12-04 10|2015-12-04 11', x[3])\n", (6332, 6397), False, 'import re\n'), ((6469, 6544), 're.findall', 're.findall', (['"""2015-12-04 12|2015-12-04 13|2015-12-04 14|2015-12-04 15"""', 'x[3]'], {}), "('2015-12-04 12|2015-12-04 13|2015-12-04 14|2015-12-04 15', x[3])\n", (6479, 6544), False, 'import re\n'), ((6616, 6691), 're.findall', 're.findall', (['"""2015-12-04 16|2015-12-04 17|2015-12-04 18|2015-12-04 19"""', 'x[3]'], {}), "('2015-12-04 16|2015-12-04 17|2015-12-04 18|2015-12-04 19', x[3])\n", (6626, 6691), False, 'import re\n'), ((6763, 6838), 're.findall', 're.findall', (['"""2015-12-04 20|2015-12-04 21|2015-12-04 22|2015-12-04 23"""', 'x[3]'], {}), "('2015-12-04 20|2015-12-04 21|2015-12-04 22|2015-12-04 23', x[3])\n", (6773, 6838), False, 'import re\n'), ((6947, 6966), 'ast.literal_eval', 'ast.literal_eval', (['i'], {}), '(i)\n', (6963, 6966), False, 'import ast\n'), ((6988, 7063), 're.findall', 're.findall', (['"""2015-12-05 00|2015-12-05 01|2015-12-05 02|2015-12-05 03"""', 'x[3]'], {}), "('2015-12-05 00|2015-12-05 01|2015-12-05 02|2015-12-05 03', x[3])\n", (6998, 7063), False, 'import re\n'), ((7135, 7210), 're.findall', 're.findall', (['"""2015-12-05 04|2015-12-05 05|2015-12-05 06|2015-12-05 07"""', 'x[3]'], {}), "('2015-12-05 04|2015-12-05 05|2015-12-05 06|2015-12-05 07', x[3])\n", (7145, 7210), False, 'import re\n'), ((7282, 7357), 're.findall', 're.findall', (['"""2015-12-05 08|2015-12-05 09|2015-12-05 10|2015-12-05 11"""', 'x[3]'], {}), "('2015-12-05 08|2015-12-05 09|2015-12-05 10|2015-12-05 11', x[3])\n", (7292, 7357), False, 'import re\n'), ((7429, 7504), 're.findall', 're.findall', (['"""2015-12-05 12|2015-12-05 13|2015-12-05 14|2015-12-05 15"""', 'x[3]'], {}), "('2015-12-05 12|2015-12-05 
13|2015-12-05 14|2015-12-05 15', x[3])\n", (7439, 7504), False, 'import re\n'), ((7577, 7652), 're.findall', 're.findall', (['"""2015-12-05 16|2015-12-05 17|2015-12-05 18|2015-12-05 19"""', 'x[3]'], {}), "('2015-12-05 16|2015-12-05 17|2015-12-05 18|2015-12-05 19', x[3])\n", (7587, 7652), False, 'import re\n'), ((7725, 7800), 're.findall', 're.findall', (['"""2015-12-05 20|2015-12-05 21|2015-12-05 22|2015-12-05 23"""', 'x[3]'], {}), "('2015-12-05 20|2015-12-05 21|2015-12-05 22|2015-12-05 23', x[3])\n", (7735, 7800), False, 'import re\n'), ((8756, 8775), 'ast.literal_eval', 'ast.literal_eval', (['i'], {}), '(i)\n', (8772, 8775), False, 'import ast\n'), ((8797, 8872), 're.findall', 're.findall', (['"""2015-12-07 00|2015-12-07 01|2015-12-07 02|2015-12-07 03"""', 'x[3]'], {}), "('2015-12-07 00|2015-12-07 01|2015-12-07 02|2015-12-07 03', x[3])\n", (8807, 8872), False, 'import re\n'), ((8944, 9019), 're.findall', 're.findall', (['"""2015-12-07 04|2015-12-07 05|2015-12-07 06|2015-12-07 07"""', 'x[3]'], {}), "('2015-12-07 04|2015-12-07 05|2015-12-07 06|2015-12-07 07', x[3])\n", (8954, 9019), False, 'import re\n'), ((9091, 9166), 're.findall', 're.findall', (['"""2015-12-07 08|2015-12-07 09|2015-12-07 10|2015-12-07 11"""', 'x[3]'], {}), "('2015-12-07 08|2015-12-07 09|2015-12-07 10|2015-12-07 11', x[3])\n", (9101, 9166), False, 'import re\n'), ((9238, 9313), 're.findall', 're.findall', (['"""2015-12-07 12|2015-12-07 13|2015-12-07 14|2015-12-07 15"""', 'x[3]'], {}), "('2015-12-07 12|2015-12-07 13|2015-12-07 14|2015-12-07 15', x[3])\n", (9248, 9313), False, 'import re\n'), ((9385, 9460), 're.findall', 're.findall', (['"""2015-12-07 16|2015-12-07 17|2015-12-07 18|2015-12-07 19"""', 'x[3]'], {}), "('2015-12-07 16|2015-12-07 17|2015-12-07 18|2015-12-07 19', x[3])\n", (9395, 9460), False, 'import re\n'), ((9532, 9607), 're.findall', 're.findall', (['"""2015-12-07 20|2015-12-07 21|2015-12-07 22|2015-12-07 23"""', 'x[3]'], {}), "('2015-12-07 20|2015-12-07 21|2015-12-07 22|2015-12-07 
23', x[3])\n", (9542, 9607), False, 'import re\n'), ((9716, 9735), 'ast.literal_eval', 'ast.literal_eval', (['i'], {}), '(i)\n', (9732, 9735), False, 'import ast\n'), ((9757, 9832), 're.findall', 're.findall', (['"""2015-12-08 00|2015-12-08 01|2015-12-08 02|2015-12-08 03"""', 'x[3]'], {}), "('2015-12-08 00|2015-12-08 01|2015-12-08 02|2015-12-08 03', x[3])\n", (9767, 9832), False, 'import re\n'), ((9904, 9979), 're.findall', 're.findall', (['"""2015-12-08 04|2015-12-08 05|2015-12-08 06|2015-12-08 07"""', 'x[3]'], {}), "('2015-12-08 04|2015-12-08 05|2015-12-08 06|2015-12-08 07', x[3])\n", (9914, 9979), False, 'import re\n'), ((10051, 10126), 're.findall', 're.findall', (['"""2015-12-08 08|2015-12-08 09|2015-12-08 10|2015-12-08 11"""', 'x[3]'], {}), "('2015-12-08 08|2015-12-08 09|2015-12-08 10|2015-12-08 11', x[3])\n", (10061, 10126), False, 'import re\n'), ((10198, 10273), 're.findall', 're.findall', (['"""2015-12-08 12|2015-12-08 13|2015-12-08 14|2015-12-08 15"""', 'x[3]'], {}), "('2015-12-08 12|2015-12-08 13|2015-12-08 14|2015-12-08 15', x[3])\n", (10208, 10273), False, 'import re\n'), ((10346, 10421), 're.findall', 're.findall', (['"""2015-12-08 16|2015-12-08 17|2015-12-08 18|2015-12-08 19"""', 'x[3]'], {}), "('2015-12-08 16|2015-12-08 17|2015-12-08 18|2015-12-08 19', x[3])\n", (10356, 10421), False, 'import re\n'), ((10494, 10569), 're.findall', 're.findall', (['"""2015-12-08 20|2015-12-08 21|2015-12-08 22|2015-12-08 23"""', 'x[3]'], {}), "('2015-12-08 20|2015-12-08 21|2015-12-08 22|2015-12-08 23', x[3])\n", (10504, 10569), False, 'import re\n')] |
from util import (get_data_from_id,
read_kpt_file)
import glob
import os
import numpy as np
from skimage.io import (imread,
imsave)
from skimage.transform import resize
root_dir = os.environ['DIR_3DFAW']
def prepare_train():
    """Collect keypoints for the train split and save them to <root_dir>/train.npz."""
    jpg_paths = glob.glob("%s/train_img/*.jpg" % root_dir)
    ids = []
    for jpg_path in jpg_paths:
        ids.append(os.path.basename(jpg_path).replace(".jpg", ""))
    y_keypts, z_keypts = get_keypts_from_ids(ids, "train")
    np.savez(file="%s/train" % root_dir,
             y_keypts=y_keypts,
             z_keypts=z_keypts)
def get_keypts_from_ids(ids, mode):
    """Load 2-D and depth keypoints for every sample id.

    Args:
        ids: list of sample identifiers (file basenames without extension).
        mode: dataset split forwarded to `get_data_from_id` (e.g. "train", "valid").

    Returns:
        Tuple (y_keypts, z_keypts) of numpy arrays with one float32 entry per id.
    """
    # Fix: the original also declared x_keypts/meta lists that were never used.
    y_keypts = []
    z_keypts = []
    for k, id_ in enumerate(ids):
        print("%i / %i" % (k, len(ids)))
        # The first return value is the image; only the keypoints are kept here.
        _, b, c = get_data_from_id(root=root_dir, mode=mode, id_=id_)
        y_keypts.append(b.astype("float32"))
        z_keypts.append(c.astype("float32"))
    return np.asarray(y_keypts), np.asarray(z_keypts)
def prepare_valid():
    """Collect keypoints for the validation split and save them to <root_dir>/valid.npz."""
    with open("%s/list_valid_test.txt" % root_dir) as f:
        rows = [ln.rstrip().split(",") for ln in f]
    ids = [row[0] for row in rows if row[1] == "valid"]
    y_keypts, z_keypts = get_keypts_from_ids(ids, "valid")
    np.savez(file="%s/valid" % root_dir,
             y_keypts=y_keypts,
             z_keypts=z_keypts,
             ids=ids)
def prepare_test():
    """Collect keypoints and head orientations for the test split, save to <root_dir>/test.npz."""
    with open("%s/list_valid_test.txt" % root_dir) as f:
        rows = [ln.rstrip().split(",") for ln in f]
    test_rows = [row for row in rows if row[1] == "test"]
    ids = [row[0] for row in test_rows]
    orientations = [row[2] for row in test_rows]
    # Test images live under the "valid" directories on disk, hence mode="valid".
    y_keypts, z_keypts = get_keypts_from_ids(ids, "valid")
    np.savez(file="%s/test" % root_dir,
             y_keypts=y_keypts,
             z_keypts=z_keypts,
             ids=ids,
             orientations=orientations)
def prepare_valid_imgs_downsized():
    """Crop each validation image to its landmark bounding box and save an 80x80 copy.

    Reads landmarks from <root_dir>/valid_lm/<id>_lm.csv, crops
    <root_dir>/valid_img/<id>.jpg to the landmark extent and writes the
    resized result to <root_dir>/valid_img_cropped_80x80/<id>.jpg.
    """
    ids = glob.glob("%s/valid_img/*.jpg" % root_dir)
    ids = [os.path.basename(id_).replace(".jpg", "") for id_ in ids]
    output_folder = "%s/valid_img_cropped_80x80" % root_dir
    # exist_ok avoids the check-then-create race of the original exists()/makedirs() pair.
    os.makedirs(output_folder, exist_ok=True)
    for id_ in ids:
        kpts = read_kpt_file("%s/valid_lm/%s_lm.csv" % (root_dir, id_))
        img = imread("%s/valid_img/%s.jpg" % (root_dir, id_))
        # Clamp to >= 0: a slightly negative landmark would otherwise become a
        # negative index and silently crop from the wrong side of the image.
        y0 = max(0, int(np.min(kpts[:, 1])))
        y1 = max(0, int(np.max(kpts[:, 1])))
        x0 = max(0, int(np.min(kpts[:, 0])))
        x1 = max(0, int(np.max(kpts[:, 0])))
        img = img[y0:y1, x0:x1]
        img = resize(img, (80, 80))
        imsave(arr=img, fname="%s/%s.jpg" % (output_folder, id_))
# Script entry point: build the train/valid/test .npz archives and the
# downsized validation crops in sequence.
if __name__ == '__main__':
    prepare_train()
    prepare_valid()
    prepare_test()
    prepare_valid_imgs_downsized()
| [
"os.makedirs",
"os.path.basename",
"numpy.asarray",
"util.get_data_from_id",
"os.path.exists",
"util.read_kpt_file",
"numpy.min",
"numpy.max",
"skimage.transform.resize",
"glob.glob",
"numpy.savez",
"skimage.io.imsave",
"skimage.io.imread"
] | [((279, 321), 'glob.glob', 'glob.glob', (["('%s/train_img/*.jpg' % root_dir)"], {}), "('%s/train_img/*.jpg' % root_dir)\n", (288, 321), False, 'import glob\n'), ((454, 528), 'numpy.savez', 'np.savez', ([], {'file': "('%s/train' % root_dir)", 'y_keypts': 'y_keypts', 'z_keypts': 'z_keypts'}), "(file='%s/train' % root_dir, y_keypts=y_keypts, z_keypts=z_keypts)\n", (462, 528), True, 'import numpy as np\n'), ((1059, 1079), 'numpy.asarray', 'np.asarray', (['y_keypts'], {}), '(y_keypts)\n', (1069, 1079), True, 'import numpy as np\n'), ((1095, 1115), 'numpy.asarray', 'np.asarray', (['z_keypts'], {}), '(z_keypts)\n', (1105, 1115), True, 'import numpy as np\n'), ((1439, 1526), 'numpy.savez', 'np.savez', ([], {'file': "('%s/valid' % root_dir)", 'y_keypts': 'y_keypts', 'z_keypts': 'z_keypts', 'ids': 'ids'}), "(file='%s/valid' % root_dir, y_keypts=y_keypts, z_keypts=z_keypts,\n ids=ids)\n", (1447, 1526), True, 'import numpy as np\n'), ((1937, 2050), 'numpy.savez', 'np.savez', ([], {'file': "('%s/test' % root_dir)", 'y_keypts': 'y_keypts', 'z_keypts': 'z_keypts', 'ids': 'ids', 'orientations': 'orientations'}), "(file='%s/test' % root_dir, y_keypts=y_keypts, z_keypts=z_keypts,\n ids=ids, orientations=orientations)\n", (1945, 2050), True, 'import numpy as np\n'), ((2146, 2188), 'glob.glob', 'glob.glob', (["('%s/valid_img/*.jpg' % root_dir)"], {}), "('%s/valid_img/*.jpg' % root_dir)\n", (2155, 2188), False, 'import glob\n'), ((751, 802), 'util.get_data_from_id', 'get_data_from_id', ([], {'root': 'root_dir', 'mode': 'mode', 'id_': 'id_'}), '(root=root_dir, mode=mode, id_=id_)\n', (767, 802), False, 'from util import get_data_from_id, read_kpt_file\n'), ((2328, 2357), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (2342, 2357), False, 'import os\n'), ((2367, 2393), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (2378, 2393), False, 'import os\n'), ((2429, 2485), 'util.read_kpt_file', 'read_kpt_file', 
(["('%s/valid_lm/%s_lm.csv' % (root_dir, id_))"], {}), "('%s/valid_lm/%s_lm.csv' % (root_dir, id_))\n", (2442, 2485), False, 'from util import get_data_from_id, read_kpt_file\n'), ((2500, 2547), 'skimage.io.imread', 'imread', (["('%s/valid_img/%s.jpg' % (root_dir, id_))"], {}), "('%s/valid_img/%s.jpg' % (root_dir, id_))\n", (2506, 2547), False, 'from skimage.io import imread, imsave\n'), ((2694, 2715), 'skimage.transform.resize', 'resize', (['img', '(80, 80)'], {}), '(img, (80, 80))\n', (2700, 2715), False, 'from skimage.transform import resize\n'), ((2724, 2781), 'skimage.io.imsave', 'imsave', ([], {'arr': 'img', 'fname': "('%s/%s.jpg' % (output_folder, id_))"}), "(arr=img, fname='%s/%s.jpg' % (output_folder, id_))\n", (2730, 2781), False, 'from skimage.io import imread, imsave\n'), ((333, 354), 'os.path.basename', 'os.path.basename', (['id_'], {}), '(id_)\n', (349, 354), False, 'import os\n'), ((2200, 2221), 'os.path.basename', 'os.path.basename', (['id_'], {}), '(id_)\n', (2216, 2221), False, 'import os\n'), ((2571, 2589), 'numpy.min', 'np.min', (['kpts[:, 1]'], {}), '(kpts[:, 1])\n', (2577, 2589), True, 'import numpy as np\n'), ((2594, 2612), 'numpy.max', 'np.max', (['kpts[:, 1]'], {}), '(kpts[:, 1])\n', (2600, 2612), True, 'import numpy as np\n'), ((2637, 2655), 'numpy.min', 'np.min', (['kpts[:, 0]'], {}), '(kpts[:, 0])\n', (2643, 2655), True, 'import numpy as np\n'), ((2660, 2678), 'numpy.max', 'np.max', (['kpts[:, 0]'], {}), '(kpts[:, 0])\n', (2666, 2678), True, 'import numpy as np\n')] |
import numpy as np
from datetime import date
import unittest
from rebalance.utils import dates_till_target, fill_price_gaps
##############
class DatesPricesTest(unittest.TestCase):
    """Unit tests for the date/price helpers in rebalance.utils."""

    def test_dates_till_target(self):
        """dates_till_target(days, target) yields the `days` calendar days ending at target."""
        expected = np.array([[date(2015, 12, 31)], [date(2016, 1, 1)]])
        actual = dates_till_target(days=2, target=date(2016, 1, 1))
        self.assertTrue((expected == actual).all())

    def test_fill_price_gaps(self):
        """fill_price_gaps inserts missing dates, repeating the preceding price."""
        dates = np.array([[date(2016, 1, 1)], [date(2016, 1, 3)]])
        prices = np.array([[42], [44]])
        actual_dates, actual_prices = fill_price_gaps(dates, prices)
        expected_dates = np.array([[date(2016, 1, 1)], [date(2016, 1, 2)], [date(2016, 1, 3)]])
        expected_prices = np.array([[42], [42], [44]])
        self.assertTrue((expected_dates == actual_dates).all())
        self.assertTrue((expected_prices == actual_prices).all())
##############
| [
"rebalance.utils.fill_price_gaps",
"numpy.array",
"datetime.date"
] | [((547, 569), 'numpy.array', 'np.array', (['[[42], [44]]'], {}), '([[42], [44]])\n', (555, 569), True, 'import numpy as np\n'), ((602, 632), 'rebalance.utils.fill_price_gaps', 'fill_price_gaps', (['dates', 'prices'], {}), '(dates, prices)\n', (617, 632), False, 'from rebalance.utils import dates_till_target, fill_price_gaps\n'), ((739, 767), 'numpy.array', 'np.array', (['[[42], [42], [44]]'], {}), '([[42], [42], [44]])\n', (747, 767), True, 'import numpy as np\n'), ((285, 301), 'datetime.date', 'date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (289, 301), False, 'from datetime import date\n'), ((333, 351), 'datetime.date', 'date', (['(2015)', '(12)', '(31)'], {}), '(2015, 12, 31)\n', (337, 351), False, 'from datetime import date\n'), ((352, 368), 'datetime.date', 'date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (356, 368), False, 'from datetime import date\n'), ((494, 510), 'datetime.date', 'date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (498, 510), False, 'from datetime import date\n'), ((511, 527), 'datetime.date', 'date', (['(2016)', '(1)', '(3)'], {}), '(2016, 1, 3)\n', (515, 527), False, 'from datetime import date\n'), ((665, 681), 'datetime.date', 'date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (669, 681), False, 'from datetime import date\n'), ((682, 698), 'datetime.date', 'date', (['(2016)', '(1)', '(2)'], {}), '(2016, 1, 2)\n', (686, 698), False, 'from datetime import date\n'), ((699, 715), 'datetime.date', 'date', (['(2016)', '(1)', '(3)'], {}), '(2016, 1, 3)\n', (703, 715), False, 'from datetime import date\n')] |
from tkinter import *
from PIL import Image, ImageTk
from numpy import asarray
from skimage.measure import label, regionprops
from skimage import filters
import tkinter as tk
import tkinter.ttk as ttk
import numpy as np
import math
# #####################
# Słowem wstępu
# #####################
# Ta aplikacja była naszym pierwszym spotkaniem z językiem python
# pewnie wiele rzeczy dałoby się zrobić wydajniej / lepiej.
# Do osób, które w przyszłości potencjalnie mogłyby rozwijać ten projekt
# Przepraszamy za bałagan - w zamian postaramy się jak najlepiej wytłumaczyć każdą linijkę kodu
# TODO - Rozdzielić na pliki
# TODO - Dopracować layout
# TODO - Scrollbar w liście itemów
# TODO - Lista dostępnych obrazków / Importowanie własnego?
# TODO - Optymaliiiiiiizacja
# TODO - Opis
# Takes a FullImage instance as argument. This triggers object detection in the image.
def getSmallerImages(full_image):
    """Index the objects of a FullImage in place and return it.

    Labels connected components, recolors them for display, extracts the
    per-object sub-images and refreshes the Tk-compatible image component.
    """
    full_image.labelObjects()
    full_image.prettyColors()
    full_image.detectedObjects = getImageObjects(full_image)
    labeled = Image.fromarray(full_image.imageArray, "L")
    full_image.image = labeled
    full_image.imageComponent = ImageTk.PhotoImage(labeled)
    return full_image
# Takes a FullImage instance as argument. Returns a list of objects found in the image.
def getImageObjects(image):
    """For every label color in the image, cut out a square sub-image of that object.

    For each color: find the bounding box, copy it (with a 5-pixel margin) into
    a binary 0/255 list-of-lists, pad rows/columns with zeros until the result
    is square, and wrap it in an ImageL together with its lv/lh/lmax extents.
    """
    _allColors = image.getImageColors()
    _objectArray = []
    for color in _allColors:
        # Bounding box of the current color, found by scanning the interior pixels.
        _widthStart = 0
        _widthEnd = 0
        _heightStart = 0
        _heightEnd = 0
        for x in range(1, image.image.size[1] - 1):
            if color in image.imageArray[x]:
                if _heightStart == 0 or x < _heightStart:
                    _heightStart = x
                _heightEnd = x
                for y in range(1, image.image.size[0] - 1):
                    if (_widthStart == 0 or y < _widthStart) and image.imageArray[x, y] == color:
                        _widthStart = y
                    if image.imageArray[x, y] == color and y > _widthEnd:
                        _widthEnd = y
        # Copy the box with a 5-pixel margin as a binary (0/255) mask.
        # NOTE(review): _heightStart - 5 / _widthStart - 5 can go negative for
        # objects near the border; a negative numpy index wraps to the far side.
        _object = []
        for x in range(_heightStart - 5, _heightEnd + 5):
            _row = []
            for y in range(_widthStart - 5, _widthEnd + 5):
                if image.imageArray[x, y] == color:
                    _row.append(255)
                else:
                    _row.append(0)
            _object.append(_row)
        # Pad with blank rows (alternating top/bottom) until the mask is square.
        if len(_object) < len(_object[0]):
            blankRow = [0] * len(_object[0])
            status = True
            while len(_object) < len(_object[0]):
                if status:
                    _object.insert(0, blankRow)
                    status = False
                else:
                    _object.insert(len(_object) - 1, blankRow)
                    status = True
        # Pad with blank columns (alternating left/right) until the mask is square.
        if len(_object) > len(_object[0]):
            status = True
            while len(_object) > len(_object[0]):
                if status:
                    status = False
                else:
                    status = True
                for row in _object:
                    if status:
                        row.insert(0, 0)
                    else:
                        row.append(0)
        # Horizontal/vertical extents of the original bounding box.
        _lv = _widthEnd - _widthStart
        _lh = _heightEnd - _heightStart
        _lmax = _lv if _lv > _lh else _lh
        imageL = ImageL(_object, _lv, _lh, _lmax)
        _objectArray.append(imageL)
    return _objectArray
# Takes an image as an array. Turns every non-zero value into white.
def prettyWhite(array):
    """Return a uint8 copy of `array` with non-zero interior pixels set to 255.

    The one-pixel border is left unchanged, matching the original scan bounds.
    """
    array = array.astype('uint8')
    interior = array[1:-1, 1:-1]
    interior[interior != 0] = 255
    return array
# Detects the object's centre of gravity. Takes a PIL Image as argument.
def findCenter(image):
    """Compute the centre of gravity of a binary L-mode image and mark it.

    Returns a tuple of:
      - the image with a small plus sign drawn at the centre of gravity,
      - the centre coordinates [i, j] (row, column),
      - the raw geometric moments [m00, m01, m10].

    Raises ZeroDivisionError if the interior of the image is entirely zero
    (m00 == 0).
    """
    imageArray = asarray(image).copy()
    imageArray = imageArray.astype('uint8')
    imageArray.setflags(write=True)
    # Moment accumulators.
    m00 = 0
    m10 = 0
    m01 = 0
    # Compute the raw geometric moments over the interior pixels
    # (every non-zero pixel counts as 1).
    for x in range(1, image.size[1] - 1):
        for y in range(1, image.size[0] - 1):
            _pixel = 0
            if imageArray[x, y] != 0:
                _pixel = 1
            m00 = m00 + _pixel
            m10 = m10 + (x * _pixel)
            m01 = m01 + (y * _pixel)
    # print(m00)
    # print(m10)
    # print(m01)
    # Centre-of-gravity coordinates coords[i, j].
    i = int(m10 / m00)
    j = int(m01 / m00)
    # Draw a plus sign at the centre of gravity.
    # TODO - Make it better
    imageArray[i, j] = 0
    imageArray[i, j + 1] = 0
    imageArray[i, j - 1] = 0
    imageArray[i, j + 2] = 0
    imageArray[i, j - 2] = 0
    imageArray[i + 1, j] = 0
    imageArray[i - 1, j] = 0
    imageArray[i + 2, j] = 0
    imageArray[i - 2, j] = 0
    # Rescale pixels of value 1 to 255 so the image is visible.
    for x in range(1, image.size[1] - 1):
        for y in range(1, image.size[0] - 1):
            if imageArray[x, y] == 1:
                imageArray[x, y] = 255
    # TODO make this "L" mode more generic - RGB, RGBA etc.
    transformedImage = Image.fromarray(imageArray, "L")
    return transformedImage, [i, j], [m00, m01, m10]
# Find the distance from the given coordinates to the object's outline.
def findSizeToCircuit(imageCircuitArray, fromX, fromY):
    """Measure the minimal and maximal distance from a point to the outline.

    Args:
        imageCircuitArray: 2-D array whose outline pixels have the value 255.
        fromX, fromY: reference coordinates (row, column).

    Returns:
        dict with keys "min" and "max": the smallest / largest Euclidean
        distance from (fromX, fromY) to an outline pixel. If no outline pixel
        is found the sentinels 99999 / 0 are returned unchanged.
    """
    # Fix: the original also initialised an unused `surface` accumulator.
    minSize = 99999
    maxSize = 0
    # NOTE(review): the scan stops one row/column before the array edge,
    # matching the original bounds; outline pixels on the last row/column
    # are ignored.
    for x in range(0, len(imageCircuitArray) - 1):
        for y in range(0, len(imageCircuitArray[0]) - 1):
            if imageCircuitArray[x, y] == 255:
                # math.hypot is the Euclidean distance sqrt(dx^2 + dy^2).
                localSize = math.hypot(x - fromX, y - fromY)
                if localSize < minSize:
                    minSize = localSize
                if localSize > maxSize:
                    maxSize = localSize
    print(fromX, fromY, "MIN: ", minSize, "MAX: ", maxSize)
    return {
        "max": maxSize,
        "min": minSize
    }
# Class storing the data of a single image:
# - image path
# - PIL image
# - TkInter image component
# - imageArray (numpy view of the image)
# - list of detected objects
#
# There are two ways to construct this class:
# the first takes only the image path, the second takes
# another FullImage instance (deep copy).
class FullImage:
    def __init__(self, path=0, img=0):
        """Build either from a file path or as a copy of another FullImage.

        Exactly one of `path` / `img` is expected to be given; 0 acts as the
        "not provided" sentinel for both.
        """
        # First construction path - from an image file on disk.
        if path != 0:
            self.path = path
            self.image = Image.open(path)
            self.imageComponent = ImageTk.PhotoImage(self.image)
            self.imageArray = self.getImageArray()
            self.detectedObjects = []
        # Second construction path - deep copy of an existing FullImage.
        elif img != 0:
            self.path = img.path
            self.image = img.image
            self.imageComponent = img.imageComponent
            self.imageArray = img.imageArray.copy()
            self.detectedObjects = img.detectedObjects.copy()

    # Returns the image as a writable uint8 numpy array.
    def getImageArray(self):
        """Return a writable uint8 copy of the current PIL image."""
        _imageArray = asarray(self.image).copy()
        _imageArray = _imageArray.astype('uint8')
        _imageArray.setflags(write=True)
        return _imageArray

    # Unlike the copy constructor, this only updates the attributes of the
    # existing instance instead of creating a new object.
    def refreshImageState(self, image):
        """Overwrite this instance's state with another FullImage's state."""
        self.image = image.image
        self.imageArray = image.imageArray
        self.detectedObjects = image.detectedObjects
        self.imageComponent = image.imageComponent

    # Detects objects via connected-component labelling, replacing the
    # current array with the labelled (indexed) one.
    def labelObjects(self):
        """Label connected components in imageArray in place (uint8 labels)."""
        self.imageArray = label(self.imageArray)
        self.imageArray = self.imageArray.astype('uint8')

    # Finds and returns all distinct non-zero values present in the image
    # (needed for nicely recoloring the labelled components).
    def getImageColors(self):
        """Return the list of distinct non-zero pixel values in the interior."""
        _allColors = []
        for x in range(1, self.image.size[1] - 1):
            for y in range(1, self.image.size[0] - 1):
                _pixel = self.imageArray[x, y]
                if _pixel != 0 and _pixel not in _allColors:
                    _allColors.append(_pixel)
        return _allColors

    # Recolors the labelled objects in place so each label gets a visibly
    # distinct grey value (label * step + 50).
    def prettyColors(self):
        """Spread label values over the 50..250 grey range for visibility."""
        _allColors = self.getImageColors()
        _jump = int(200 / len(_allColors))
        for x in range(1, self.image.size[1] - 1):
            for y in range(1, self.image.size[0] - 1):
                # "Poza zakresem" = "out of range" (scaled value exceeds uint8).
                if (self.imageArray[x, y] * _jump) > 255:
                    print("Error: Poza zakresem")
                if self.imageArray[x, y] != 0:
                    self.imageArray[x, y] = self.imageArray[x, y] * _jump + 50
# Stores the lv, lh and lmax parameters of a sub-image.
class ImageL:
    """Value holder for an object mask and its extents.

    Attributes:
        image: the binary mask (list of lists).
        lv: horizontal extent of the object's bounding box.
        lh: vertical extent of the object's bounding box.
        lmax: the larger of the two extents.
    """

    def __init__(self, image, lv, lh, lmax):
        self.image, self.lv, self.lh, self.lmax = image, lv, lh, lmax
# Stores and computes every displayed parameter of a detected object.
class ImageObject:
    def __init__(self, image, imageArray, edgeArray, imageEdge, size, surfaceArea, circuit, center, centerToPrint,
                 lobject, mParameters):
        """Compute all shape descriptors (W1..W10, rmin/rmax) for one object.

        Args:
            image: Tk photo image of the object.
            imageArray: binary uint8 mask of the object.
            edgeArray: binary uint8 mask of the object's outline.
            imageEdge: Tk photo image of the outline.
            size: (width, height) of the displayed image.
            surfaceArea: object area in pixels.
            circuit: object perimeter in pixels.
            center: centroid coordinates used for the distance computations.
            centerToPrint: centroid coordinates shown in the UI.
            lobject: ImageL with the lv/lh/lmax extents.
            mParameters: raw geometric moments [m00, m01, m10].
        """
        # Image
        self.image = image
        # Image as an array
        self.imageArray = imageArray
        # Array holding the object's outline
        self.edgeArray = edgeArray
        # Image of the outline
        self.imageEdges = imageEdge
        # Image size
        self.size = size
        # Surface area
        self.surfaceArea = surfaceArea
        # Perimeter
        self.circuit = circuit
        # Manually computed centroid coordinates
        self.center = center
        # Centroid coordinates computed by the library
        self.centerToPrint = centerToPrint
        # lv / lh / lmax holder
        self.lobject = lobject
        # Raw geometric moments
        self.mParameters = mParameters
        # Smallest distance from the object's centre to the outline
        self.rmin = round(findSizeToCircuit(edgeArray, center[0], center[1])["min"], 2)
        # Largest distance from the object's centre to the outline
        self.rmax = round(findSizeToCircuit(edgeArray, center[0], center[1])["max"], 2)
        # Shape coefficient W1
        self.w1 = 2 * math.sqrt((surfaceArea / math.pi))
        # Shape coefficient W2
        self.w2 = circuit / math.pi
        # Shape coefficient W3
        self.w3 = (circuit / (2 * math.sqrt(math.pi * surfaceArea))) - 1
        # Accumulate the sum of squared distances needed for W4.
        w4sum = 0
        for x in range(1, len(edgeArray) - 1):
            for y in range(1, len(edgeArray[0]) - 1):
                if edgeArray[x][y] != 0:
                    w4sum += (pow(x - center[0], 2) + pow(y - center[1], 2))
        # Shape coefficient W4
        self.w4 = surfaceArea / math.sqrt(2 * math.pi * w4sum)
        # W5 is disabled because its computation was too slow
        # (one min-distance scan per object pixel).
        w5sum = 1
        # for x in range(1, len(imageArray) - 1):
        #     for y in range(1, len(imageArray[0]) - 1):
        #         if imageArray[x][y] != 0:
        #             w5sum += findSizeToCircuit(edgeArray, x, y)["min"]
        # self.w5 = pow(surfaceArea, 3) / pow(w5sum, 2)
        self.w5 = 0
        # Accumulate the distance sums needed for W6.
        w6sum1 = 0
        w6sum2 = 0
        for x in range(1, len(imageArray) - 1):
            for y in range(1, len(imageArray[0]) - 1):
                if imageArray[x][y] != 0:
                    w6sum1 += math.sqrt(pow(x - center[0], 2) + pow(y - center[1], 2))
                    w6sum2 += (pow(x - center[0], 2) + pow(y - center[1], 2))
        # Shape coefficient W6
        self.w6 = math.sqrt(pow(w6sum1, 2) / ((circuit * w6sum2) - 1))
        # Shape coefficient W7
        self.w7 = self.rmin / self.rmax
        # Shape coefficient W8
        self.w8 = lobject.lmax / circuit
        # Shape coefficient W9
        self.w9 = (2 * math.sqrt(math.pi * surfaceArea)) / circuit
        # Shape coefficient W10
        self.w10 = lobject.lh / lobject.lv
# Graphical element: a single displayed row with one object's data.
class ListViewRow(tk.Frame):
    def __init__(self, parent, imageObject, index):
        """Build one result row: object image, outline image and all metrics.

        Args:
            parent: containing Tk widget.
            imageObject: ImageObject with the precomputed values to display.
            index: zero-based row index (shown to the user as index + 1).
        """
        super().__init__(parent)
        self.grid()
        self.configure(bg='#eeeeee')
        # Fixed column widths so all rows line up.
        self.columnconfigure(0, minsize=120)
        self.columnconfigure(1, minsize=120)
        self.columnconfigure(2, minsize=200)
        self.columnconfigure(3, minsize=250)
        self.columnconfigure(4, minsize=250)
        # "Znaleziony obiekt" = "Found object" (user-facing label, kept in Polish).
        self.titleLabel = Label(self, text="Znaleziony obiekt " + str(index + 1))
        self.titleLabel.grid(row=0, column=0, columnspan=2, sticky=W)
        # Canvas with the object image.
        self.canvas = Canvas(self, width=imageObject.size[0], height=imageObject.size[1])
        self.canvas.grid(row=1, column=0, rowspan=6, sticky=W)
        self.canvas.create_image(0, 0, anchor=NW, image=imageObject.image)
        # Canvas with the outline image.
        self.canvasEdges = Canvas(self, width=imageObject.size[0], height=imageObject.size[1])
        self.canvasEdges.grid(row=1, column=1, rowspan=6, sticky=W)
        self.canvasEdges.create_image(0, 0, anchor=NW, image=imageObject.imageEdges)
        # Column 2: area, perimeter and the Lh/Lv/Lmax extents.
        textSurface = "Pole powierzchni: " + str(imageObject.surfaceArea)
        self.surfaceLabel = Label(self, text=textSurface)
        self.surfaceLabel.grid(row=1, column=2, sticky=NW)
        textCircuit = "Obwód: " + str(imageObject.circuit)
        self.circuitLabel = Label(self, text=textCircuit)
        self.circuitLabel.grid(row=2, column=2, sticky=NW)
        textLh = "Lh = " + str(imageObject.lobject.lh)
        self.lhLabel = Label(self, text=textLh)
        self.lhLabel.grid(row=3, column=2, sticky=NW)
        textLv = "Lv = " + str(imageObject.lobject.lv)
        self.lvLabel = Label(self, text=textLv)
        self.lvLabel.grid(row=4, column=2, sticky=NW)
        textLmax = "Lmax = " + str(imageObject.lobject.lmax)
        self.lmaxLabel = Label(self, text=textLmax)
        self.lmaxLabel.grid(row=5, column=2, sticky=NW)
        self.separator = ttk.Separator(self, orient='horizontal')
        self.separator.grid(row=10, column=0, columnspan=6, sticky=EW)
        # Column 3: geometric moments, rmin/rmax and the centroid.
        textm00 = "m00 = " + str(imageObject.mParameters[0])
        self.m00Label = Label(self, text=textm00)
        self.m00Label.grid(row=1, column=3, sticky=NW)
        textm01 = "m01 = " + str(imageObject.mParameters[1])
        self.m01Label = Label(self, text=textm01)
        self.m01Label.grid(row=2, column=3, sticky=NW)
        textm10 = "m10 = " + str(imageObject.mParameters[2])
        self.m10Label = Label(self, text=textm10)
        self.m10Label.grid(row=3, column=3, sticky=NW)
        textRmin = "rmin = " + str(imageObject.rmin)
        self.rminlabel = Label(self, text=textRmin)
        self.rminlabel.grid(row=4, column=3, sticky=NW)
        textRmax = "Rmax = " + str(imageObject.rmax)
        self.rmaxlabel = Label(self, text=textRmax)
        self.rmaxlabel.grid(row=5, column=3, sticky=NW)
        # "Środek ciężkości" = "centre of gravity".
        textCenter = "Środek ciężkości: (x " + str(imageObject.centerToPrint[0]) + ", y " + str(
            imageObject.centerToPrint[1]) + ")"
        self.centerLabel = Label(self, text=textCenter)
        self.centerLabel.grid(row=6, column=3, sticky=NW)
        # Columns 4-5: the W1..W10 shape coefficients, rounded for display.
        textW1 = "W1 = " + str(round(imageObject.w1, 2))
        self.w1Label = Label(self, text=textW1)
        self.w1Label.grid(row=1, column=4, sticky=NW)
        textW2 = "W2 = " + str(round(imageObject.w2, 2))
        self.w2Label = Label(self, text=textW2)
        self.w2Label.grid(row=2, column=4, sticky=NW)
        textW3 = "W3 = " + str(round(imageObject.w3, 2))
        self.w3Label = Label(self, text=textW3)
        self.w3Label.grid(row=3, column=4, sticky=NW)
        textW4 = "W4 = " + str(round(imageObject.w4, 2))
        self.w4Label = Label(self, text=textW4)
        self.w4Label.grid(row=4, column=4, sticky=NW)
        textW5 = "W5 = " + str(round(imageObject.w5, 2))
        self.w5Label = Label(self, text=textW5)
        self.w5Label.grid(row=5, column=4, sticky=NW)
        textW6 = "W6 = " + str(round(imageObject.w6, 2))
        self.w6Label = Label(self, text=textW6)
        self.w6Label.grid(row=1, column=5, sticky=NW)
        textW8 = "W7 = " + str(round(imageObject.w7, 2))
        self.w7Label = Label(self, text=textW8)
        self.w7Label.grid(row=2, column=5, sticky=NW)
        textW8 = "W8 = " + str(round(imageObject.w8, 2))
        self.w8Label = Label(self, text=textW8)
        self.w8Label.grid(row=3, column=5, sticky=NW)
        textW9 = "W9 = " + str(round(imageObject.w9, 2))
        self.w9Label = Label(self, text=textW9)
        self.w9Label.grid(row=4, column=5, sticky=NW)
        textW10 = "W10 = " + str(round(imageObject.w10, 2))
        self.w10Label = Label(self, text=textW10)
        self.w10Label.grid(row=5, column=5, sticky=NW)
        # Give every label the row's background color.
        self.titleLabel.config(bg="#eeeeee")
        self.surfaceLabel.config(bg="#eeeeee")
        self.circuitLabel.config(bg="#eeeeee")
        self.lhLabel.config(bg="#eeeeee")
        self.lvLabel.config(bg="#eeeeee")
        self.lmaxLabel.config(bg="#eeeeee")
        self.m00Label.config(bg="#eeeeee")
        self.m01Label.config(bg="#eeeeee")
        self.m10Label.config(bg="#eeeeee")
        self.centerLabel.config(bg="#eeeeee")
        self.w1Label.config(bg="#eeeeee")
        self.w2Label.config(bg="#eeeeee")
        self.w3Label.config(bg="#eeeeee")
        self.w4Label.config(bg="#eeeeee")
        self.w5Label.config(bg="#eeeeee")
        self.w6Label.config(bg="#eeeeee")
        self.w7Label.config(bg="#eeeeee")
        self.w8Label.config(bg="#eeeeee")
        self.w9Label.config(bg="#eeeeee")
        self.w10Label.config(bg="#eeeeee")
# For the images in the list to remain visible, their Tk photo objects must
# keep a fixed place in memory - that is why these module-level lists exist.
IMAGE_ARRAY = []
IMAGE_EDGE_ARRAY = []
# Frame for the whole displayed list. After clicking the "Indeksuj i licz"
# button it is rebuilt from scratch.
# TODO - ScrollView - we did not manage to get it working
class ListView(tk.Frame):
    def __init__(self, parent, detectedObjects):
        """Build one ListViewRow per detected object, computing its metrics.

        Args:
            parent: containing Tk widget.
            detectedObjects: list of ImageL masks produced by getImageObjects.
        """
        super().__init__(parent)
        counter = 0
        # Drop the photo references of the previous list before rebuilding.
        IMAGE_ARRAY.clear()
        IMAGE_EDGE_ARRAY.clear()
        self.elements_frame = Frame(self, width=1100, height=780, bg='#eeeeee')
        self.elements_frame.grid(row=1, column=1, pady=20, padx=20, sticky=N)
        self.elements_content = Frame(self, width=1050, height=650, bg='#eeeeee')
        self.elements_content.grid(row=1, column=1, pady=20, padx=20, sticky=N)
        # For every detected object: compute its values and create a row.
        for lobject in detectedObjects:
            _array = np.array(lobject.image, dtype=np.uint8)
            _regionprops = regionprops(_array)
            print("Area", _regionprops[0]['Area'])
            print("Centeroid", _regionprops[0]['Centroid'][0])
            print("Perimeter", )
            # Outline of the mask via the Roberts cross filter, normalised to 0/255.
            _edgeArray = filters.roberts(_array).astype('uint8')
            _edgeArray = prettyWhite(_edgeArray)
            _image = Image.fromarray(_array, "L").resize((100, 100))
            _imageEdge = Image.fromarray(_edgeArray, "L").resize((100, 100))
            _imageSize = _image.size
            _surfaceArea = round(_regionprops[0]['Area'])
            _circuit = round(_regionprops[0]['Perimeter'])
            # findCenter returns (marked image, [i, j] coords, raw moments).
            centerObject = findCenter(_image)
            _image = centerObject[0]
            _centerCoordsToPrint = centerObject[1]
            _centerCoords = [round(_regionprops[0]['Centroid'][0]), round(_regionprops[0]['Centroid'][1])]
            _mParameters = centerObject[2]
            _imageComponent = ImageTk.PhotoImage(_image)
            _imageEdgeComponent = ImageTk.PhotoImage(_imageEdge)
            # Keep the photo references alive for the lifetime of the list.
            IMAGE_ARRAY.append(_imageComponent)
            IMAGE_EDGE_ARRAY.append(_imageEdgeComponent)
            imageObject = ImageObject(
                image=IMAGE_ARRAY[counter],
                imageArray=_array,
                edgeArray=_edgeArray,
                imageEdge=IMAGE_EDGE_ARRAY[counter],
                size=_imageSize,
                surfaceArea=_surfaceArea,
                circuit=_circuit,
                center=_centerCoords,
                centerToPrint=_centerCoordsToPrint,
                lobject=lobject,
                mParameters=_mParameters)
            self.row = ListViewRow(self.elements_content, imageObject, counter)
            counter += 1
# Main application class. Responsible for everything we see.
class Window(object):
    def __init__(self):
        """Build the whole UI, load the sample images and start the main loop."""
        self.master = tk.Tk()
        self.master.title('Współczynniki kształtu, momenty geometryczne, wykrywanie centroidów')
        self.master.maxsize(1500, 1000)
        # Layout split: left image list, centre preview, bottom results.
        self.left_frame = Frame(self.master, width=190, height=800)
        self.left_frame.grid(rowspan=2, column=0, padx=10, pady=5, sticky=N)
        self.center_frame = Frame(self.master, width=800, height=200)
        self.center_frame.grid(row=0, column=1, padx=0, pady=10, sticky=N)
        self.elements_frame = Frame(self.master, width=800, height=780)
        self.elements_frame.grid(row=1, column=1, pady=10, sticky=N)
        # Initialise the images we want to work with.
        self.listImage1 = FullImage(path='img1.bmp')
        self.listImage2 = FullImage(path='img2.bmp')
        self.listImage3 = FullImage(path='img3.bmp')
        self.listImage4 = FullImage(path='img4.bmp')
        self.listImage5 = FullImage(path='img5.bmp')
        self.listImage6 = FullImage(path='img6.bmp')
        self.setup_new_image(self.listImage1)
        # Attach listeners to the images in the list on the left.
        self.setUpImageButtons()
        # Original image - top-left corner.
        self.canvasOrginalImage = tk.Canvas(self.center_frame, width=128, height=128)
        self.canvasOrginalImage.grid(row=0, column=0, sticky=N, pady=2)
        self.orginalImageOnCanvas = self.canvasOrginalImage.create_image(0, 0, anchor='nw',
                                                                         image=self.orginalImage.imageComponent)
        self.orginalImageLabel = Label(self.center_frame, text="Obrazek orginalny")
        self.orginalImageLabel.grid(row=1, column=0, sticky=N)
        # Modified image - top-right corner.
        self.canvasParsedImage = tk.Canvas(self.center_frame, width=128, height=128)
        self.canvasParsedImage.grid(row=0, column=2, sticky=N, pady=2)
        self.parsedImageOnCanvas = self.canvasParsedImage.create_image(0, 0, anchor='nw',
                                                                       image=self.transformedImage.imageComponent)
        self.parsedImageLabel = Label(self.center_frame, text="Obrazek zaindeksowany\n(po wykonaniu operacji)")
        self.parsedImageLabel.grid(row=1, column=2, sticky=N)
        # Results list below the previews.
        self.listView = ListView(self.elements_frame, self.transformedImage.detectedObjects)
        self.listView.grid(row=0, column=0, sticky=N, pady=10)
        # "Indeksuj i licz" = "Index and compute" - triggers the analysis.
        self.button = tk.Button(self.center_frame, width=30, text='Indeksuj i licz', command=self.on_click)
        self.button.grid(row=0, column=1, padx=10)
        # Blocks until the window is closed.
        self.master.mainloop()

    # Attach click listeners to the image buttons on the left side.
    # NOTE(review): the sixth button passes index 5, same as the fifth -
    # looks like a copy/paste slip, but the index is unused downstream here.
    def setUpImageButtons(self):
        """Create one button per sample image in the left-hand frame."""
        ttk.Button(self.left_frame, image=self.listImage1.imageComponent,
                   command=lambda: self.on_click_image(self.listImage1, 1)).grid(column=0, row=0, sticky=N + W, pady=5)
        ttk.Button(self.left_frame, image=self.listImage2.imageComponent,
                   command=lambda: self.on_click_image(self.listImage2, 2)).grid(column=0, row=1, sticky=N + W, pady=5)
        ttk.Button(self.left_frame, image=self.listImage3.imageComponent,
                   command=lambda: self.on_click_image(self.listImage3, 3)).grid(column=0, row=2, sticky=N + W, pady=5)
        ttk.Button(self.left_frame, image=self.listImage4.imageComponent,
                   command=lambda: self.on_click_image(self.listImage4, 4)).grid(column=0, row=3, sticky=N + W, pady=5)
        ttk.Button(self.left_frame, image=self.listImage5.imageComponent,
                   command=lambda: self.on_click_image(self.listImage5, 5)).grid(column=0, row=4, sticky=N + W, pady=5)
        ttk.Button(self.left_frame, image=self.listImage6.imageComponent,
                   command=lambda: self.on_click_image(self.listImage6, 5)).grid(column=0, row=5, sticky=N + W, pady=5)

    # Rebuild the results list (executed after picking a new image).
    def refreshList(self):
        """Destroy and recreate the ListView for the current image."""
        self.listView.destroy()
        self.listView = ListView(self.master, self.transformedImage.detectedObjects)
        self.listView.grid(row=1, column=1, pady=10, sticky=N)

    # Action performed when "Indeksuj i licz" is clicked: run the analysis
    # and refresh the preview and the results list.
    def on_click(self):
        """Index the current image, update the preview and the results."""
        _image = FullImage(img=getSmallerImages(self.transformedImage))
        self.transformedImage.refreshImageState(_image)
        self.canvasParsedImage.itemconfig(self.parsedImageOnCanvas, image=_image.imageComponent)
        self.refreshList()

    def setup_new_image(self, image):
        """Copy `image` into both the original and the transformed slots."""
        newImage = FullImage(img=image)
        self.orginalImage = newImage
        self.transformedImage = newImage
        return newImage

    def on_click_image(self, image, index):
        """Switch the application to a newly selected sample image."""
        newImage = self.setup_new_image(image)
        self.orginalImage.refreshImageState(newImage)
        self.transformedImage.refreshImageState(newImage)
        self.canvasOrginalImage.itemconfig(self.orginalImageOnCanvas, image=newImage.imageComponent)
        self.canvasParsedImage.itemconfig(self.parsedImageOnCanvas, image=newImage.imageComponent)
        self.refreshList()


# Start the application.
Window()
| [
"tkinter.ttk.Separator",
"PIL.ImageTk.PhotoImage",
"tkinter.Canvas",
"math.sqrt",
"tkinter.Button",
"numpy.asarray",
"PIL.Image.open",
"skimage.measure.label",
"skimage.filters.roberts",
"numpy.array",
"PIL.Image.fromarray",
"tkinter.Tk",
"skimage.measure.regionprops"
] | [((1039, 1077), 'PIL.Image.fromarray', 'Image.fromarray', (['image.imageArray', '"""L"""'], {}), "(image.imageArray, 'L')\n", (1054, 1077), False, 'from PIL import Image, ImageTk\n'), ((1105, 1136), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image.image'], {}), '(image.image)\n', (1123, 1136), False, 'from PIL import Image, ImageTk\n'), ((5116, 5148), 'PIL.Image.fromarray', 'Image.fromarray', (['imageArray', '"""L"""'], {}), "(imageArray, 'L')\n", (5131, 5148), False, 'from PIL import Image, ImageTk\n'), ((7826, 7848), 'skimage.measure.label', 'label', (['self.imageArray'], {}), '(self.imageArray)\n', (7831, 7848), False, 'from skimage.measure import label, regionprops\n'), ((14270, 14310), 'tkinter.ttk.Separator', 'ttk.Separator', (['self'], {'orient': '"""horizontal"""'}), "(self, orient='horizontal')\n", (14283, 14310), True, 'import tkinter.ttk as ttk\n'), ((20924, 20931), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (20929, 20931), True, 'import tkinter as tk\n'), ((22134, 22185), 'tkinter.Canvas', 'tk.Canvas', (['self.center_frame'], {'width': '(128)', 'height': '(128)'}), '(self.center_frame, width=128, height=128)\n', (22143, 22185), True, 'import tkinter as tk\n'), ((22695, 22746), 'tkinter.Canvas', 'tk.Canvas', (['self.center_frame'], {'width': '(128)', 'height': '(128)'}), '(self.center_frame, width=128, height=128)\n', (22704, 22746), True, 'import tkinter as tk\n'), ((23478, 23568), 'tkinter.Button', 'tk.Button', (['self.center_frame'], {'width': '(30)', 'text': '"""Indeksuj i licz"""', 'command': 'self.on_click'}), "(self.center_frame, width=30, text='Indeksuj i licz', command=self\n .on_click)\n", (23487, 23568), True, 'import tkinter as tk\n'), ((3825, 3839), 'numpy.asarray', 'asarray', (['image'], {}), '(image)\n', (3832, 3839), False, 'from numpy import asarray\n'), ((6430, 6446), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (6440, 6446), False, 'from PIL import Image, ImageTk\n'), ((6481, 6511), 'PIL.ImageTk.PhotoImage', 
'ImageTk.PhotoImage', (['self.image'], {}), '(self.image)\n', (6499, 6511), False, 'from PIL import Image, ImageTk\n'), ((10414, 10446), 'math.sqrt', 'math.sqrt', (['(surfaceArea / math.pi)'], {}), '(surfaceArea / math.pi)\n', (10423, 10446), False, 'import math\n'), ((10998, 11028), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * w4sum)'], {}), '(2 * math.pi * w4sum)\n', (11007, 11028), False, 'import math\n'), ((19040, 19079), 'numpy.array', 'np.array', (['lobject.image'], {'dtype': 'np.uint8'}), '(lobject.image, dtype=np.uint8)\n', (19048, 19079), True, 'import numpy as np\n'), ((19107, 19126), 'skimage.measure.regionprops', 'regionprops', (['_array'], {}), '(_array)\n', (19118, 19126), False, 'from skimage.measure import label, regionprops\n'), ((20006, 20032), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['_image'], {}), '(_image)\n', (20024, 20032), False, 'from PIL import Image, ImageTk\n'), ((20067, 20097), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['_imageEdge'], {}), '(_imageEdge)\n', (20085, 20097), False, 'from PIL import Image, ImageTk\n'), ((7097, 7116), 'numpy.asarray', 'asarray', (['self.image'], {}), '(self.image)\n', (7104, 7116), False, 'from numpy import asarray\n'), ((12164, 12196), 'math.sqrt', 'math.sqrt', (['(math.pi * surfaceArea)'], {}), '(math.pi * surfaceArea)\n', (12173, 12196), False, 'import math\n'), ((10589, 10621), 'math.sqrt', 'math.sqrt', (['(math.pi * surfaceArea)'], {}), '(math.pi * surfaceArea)\n', (10598, 10621), False, 'import math\n'), ((19299, 19322), 'skimage.filters.roberts', 'filters.roberts', (['_array'], {}), '(_array)\n', (19314, 19322), False, 'from skimage import filters\n'), ((19410, 19438), 'PIL.Image.fromarray', 'Image.fromarray', (['_array', '"""L"""'], {}), "(_array, 'L')\n", (19425, 19438), False, 'from PIL import Image, ImageTk\n'), ((19483, 19515), 'PIL.Image.fromarray', 'Image.fromarray', (['_edgeArray', '"""L"""'], {}), "(_edgeArray, 'L')\n", (19498, 19515), False, 'from PIL import Image, 
ImageTk\n')] |
import unittest
import random
from ephem.stars import stars
import katpoint
import numpy as np
from katacomb.mock_dataset import (MockDataSet,
ANTENNA_DESCRIPTIONS,
DEFAULT_TIMESTAMPS)
from katacomb import (AIPSPath,
KatdalAdapter,
obit_context,
uv_factory)
from katacomb.tests.test_aips_path import file_cleaner
class TestAipsFacades(unittest.TestCase):
"""
Test basic visibility reading and writing from a AIPS UV Facade object
"""
def test_uv_facade_read_write(self):
"""
Test basic reads and writes the AIPS UV Facade
"""
nvis = 577 # Read/write this many visibilities, total
nvispio = 20 # Read/write this many visibilities per IO op
uv_file_path = AIPSPath('test', 1, 'test', 1)
# Set up the spectral window
nchan = 4
spws = [{
'centre_freq': .856e9 + .856e9 / 2.,
'num_chans': nchan,
'channel_width': .856e9 / nchan,
'sideband': 1,
'band': 'L',
}]
# Use first four antenna to create the subarray
subarrays = [{'antenna': ANTENNA_DESCRIPTIONS[:4]}]
# Pick 5 random stars as targets
targets = [katpoint.Target("%s, star" % t) for t in
random.sample(stars.keys(), 5)]
# track for 5 on each target
slew_track_dumps = (('track', 5),)
scans = [(e, nd, t) for t in targets
for e, nd in slew_track_dumps]
# Create Mock dataset and wrap it in a KatdalAdapter
KA = KatdalAdapter(MockDataSet(timestamps=DEFAULT_TIMESTAMPS,
subarrays=subarrays, spws=spws, dumps=scans))
with obit_context(), file_cleaner(uv_file_path):
# Create the UV file
with uv_factory(aips_path=uv_file_path,
mode="w",
nvispio=nvispio,
table_cmds=KA.default_table_cmds(),
desc=KA.uv_descriptor()) as uvf:
uv_desc = uvf.Desc.Dict
# Length of visibility buffer record
lrec = uv_desc['lrec']
# Random parameter indices
iloct = uv_desc['iloct'] # time
# Write out visibilities, putting sequential values
# in the time random parameter
for firstVis in range(1, nvis+1, nvispio):
numVisBuff = min(nvis+1-firstVis, nvispio)
uv_desc = uvf.Desc.Dict
uv_desc['numVisBuff'] = numVisBuff
uvf.Desc.Dict = uv_desc
times = np.arange(firstVis, firstVis+numVisBuff, dtype=np.float32)
buf = uvf.np_visbuf
buf[iloct:lrec*numVisBuff:lrec] = times
uvf.Write(firstVis=firstVis)
# Now re-open in readonly mode and test
# that we get the same sequential values out
with uv_factory(aips_path=uv_file_path,
mode="r",
nvispio=nvispio) as uvf:
uv_desc = uvf.Desc.Dict
# Length of visibility buffer record
lrec = uv_desc['lrec']
nvis = uv_desc['nvis']
# Random parameter indices
iloct = uv_desc['iloct'] # time
for firstVis in range(1, nvis+1, nvispio):
numVisBuff = min(nvis+1-firstVis, nvispio)
uv_desc = uvf.Desc.Dict
uv_desc['numVisBuff'] = numVisBuff
uvf.Desc.Dict = uv_desc
uvf.Read(firstVis=firstVis)
buf = uvf.np_visbuf
times = np.arange(firstVis, firstVis+numVisBuff, dtype=np.float32)
buf_times = buf[iloct:lrec*numVisBuff:lrec]
self.assertTrue(np.all(times == buf_times))
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"katacomb.AIPSPath",
"katacomb.obit_context",
"numpy.arange",
"katpoint.Target",
"katacomb.tests.test_aips_path.file_cleaner",
"katacomb.uv_factory",
"katacomb.mock_dataset.MockDataSet",
"numpy.all",
"ephem.stars.stars.keys"
] | [((4139, 4154), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4152, 4154), False, 'import unittest\n'), ((869, 899), 'katacomb.AIPSPath', 'AIPSPath', (['"""test"""', '(1)', '"""test"""', '(1)'], {}), "('test', 1, 'test', 1)\n", (877, 899), False, 'from katacomb import AIPSPath, KatdalAdapter, obit_context, uv_factory\n'), ((1342, 1373), 'katpoint.Target', 'katpoint.Target', (["('%s, star' % t)"], {}), "('%s, star' % t)\n", (1357, 1373), False, 'import katpoint\n'), ((1697, 1788), 'katacomb.mock_dataset.MockDataSet', 'MockDataSet', ([], {'timestamps': 'DEFAULT_TIMESTAMPS', 'subarrays': 'subarrays', 'spws': 'spws', 'dumps': 'scans'}), '(timestamps=DEFAULT_TIMESTAMPS, subarrays=subarrays, spws=spws,\n dumps=scans)\n', (1708, 1788), False, 'from katacomb.mock_dataset import MockDataSet, ANTENNA_DESCRIPTIONS, DEFAULT_TIMESTAMPS\n'), ((1827, 1841), 'katacomb.obit_context', 'obit_context', ([], {}), '()\n', (1839, 1841), False, 'from katacomb import AIPSPath, KatdalAdapter, obit_context, uv_factory\n'), ((1843, 1869), 'katacomb.tests.test_aips_path.file_cleaner', 'file_cleaner', (['uv_file_path'], {}), '(uv_file_path)\n', (1855, 1869), False, 'from katacomb.tests.test_aips_path import file_cleaner\n'), ((3140, 3201), 'katacomb.uv_factory', 'uv_factory', ([], {'aips_path': 'uv_file_path', 'mode': '"""r"""', 'nvispio': 'nvispio'}), "(aips_path=uv_file_path, mode='r', nvispio=nvispio)\n", (3150, 3201), False, 'from katacomb import AIPSPath, KatdalAdapter, obit_context, uv_factory\n'), ((1416, 1428), 'ephem.stars.stars.keys', 'stars.keys', ([], {}), '()\n', (1426, 1428), False, 'from ephem.stars import stars\n'), ((2804, 2864), 'numpy.arange', 'np.arange', (['firstVis', '(firstVis + numVisBuff)'], {'dtype': 'np.float32'}), '(firstVis, firstVis + numVisBuff, dtype=np.float32)\n', (2813, 2864), True, 'import numpy as np\n'), ((3919, 3979), 'numpy.arange', 'np.arange', (['firstVis', '(firstVis + numVisBuff)'], {'dtype': 'np.float32'}), '(firstVis, firstVis + 
numVisBuff, dtype=np.float32)\n', (3928, 3979), True, 'import numpy as np\n'), ((4078, 4104), 'numpy.all', 'np.all', (['(times == buf_times)'], {}), '(times == buf_times)\n', (4084, 4104), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import numpy as np
import scipy.stats
from app import app
from apps.commons import gen_header, common_fig_layout
# global variables
x_max = 6.
n_points = 200
x = np.linspace(-x_max, x_max, n_points)
# components of the app
# header text plus logo
header = gen_header("Cohen's d-value", logo='/assets/icons8-return-96.png', href='/toc')
# Plotly figure
fig_display = dcc.Graph(id='fig-display')
# sliders for delta_mu (difference of means), sigma_1 (std-dev of 1st distribution)
# and sigma_2 (std-dev of 2nd distribution)
sliders = [
# delta_mu
html.Div([
html.Label('set \u0394\u03BC:', className="control_label"),
dcc.Slider(id='delta-mu',
min=0., max=5., step=0.2, value=2.0,
marks={i: '{:.1f}'.format(i) for i in range(0, 6)},
className='dcc_control')
], className='w3-container w3-padding w3-third'),
# sigma_1
html.Div([
html.Label('set \u03C3\u2081:', className="control_label"),
dcc.Slider(id='sigma-1',
min=0.5, max=2.0, step=0.1, value=1.,
marks={0.5: '0.5', 1: '1.0', 1.5: '1.5', 2: '2.0'},
className='dcc_control')
], className='w3-container w3-padding w3-third'),
# sigma_2
html.Div([
html.Label('set \u03C3\u2082:', className="control_label"),
dcc.Slider(id='sigma-2',
min=0.5, max=2.0, step=0.1, value=1.,
marks={0.5: '0.5', 1: '1.0', 1.5: '1.5', 2: '2.0'},
className='dcc_control')
], className='w3-container w3-padding w3-third')
]
layout = html.Div([
html.Div(header, className='w3-row'),
html.Div([
html.Div([
html.P("""
Cohen's d-value is a measure of the effect size (e.g. difference between control and treatment group)
calculated using the difference of means scaled by a pooled standard deviation. Using
population parameters, it is defined as:
"""),
html.Img(src='/assets/Cohen_d.svg', style={'display': 'block', 'margin-left': 'auto',
'margin-right': 'auto', 'width': '67%'}),
html.P("""
The d-value is a dimensionless quantity and can be employed across scientific disciplines. It is frequently
used in estimating necessary sample sizes for statistical testing.
"""),
html.P("""
Smaller d-values indicate a stronger overlap of the distributions of measured quantities for the two groups.
Use the sliders to change the values for \u0394\u03BC, \u03C3\u2081 and \u03C3\u2082.
""")
], className='w3-container w3-col m3 w3-padding'),
html.Div([
fig_display,
html.Div(sliders, className='w3-row')
], className='w3-container w3-col m9 w3-padding')
], className='w3-row'),
], className='w3-container w3-padding'
)
@app.callback(
Output('fig-display', 'figure'),
[Input('delta-mu', 'value'),
Input('sigma-1', 'value'),
Input('sigma-2', 'value')]
)
def gen_figure(delta_mu, sigma_1, sigma_2):
control = go.Scatter(
x=x,
y=scipy.stats.norm.pdf(x, scale=sigma_1),
mode='none',
fill='tozeroy',
fillcolor='rgba(152,78,163,0.5)',
name='control group',
showlegend=True,
)
effect = go.Scatter(
x=x + delta_mu,
y=scipy.stats.norm.pdf(x, scale=sigma_2),
mode='none',
fill='tozeroy',
fillcolor='rgba(77,175,74,0.5)',
name='treatment group',
showlegend=True,
)
data = [control, effect]
d = delta_mu/np.sqrt((sigma_1**2 + sigma_2**2)/2.)
fig_title = f"Effect size: d={d:.2f}<br>" + \
f"(\u0394\u03BC={delta_mu:.1f}, " + \
f"\u03C3<sub>1</sub>={sigma_1:.1f}, " + \
f"\u03C3<sub>2</sub>={sigma_2:.1f})"
fig_layout = {
'xaxis': {'title': {'text': 'measured quantity'}},
'yaxis': {'title': {'text': 'pdf'}},
'legend': {'xanchor': 'right', 'yanchor': 'top', 'x': 1, 'y': 1},
'title': go.layout.Title(text=fig_title, xref="paper", x=0)
}
fig_layout.update(common_fig_layout)
return go.Figure(data=data, layout=fig_layout)
if __name__ == '__main__':
app.title = "Cohen's d-value"
app.layout = layout
app.run_server(debug=True)
| [
"plotly.graph_objs.layout.Title",
"apps.commons.gen_header",
"dash_core_components.Slider",
"dash_html_components.Div",
"dash_html_components.Label",
"dash.dependencies.Input",
"dash_html_components.P",
"dash_html_components.Img",
"numpy.linspace",
"dash_core_components.Graph",
"app.app.run_serv... | [((336, 372), 'numpy.linspace', 'np.linspace', (['(-x_max)', 'x_max', 'n_points'], {}), '(-x_max, x_max, n_points)\n', (347, 372), True, 'import numpy as np\n'), ((431, 510), 'apps.commons.gen_header', 'gen_header', (['"""Cohen\'s d-value"""'], {'logo': '"""/assets/icons8-return-96.png"""', 'href': '"""/toc"""'}), '("Cohen\'s d-value", logo=\'/assets/icons8-return-96.png\', href=\'/toc\')\n', (441, 510), False, 'from apps.commons import gen_header, common_fig_layout\n'), ((542, 569), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""fig-display"""'}), "(id='fig-display')\n", (551, 569), True, 'import dash_core_components as dcc\n'), ((4472, 4511), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'fig_layout'}), '(data=data, layout=fig_layout)\n', (4481, 4511), True, 'import plotly.graph_objs as go\n'), ((3184, 3215), 'dash.dependencies.Output', 'Output', (['"""fig-display"""', '"""figure"""'], {}), "('fig-display', 'figure')\n", (3190, 3215), False, 'from dash.dependencies import Input, Output\n'), ((4603, 4629), 'app.app.run_server', 'app.run_server', ([], {'debug': '(True)'}), '(debug=True)\n', (4617, 4629), False, 'from app import app\n'), ((1809, 1845), 'dash_html_components.Div', 'html.Div', (['header'], {'className': '"""w3-row"""'}), "(header, className='w3-row')\n", (1817, 1845), True, 'import dash_html_components as html\n'), ((3894, 3938), 'numpy.sqrt', 'np.sqrt', (['((sigma_1 ** 2 + sigma_2 ** 2) / 2.0)'], {}), '((sigma_1 ** 2 + sigma_2 ** 2) / 2.0)\n', (3901, 3938), True, 'import numpy as np\n'), ((4362, 4412), 'plotly.graph_objs.layout.Title', 'go.layout.Title', ([], {'text': 'fig_title', 'xref': '"""paper"""', 'x': '(0)'}), "(text=fig_title, xref='paper', x=0)\n", (4377, 4412), True, 'import plotly.graph_objs as go\n'), ((3222, 3248), 'dash.dependencies.Input', 'Input', (['"""delta-mu"""', '"""value"""'], {}), "('delta-mu', 'value')\n", (3227, 3248), False, 'from dash.dependencies import Input, 
Output\n'), ((3255, 3280), 'dash.dependencies.Input', 'Input', (['"""sigma-1"""', '"""value"""'], {}), "('sigma-1', 'value')\n", (3260, 3280), False, 'from dash.dependencies import Input, Output\n'), ((3287, 3312), 'dash.dependencies.Input', 'Input', (['"""sigma-2"""', '"""value"""'], {}), "('sigma-2', 'value')\n", (3292, 3312), False, 'from dash.dependencies import Input, Output\n'), ((749, 797), 'dash_html_components.Label', 'html.Label', (['"""set Δμ:"""'], {'className': '"""control_label"""'}), "('set Δμ:', className='control_label')\n", (759, 797), True, 'import dash_html_components as html\n'), ((1106, 1154), 'dash_html_components.Label', 'html.Label', (['"""set σ₁:"""'], {'className': '"""control_label"""'}), "('set σ₁:', className='control_label')\n", (1116, 1154), True, 'import dash_html_components as html\n'), ((1174, 1327), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""sigma-1"""', 'min': '(0.5)', 'max': '(2.0)', 'step': '(0.1)', 'value': '(1.0)', 'marks': "{(0.5): '0.5', (1): '1.0', (1.5): '1.5', (2): '2.0'}", 'className': '"""dcc_control"""'}), "(id='sigma-1', min=0.5, max=2.0, step=0.1, value=1.0, marks={(0.5\n ): '0.5', (1): '1.0', (1.5): '1.5', (2): '2.0'}, className='dcc_control')\n", (1184, 1327), True, 'import dash_core_components as dcc\n'), ((1463, 1511), 'dash_html_components.Label', 'html.Label', (['"""set σ₂:"""'], {'className': '"""control_label"""'}), "('set σ₂:', className='control_label')\n", (1473, 1511), True, 'import dash_html_components as html\n'), ((1531, 1684), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""sigma-2"""', 'min': '(0.5)', 'max': '(2.0)', 'step': '(0.1)', 'value': '(1.0)', 'marks': "{(0.5): '0.5', (1): '1.0', (1.5): '1.5', (2): '2.0'}", 'className': '"""dcc_control"""'}), "(id='sigma-2', min=0.5, max=2.0, step=0.1, value=1.0, marks={(0.5\n ): '0.5', (1): '1.0', (1.5): '1.5', (2): '2.0'}, className='dcc_control')\n", (1541, 1684), True, 'import dash_core_components as dcc\n'), ((1894, 
2196), 'dash_html_components.P', 'html.P', (['"""\n Cohen\'s d-value is a measure of the effect size (e.g. difference between control and treatment group)\n calculated using the difference of means scaled by a pooled standard deviation. Using\n population parameters, it is defined as:\n """'], {}), '(\n """\n Cohen\'s d-value is a measure of the effect size (e.g. difference between control and treatment group)\n calculated using the difference of means scaled by a pooled standard deviation. Using\n population parameters, it is defined as:\n """\n )\n', (1900, 2196), True, 'import dash_html_components as html\n'), ((2200, 2330), 'dash_html_components.Img', 'html.Img', ([], {'src': '"""/assets/Cohen_d.svg"""', 'style': "{'display': 'block', 'margin-left': 'auto', 'margin-right': 'auto', 'width':\n '67%'}"}), "(src='/assets/Cohen_d.svg', style={'display': 'block',\n 'margin-left': 'auto', 'margin-right': 'auto', 'width': '67%'})\n", (2208, 2330), True, 'import dash_html_components as html\n'), ((2395, 2631), 'dash_html_components.P', 'html.P', (['"""\n The d-value is a dimensionless quantity and can be employed across scientific disciplines. It is frequently\n used in estimating necessary sample sizes for statistical testing.\n """'], {}), '(\n """\n The d-value is a dimensionless quantity and can be employed across scientific disciplines. 
It is frequently\n used in estimating necessary sample sizes for statistical testing.\n """\n )\n', (2401, 2631), True, 'import dash_html_components as html\n'), ((2635, 2861), 'dash_html_components.P', 'html.P', (['"""\n Smaller d-values indicate a stronger overlap of the distributions of measured quantities for the two groups.\n Use the sliders to change the values for Δμ, σ₁ and σ₂.\n """'], {}), '(\n """\n Smaller d-values indicate a stronger overlap of the distributions of measured quantities for the two groups.\n Use the sliders to change the values for Δμ, σ₁ and σ₂.\n """\n )\n', (2641, 2861), True, 'import dash_html_components as html\n'), ((2997, 3034), 'dash_html_components.Div', 'html.Div', (['sliders'], {'className': '"""w3-row"""'}), "(sliders, className='w3-row')\n", (3005, 3034), True, 'import dash_html_components as html\n')] |
import tensorflow as tf
import numpy as np
import math
def lower_keys(dict):
return { k.lower(): v for k, v in dict.items()}
def printAsTabel(results, split_statics):
total = results[-1]
total_statics = split_statics[-1]
lines = []
for result, statics in zip(results[:-1], split_statics[:-1]):
line = None
for m_type, item in result.items():
# header
if line is None:
line = ' ' * 9 + ' |' + ' |'.join(['{:>6}'] * (1 + len(item.keys()))).format(*item.keys(), 'total') + '\n'
sep = '-' * len(line) + '\n'
line += ' ' * 9 + ' |' + ' |'.join(['T:{:>4}'] * (1 + len(item.keys()))).format(*[statics[k][0] for k in item], total_statics[0]) + '\n'
line += ' ' * 9 + ' |' + ' |'.join(['V:{:>4}'] * (1 + len(item.keys()))).format(*[statics[k][1] for k in item], total_statics[1]) + '\n'
line += sep
# row
if m_type == 'w_scene':
line += '{:>9} |'.format(m_type) + ' |'.join(['{:6d}'] * (1 + len(item.keys()))).format(*item.values(), total[m_type]) + '\n'
else:
line += '{:>9} |'.format(m_type) + ' |'.join(['{:.4f}'] * (1 + len(item.keys()))).format(*item.values(), total[m_type]) + '\n'
lines.append(line)
return lines
def printAsTabelTest(results, split_statics):
total = results[-1]
total_statics = split_statics[-1]
lines = []
for result, statics in zip(results[:-1], split_statics[:-1]):
line = None
for m_type, item in result.items():
# header
if line is None:
line = ' ' * 9 + ' |' + ' |'.join(['{:>6}'] * (1 + len(item.keys()))).format(*item.keys(), 'total') + '\n'
sep = '-' * len(line) + '\n'
line += ' ' * 9 + ' |' + ' |'.join(['V:{:>4}'] * (1 + len(item.keys()))).format(*[statics[k] for k in item], total_statics) + '\n'
line += sep
# row
line += '{:>9} |'.format(m_type) + ' |'.join(['{:.4f}'] * (1 + len(item.keys()))).format(*item.values(), total[m_type]) + '\n'
lines.append(line)
return lines
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '#'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
def cal_rank(data):
sub_xyz, xyz, r = data
ranks = []
for x, y, z in sub_xyz:
bound_x = (x-r <= xyz[:, 0]) & (xyz[:, 0] <= x+r)
bound_y = (y-r <= xyz[:, 1]) & (xyz[:, 1] <= y+r)
bound_z = ( -1 <= xyz[:, 2]) & (xyz[:, 2] <= z)
inbox_idx = bound_x & bound_y & bound_z
inbox_points = np.sum(inbox_idx)
ranks.append(inbox_points)
return ranks
def convert_to_cam_coordinate(xyz, cam_param):
'''
Arguments:
xyz np.array [N x 3]
cam_param (tx, ty, tz, rx, ry, rz, worldInverseMat, projectionMat)
'''
tx, ty, tz = cam_param[:3]
rx, ry, rz = cam_param[3:6]
# need to convert from column major
# world_inverse_mat = cam_param[6:22].reshape((4, 4)).T
cosYaw = math.cos(-rz)
sinYaw = math.sin(-rz)
Rz = np.array(([
[cosYaw, sinYaw, 0],
[-sinYaw, cosYaw, 0],
[0, 0, 1]
]), dtype=np.float32)
cosRoll = math.cos(-rx)
sinRoll = math.sin(-rx)
Rx = np.array([
[1, 0, 0],
[0, cosRoll, sinRoll],
[0, -sinRoll, cosRoll]
], dtype=np.float32)
# 1. translate
cXYZ = xyz - [tx, ty, tz]
# 2. rotate inverse of ZXY
cXYZ = cXYZ @ (Rz @ Rx)
return cXYZ.astype(np.float32)
def convert_to_projection_coordinate(cxyz, cam_param):
# need to convert from column major
# map to x:[-1, 1], y:[-1, 1], z:[-1, 1]
projection_mat = cam_param[22:38].reshape((4, 4)).T
# add one more homogenose
pXYZ = np.ones((cxyz.shape[0], 4), dtype=np.float32)
pXYZ[:, :3] = cxyz
pXYZ = (projection_mat @ pXYZ.T).T
# divide w
# from https://stackoverflow.com/questions/16202348/numpy-divide-row-by-row-sum
pXYZ = pXYZ / pXYZ[:, -1, None]
return pXYZ[:, :3]
def old_closer_to_the_inside_point(xyz, inside, direction = 1, space = 1):
inside_xyz = xyz[inside == 1]
offset_z = np.max(inside_xyz[:, 2]) if direction == 1 else np.min(inside_xyz[:, 2])
if space == 0:
offset_z = np.mean(inside_xyz[:, 2])
mean_x = np.mean(inside_xyz[:, 0])
mean_y = np.mean(inside_xyz[:, 1])
xyz = xyz - [mean_x, mean_y, offset_z]
return xyz
def closer_to_the_inside_point(xyz, inside, direction = 1, space = 1):
inside_xyz = xyz[inside == 1]
mean_x = np.mean(inside_xyz[:, 0])
mean_y = np.mean(inside_xyz[:, 1])
mean_z = np.mean(inside_xyz[:, 2])
# mean_x = -1.0
# mean_y = -1.0
# mean_z = 1.0
d = math.sqrt(mean_x **2 + mean_y**2 + mean_z**2)
d2 = math.sqrt(mean_x **2 + mean_z **2)
sinBeta = mean_x / d2
cosBeta = -mean_z / d2
Ry = np.array([
[cosBeta, 0, -sinBeta],
[0, 1, 0],
[sinBeta, 0, cosBeta]
], dtype=np.float64)
sinGama = -mean_y / d
cosGama = abs(d2 / d)
Rx = np.array([
[1, 0, 0],
[0, cosGama, sinGama],
[0, -sinGama, cosGama]
], dtype=np.float64)
Rmt = Ry @ Rx
xyz = xyz @ Rmt
# print('check===>', np.array([[mean_x, mean_y, mean_z]], dtype=np.float64) @ Ry @ Rx)
inside_xyz = xyz[inside == 1]
offset_z = np.max(inside_xyz[:, 2])
xyz[:, 2] = xyz[:, 2] - offset_z
# xyz = xyz - [mean_x, mean_y, offset_z]
return xyz
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
gts = tf.reduce_sum(gt_sorted)
intersection = gts - tf.cumsum(gt_sorted)
union = gts + tf.cumsum(1. - gt_sorted)
jaccard = 1. - intersection / union
jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
return jaccard
# --------------------------- Lovasz BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
def treat_image(log_lab):
log, lab = log_lab
log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
log, lab = flatten_binary_scores(log, lab, ignore)
return lovasz_hinge_flat(log, lab)
losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
loss = tf.reduce_mean(losses)
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
def compute_loss():
labelsf = tf.cast(labels, logits.dtype)
signs = 2. * labelsf - 1.
errors = 1. - logits * tf.stop_gradient(signs)
errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
gt_sorted = tf.gather(labelsf, perm)
grad = lovasz_grad(gt_sorted)
loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
return loss
# deal with the void prediction case (only void pixels)
loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
lambda: tf.reduce_sum(logits) * 0.,
compute_loss,
strict=True,
name="loss"
)
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = tf.reshape(scores, (-1,))
labels = tf.reshape(labels, (-1,))
if ignore is None:
return scores, labels
valid = tf.not_equal(labels, ignore)
vscores = tf.boolean_mask(scores, valid, name='valid_scores')
vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
return vscores, vlabels
# --------------------------- Focal BINARY LOSSES ---------------------------
def focal_loss_sigmoid_on_2_classification(labels, logtis, alpha=0.5, gamma=2):
y_pred = tf.to_float(tf.sigmoid(logtis[:, :, 1])) # 转换成概率值
labels = tf.to_float(tf.argmax(labels, axis=2)) # int -> float
loss = -labels * alpha * ((1 - y_pred) ** gamma) * tf.log(tf.clip_by_value(y_pred, 1e-9, 1.0)) \
-(1 - labels) * (1 - alpha) * (y_pred ** gamma) * tf.log(tf.clip_by_value(1- y_pred, 1e-9, 1.0))
return loss
| [
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow.cumsum",
"tensorflow.reshape",
"numpy.ones",
"numpy.mean",
"tensorflow.not_equal",
"tensorflow.nn.relu",
"tensorflow.gather",
"tensorflow.concat",
"tensorflow.cast",
"numpy.max",
"math.cos",
"tensorflow.map_fn"... | [((4007, 4020), 'math.cos', 'math.cos', (['(-rz)'], {}), '(-rz)\n', (4015, 4020), False, 'import math\n'), ((4038, 4051), 'math.sin', 'math.sin', (['(-rz)'], {}), '(-rz)\n', (4046, 4051), False, 'import math\n'), ((4065, 4152), 'numpy.array', 'np.array', (['[[cosYaw, sinYaw, 0], [-sinYaw, cosYaw, 0], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[cosYaw, sinYaw, 0], [-sinYaw, cosYaw, 0], [0, 0, 1]], dtype=np.\n float32)\n', (4073, 4152), True, 'import numpy as np\n'), ((4223, 4236), 'math.cos', 'math.cos', (['(-rx)'], {}), '(-rx)\n', (4231, 4236), False, 'import math\n'), ((4255, 4268), 'math.sin', 'math.sin', (['(-rx)'], {}), '(-rx)\n', (4263, 4268), False, 'import math\n'), ((4282, 4373), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, cosRoll, sinRoll], [0, -sinRoll, cosRoll]]'], {'dtype': 'np.float32'}), '([[1, 0, 0], [0, cosRoll, sinRoll], [0, -sinRoll, cosRoll]], dtype=\n np.float32)\n', (4290, 4373), True, 'import numpy as np\n'), ((4819, 4864), 'numpy.ones', 'np.ones', (['(cxyz.shape[0], 4)'], {'dtype': 'np.float32'}), '((cxyz.shape[0], 4), dtype=np.float32)\n', (4826, 4864), True, 'import numpy as np\n'), ((5361, 5386), 'numpy.mean', 'np.mean', (['inside_xyz[:, 0]'], {}), '(inside_xyz[:, 0])\n', (5368, 5386), True, 'import numpy as np\n'), ((5400, 5425), 'numpy.mean', 'np.mean', (['inside_xyz[:, 1]'], {}), '(inside_xyz[:, 1])\n', (5407, 5425), True, 'import numpy as np\n'), ((5609, 5634), 'numpy.mean', 'np.mean', (['inside_xyz[:, 0]'], {}), '(inside_xyz[:, 0])\n', (5616, 5634), True, 'import numpy as np\n'), ((5648, 5673), 'numpy.mean', 'np.mean', (['inside_xyz[:, 1]'], {}), '(inside_xyz[:, 1])\n', (5655, 5673), True, 'import numpy as np\n'), ((5687, 5712), 'numpy.mean', 'np.mean', (['inside_xyz[:, 2]'], {}), '(inside_xyz[:, 2])\n', (5694, 5712), True, 'import numpy as np\n'), ((5780, 5830), 'math.sqrt', 'math.sqrt', (['(mean_x ** 2 + mean_y ** 2 + mean_z ** 2)'], {}), '(mean_x ** 2 + mean_y ** 2 + mean_z ** 2)\n', (5789, 5830), False, 
'import math\n'), ((5835, 5871), 'math.sqrt', 'math.sqrt', (['(mean_x ** 2 + mean_z ** 2)'], {}), '(mean_x ** 2 + mean_z ** 2)\n', (5844, 5871), False, 'import math\n'), ((5937, 6028), 'numpy.array', 'np.array', (['[[cosBeta, 0, -sinBeta], [0, 1, 0], [sinBeta, 0, cosBeta]]'], {'dtype': 'np.float64'}), '([[cosBeta, 0, -sinBeta], [0, 1, 0], [sinBeta, 0, cosBeta]], dtype=\n np.float64)\n', (5945, 6028), True, 'import numpy as np\n'), ((6116, 6207), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, cosGama, sinGama], [0, -sinGama, cosGama]]'], {'dtype': 'np.float64'}), '([[1, 0, 0], [0, cosGama, sinGama], [0, -sinGama, cosGama]], dtype=\n np.float64)\n', (6124, 6207), True, 'import numpy as np\n'), ((6413, 6437), 'numpy.max', 'np.max', (['inside_xyz[:, 2]'], {}), '(inside_xyz[:, 2])\n', (6419, 6437), True, 'import numpy as np\n'), ((6682, 6706), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['gt_sorted'], {}), '(gt_sorted)\n', (6695, 6706), True, 'import tensorflow as tf\n'), ((6851, 6907), 'tensorflow.concat', 'tf.concat', (['(jaccard[0:1], jaccard[1:] - jaccard[:-1])', '(0)'], {}), '((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)\n', (6860, 6907), True, 'import tensorflow as tf\n'), ((9052, 9077), 'tensorflow.reshape', 'tf.reshape', (['scores', '(-1,)'], {}), '(scores, (-1,))\n', (9062, 9077), True, 'import tensorflow as tf\n'), ((9091, 9116), 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), '(labels, (-1,))\n', (9101, 9116), True, 'import tensorflow as tf\n'), ((9182, 9210), 'tensorflow.not_equal', 'tf.not_equal', (['labels', 'ignore'], {}), '(labels, ignore)\n', (9194, 9210), True, 'import tensorflow as tf\n'), ((9225, 9276), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['scores', 'valid'], {'name': '"""valid_scores"""'}), "(scores, valid, name='valid_scores')\n", (9240, 9276), True, 'import tensorflow as tf\n'), ((9291, 9342), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['labels', 'valid'], {'name': '"""valid_labels"""'}), "(labels, valid, 
name='valid_labels')\n", (9306, 9342), True, 'import tensorflow as tf\n'), ((3533, 3550), 'numpy.sum', 'np.sum', (['inbox_idx'], {}), '(inbox_idx)\n', (3539, 3550), True, 'import numpy as np\n'), ((5211, 5235), 'numpy.max', 'np.max', (['inside_xyz[:, 2]'], {}), '(inside_xyz[:, 2])\n', (5217, 5235), True, 'import numpy as np\n'), ((5259, 5283), 'numpy.min', 'np.min', (['inside_xyz[:, 2]'], {}), '(inside_xyz[:, 2])\n', (5265, 5283), True, 'import numpy as np\n'), ((5322, 5347), 'numpy.mean', 'np.mean', (['inside_xyz[:, 2]'], {}), '(inside_xyz[:, 2])\n', (5329, 5347), True, 'import numpy as np\n'), ((6732, 6752), 'tensorflow.cumsum', 'tf.cumsum', (['gt_sorted'], {}), '(gt_sorted)\n', (6741, 6752), True, 'import tensorflow as tf\n'), ((6771, 6797), 'tensorflow.cumsum', 'tf.cumsum', (['(1.0 - gt_sorted)'], {}), '(1.0 - gt_sorted)\n', (6780, 6797), True, 'import tensorflow as tf\n'), ((7643, 7701), 'tensorflow.map_fn', 'tf.map_fn', (['treat_image', '(logits, labels)'], {'dtype': 'tf.float32'}), '(treat_image, (logits, labels), dtype=tf.float32)\n', (7652, 7701), True, 'import tensorflow as tf\n'), ((7717, 7739), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (7731, 7739), True, 'import tensorflow as tf\n'), ((8152, 8181), 'tensorflow.cast', 'tf.cast', (['labels', 'logits.dtype'], {}), '(labels, logits.dtype)\n', (8159, 8181), True, 'import tensorflow as tf\n'), ((8388, 8412), 'tensorflow.gather', 'tf.gather', (['labelsf', 'perm'], {}), '(labelsf, perm)\n', (8397, 8412), True, 'import tensorflow as tf\n'), ((9557, 9584), 'tensorflow.sigmoid', 'tf.sigmoid', (['logtis[:, :, 1]'], {}), '(logtis[:, :, 1])\n', (9567, 9584), True, 'import tensorflow as tf\n'), ((9620, 9645), 'tensorflow.argmax', 'tf.argmax', (['labels'], {'axis': '(2)'}), '(labels, axis=2)\n', (9629, 9645), True, 'import tensorflow as tf\n'), ((8479, 8504), 'tensorflow.nn.relu', 'tf.nn.relu', (['errors_sorted'], {}), '(errors_sorted)\n', (8489, 8504), True, 'import tensorflow as 
tf\n'), ((8506, 8528), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['grad'], {}), '(grad)\n', (8522, 8528), True, 'import tensorflow as tf\n'), ((7469, 7491), 'tensorflow.expand_dims', 'tf.expand_dims', (['log', '(0)'], {}), '(log, 0)\n', (7483, 7491), True, 'import tensorflow as tf\n'), ((7493, 7515), 'tensorflow.expand_dims', 'tf.expand_dims', (['lab', '(0)'], {}), '(lab, 0)\n', (7507, 7515), True, 'import tensorflow as tf\n'), ((8247, 8270), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['signs'], {}), '(signs)\n', (8263, 8270), True, 'import tensorflow as tf\n'), ((8664, 8680), 'tensorflow.shape', 'tf.shape', (['logits'], {}), '(logits)\n', (8672, 8680), True, 'import tensorflow as tf\n'), ((8716, 8737), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['logits'], {}), '(logits)\n', (8729, 8737), True, 'import tensorflow as tf\n'), ((9725, 9761), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y_pred', '(1e-09)', '(1.0)'], {}), '(y_pred, 1e-09, 1.0)\n', (9741, 9761), True, 'import tensorflow as tf\n'), ((9826, 9866), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1 - y_pred)', '(1e-09)', '(1.0)'], {}), '(1 - y_pred, 1e-09, 1.0)\n', (9842, 9866), True, 'import tensorflow as tf\n'), ((8323, 8339), 'tensorflow.shape', 'tf.shape', (['errors'], {}), '(errors)\n', (8331, 8339), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
"""
Simple binning search algorithm utilities
"""
__author__ = "<NAME>, <NAME>"
# Python libraries
import os
import glob
import yaml
import numpy as np
#from matplotlib import mlab
import numpy.lib.recfunctions
import healpy as hp
import astropy.io.fits as pyfits # migrate to fitsio
import fitsio as fits
import sys
import pylab as plt
import numpy as np
from operator import add
from scipy import interpolate
from scipy.signal import argrelextrema
import scipy.ndimage
import pylab as plt
import pyfits
import matplotlib
from matplotlib import mlab
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import gridspec
# Ugali libraries
#import ugali.utils.mlab
import ugali.utils.healpix
import simple.filters
import simple.simple_utils
import simple.diagnostic_plots
########################################################################
# Load the survey configuration from the working directory.
# yaml.safe_load is used instead of yaml.load: it only constructs plain
# python objects.  yaml.load without an explicit Loader is unsafe on
# untrusted files and raises TypeError on PyYAML >= 6.
with open('config.yaml', 'r') as ymlfile:
    cfg = yaml.safe_load(ymlfile)
survey = cfg['survey']
nside = cfg[survey]['nside']
datadir = cfg[survey]['datadir']
isoname = cfg[survey]['isoname']
isosurvey = cfg[survey]['isosurvey']
mag_max = cfg[survey]['mag_max']
basis_1 = cfg[survey]['basis_1']
basis_2 = cfg[survey]['basis_2']
mode = cfg[survey]['mode']
sim_population = cfg[survey]['sim_population']
sim_dir = cfg[survey]['sim_dir']
fracdet_map = cfg[survey]['fracdet']
mag_g = cfg[survey]['mag_g']
mag_r = cfg[survey]['mag_r']
mag_g_err = cfg[survey]['mag_g_err']
mag_r_err = cfg[survey]['mag_r_err']
########################################################################
# Parse the target (RA, Dec) from the command line.
# Catch only the failures this can actually produce (missing argv entry
# or a non-numeric string); a bare `except` would also swallow
# SystemExit and KeyboardInterrupt.
try:
    ra_select, dec_select = float(sys.argv[1]), float(sys.argv[2])
except (IndexError, ValueError):
    sys.exit('ERROR! Coordinates not given in correct format.')
# Now cut for a single pixel
# Select the target healpix pixel plus its 8 neighbours so that objects
# near the pixel boundary are not lost.
pix_nside_select = ugali.utils.healpix.angToPix(nside, ra_select, dec_select)
pix_nside_neighbors = np.concatenate([[pix_nside_select], hp.get_all_neighbours(nside, pix_nside_select)])
# Construct data
# Gather the per-pixel catalog files for every selected pixel
# (presumably one FITS file per healpix pixel -- file name pattern
# '<datadir>/*_<pixel:05d>.fits').
file_array = []
for pix_nside in pix_nside_neighbors:
    inlist = glob.glob('{}/*_{:05d}.fits'.format(datadir, pix_nside))
    for infile in inlist:
        if not os.path.exists(infile):
            continue
        file_array.append(infile)
# Read and concatenate all catalogs into a single structured array.
data_array = []
for infile in file_array:
    data_array.append(fits.read(infile))
data = np.concatenate(data_array)
#data = simple.filters.dered_mag(survey, data)
#filter = simple.filters.star_filter(survey, data)
#data = data[filter]
###
#data = data[(data['MAG_PSF_R'] < 90) & (data['MAG_PSF_G'] < 90) & (data['MAG_PSF_I'] < 90)]
# Keep only objects with valid magnitudes (values >= 90 are sentinels
# for missing photometry) in the g and i bands.
data = data[(data['MAG_PSF_G'] < 90) & (data['MAG_PSF_I'] < 90)]
#print("EXPNUM_G: {}".format(np.unique(data['EXPNUM_G'])))
#print("EXPNUM_R: {}".format(np.unique(data['EXPNUM_R'])))
#print("CCDNUM_G: {}".format(np.unique(data['CCDNUM_G'])))
#print("CCDNUM_R: {}".format(np.unique(data['CCDNUM_R'])))
## Scatter Plot
##proj = ugali.utils.projector.Projector(ra_select, dec_select)
##for expnum in np.unique(data['EXPNUM_R']):
## x, y = proj.sphereToImage(data[basis_1][data['EXPNUM_R'] == expnum], data[basis_2][data['EXPNUM_R'] == expnum])
## plt.scatter(x, y, edgecolor='none', s=3, label='EXPNUM_R = {}'.format(expnum))
## plt.xlim(0.5, -0.5)
## plt.ylim(-0.5, 0.5)
## plt.gca().set_aspect('equal')
## plt.xlabel(r'$\Delta \alpha$ (deg)')
## plt.ylabel(r'$\Delta \delta$ (deg)')
## plt.legend(loc='upper left')
## plt.title('Stars at (RA, Dec) = ({}, {})'.format(ra_select, dec_select))
## plt.savefig('v2_scatter_test_expnum-r_{}.png'.format(expnum))
## plt.close()
#proj = ugali.utils.projector.Projector(ra_select, dec_select)
#x, y = proj.sphereToImage(data[basis_1], data[basis_2])
#plt.scatter(x, y, edgecolor='none', s=3)
#g_ra = [178.216, 178.233, 177.809]
#g_dec = [-41.8225, -41.806, -41.841]
#r_ra = [176.82, 176.832, 176.844, 176.856, 177.809, 177.038]
#r_dec = [-41.738, -41.7227, -41.7074, -41.6922, -41.841, -41.294]
#g_x, g_y = proj.sphereToImage(g_ra, g_dec)
#r_x, r_y = proj.sphereToImage(r_ra, r_dec)
#plt.scatter(g_x, g_y, edgecolor='none', c='g', s=10, label='g')
#plt.scatter(r_x, r_y, edgecolor='none', c='r', s=10, label='r')
#plt.xlim(1.0, -1.0)
#plt.ylim(-1.0, 1.0)
#plt.gca().set_aspect('equal')
#plt.xlabel(r'$\Delta \alpha$ (deg)')
#plt.ylabel(r'$\Delta \delta$ (deg)')
#plt.legend(loc='upper left')
#plt.title('Stars at (RA, Dec) = ({}, {})'.format(ra_select, dec_select))
#plt.savefig('v2_scatter_test.png')
#plt.close()
## CMD Plot
#angsep_1 = ugali.utils.projector.angsep(ra_select-0.5, dec_select, data[basis_1], data[basis_2])
#angsep_2 = ugali.utils.projector.angsep(ra_select+0.5, dec_select, data[basis_1], data[basis_2])
#g_radius = 0
#annulus_1 = (angsep_1 > g_radius) & (angsep_1 < 1.)
#annulus_2 = (angsep_2 > g_radius) & (angsep_2 < 1.)
#
## Plot background objects
#plt.scatter(data[mag_g][annulus_1] - data[mag_r][annulus_1], data[mag_g][annulus_1], c='r', alpha=0.5, edgecolor='none', s=1, label='RA-0.5')
#plt.scatter(data[mag_g][annulus_2] - data[mag_r][annulus_2], data[mag_g][annulus_2], c='g', alpha=0.5, edgecolor='none', s=1, label='RA+0.5')
#plt.axvline(x=np.mean(data[mag_g][annulus_1] - data[mag_r][annulus_1]), c='r', label='mean = {}'.format(np.mean(data[mag_g][annulus_1] - data[mag_r][annulus_1])))
#plt.axvline(x=np.mean(data[mag_g][annulus_2] - data[mag_r][annulus_2]), c='g', label='mean = {}'.format(np.mean(data[mag_g][annulus_2] - data[mag_r][annulus_2])))
#
#plt.axis([-0.5, 1, 16, mag_max])
#plt.gca().invert_yaxis()
#plt.gca().set_aspect(1./4.)
#plt.xlabel('g-r (mag)')
#plt.ylabel('g (mag)')
#plt.legend(loc='upper left')
#plt.title('CMD at (RA, Dec) = ({}, {})'.format(ra_select, dec_select))
#plt.savefig('cmd_test.png')
#plt.close()
# Density plot
# Project (RA, Dec) onto a tangent plane centred on the target position.
proj = ugali.utils.projector.Projector(ra_select, dec_select)
x, y = proj.sphereToImage(data[basis_1], data[basis_2])
bound = 0.5 #1.
steps = 100  # bins per axis; must be an int for np.linspace's num argument
bins = np.linspace(-bound, bound, steps)
signal = np.histogram2d(x, y, bins=[bins, bins])[0]
g_radius = 0.5
# Smoothing scale in degrees, empirically tuned to the aperture radius.
sigma = 0.01 * (0.25 * np.arctan(0.25*g_radius*60. - 1.5) + 1.3)
# scipy.ndimage.filters was deprecated and removed in scipy >= 1.12;
# the filter functions live directly in scipy.ndimage.
convolution = scipy.ndimage.gaussian_filter(signal, sigma/(bound/steps))
plt.pcolormesh(bins, bins, convolution.T, cmap='Greys')
plt.xlim(bound, -bound)  # flipped: RA increases to the left on the sky
plt.ylim(-bound, bound)
plt.gca().set_aspect('equal')
plt.xlabel(r'$\Delta \alpha$ (deg)')
plt.ylabel(r'$\Delta \delta$ (deg)')
plt.colorbar()
plt.title('Density (MAG < 90 in g, i) at (RA, Dec) = ({}, {})'.format(ra_select, dec_select))
plt.savefig('density_g-i.png')
plt.close()
| [
"pylab.close",
"yaml.load",
"healpy.get_all_neighbours",
"pylab.pcolormesh",
"numpy.histogram2d",
"pylab.ylabel",
"os.path.exists",
"sys.exit",
"fitsio.read",
"pylab.savefig",
"pylab.colorbar",
"pylab.ylim",
"numpy.linspace",
"pylab.xlabel",
"pylab.gca",
"pylab.xlim",
"numpy.arctan",... | [((2358, 2384), 'numpy.concatenate', 'np.concatenate', (['data_array'], {}), '(data_array)\n', (2372, 2384), True, 'import numpy as np\n'), ((5919, 5952), 'numpy.linspace', 'np.linspace', (['(-bound)', 'bound', 'steps'], {}), '(-bound, bound, steps)\n', (5930, 5952), True, 'import numpy as np\n'), ((6169, 6224), 'pylab.pcolormesh', 'plt.pcolormesh', (['bins', 'bins', 'convolution.T'], {'cmap': '"""Greys"""'}), "(bins, bins, convolution.T, cmap='Greys')\n", (6183, 6224), True, 'import pylab as plt\n'), ((6226, 6249), 'pylab.xlim', 'plt.xlim', (['bound', '(-bound)'], {}), '(bound, -bound)\n', (6234, 6249), True, 'import pylab as plt\n'), ((6250, 6273), 'pylab.ylim', 'plt.ylim', (['(-bound)', 'bound'], {}), '(-bound, bound)\n', (6258, 6273), True, 'import pylab as plt\n'), ((6304, 6341), 'pylab.xlabel', 'plt.xlabel', (['"""$\\\\Delta \\\\alpha$ (deg)"""'], {}), "('$\\\\Delta \\\\alpha$ (deg)')\n", (6314, 6341), True, 'import pylab as plt\n'), ((6341, 6378), 'pylab.ylabel', 'plt.ylabel', (['"""$\\\\Delta \\\\delta$ (deg)"""'], {}), "('$\\\\Delta \\\\delta$ (deg)')\n", (6351, 6378), True, 'import pylab as plt\n'), ((6378, 6392), 'pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (6390, 6392), True, 'import pylab as plt\n'), ((6487, 6517), 'pylab.savefig', 'plt.savefig', (['"""density_g-i.png"""'], {}), "('density_g-i.png')\n", (6498, 6517), True, 'import pylab as plt\n'), ((6518, 6529), 'pylab.close', 'plt.close', ([], {}), '()\n', (6527, 6529), True, 'import pylab as plt\n'), ((943, 961), 'yaml.load', 'yaml.load', (['ymlfile'], {}), '(ymlfile)\n', (952, 961), False, 'import yaml\n'), ((5963, 6002), 'numpy.histogram2d', 'np.histogram2d', (['x', 'y'], {'bins': '[bins, bins]'}), '(x, y, bins=[bins, bins])\n', (5977, 6002), True, 'import numpy as np\n'), ((1729, 1788), 'sys.exit', 'sys.exit', (['"""ERROR! Coordinates not given in correct format."""'], {}), "('ERROR! 
Coordinates not given in correct format.')\n", (1737, 1788), False, 'import sys\n'), ((1955, 2001), 'healpy.get_all_neighbours', 'hp.get_all_neighbours', (['nside', 'pix_nside_select'], {}), '(nside, pix_nside_select)\n', (1976, 2001), True, 'import healpy as hp\n'), ((2331, 2348), 'fitsio.read', 'fits.read', (['infile'], {}), '(infile)\n', (2340, 2348), True, 'import fitsio as fits\n'), ((6274, 6283), 'pylab.gca', 'plt.gca', ([], {}), '()\n', (6281, 6283), True, 'import pylab as plt\n'), ((2187, 2209), 'os.path.exists', 'os.path.exists', (['infile'], {}), '(infile)\n', (2201, 2209), False, 'import os\n'), ((6045, 6084), 'numpy.arctan', 'np.arctan', (['(0.25 * g_radius * 60.0 - 1.5)'], {}), '(0.25 * g_radius * 60.0 - 1.5)\n', (6054, 6084), True, 'import numpy as np\n')] |
import numpy as np
from scipy.misc import logsumexp
def comp_edge_cts(A, comm_idxs):
    """
    Computes the number of edges between the n_comm communities in (multi-)graph with adjacency matrix A
    and community memberships comm_idxs.
    Used for inference calculations in SBM-type models
    :param A: nxn matrix, adjacency matrix of (multi-)graph (assumed symmetric)
    :param comm_idxs: length n_comm list of lists, comm_idxs[k] is list of vertices in community k
    :return: n_comm x n_comm matrix, edge_cts[k,l] is number of edges between communities k and l
    """
    n_comm = len(comm_idxs)
    edge_cts = np.zeros([n_comm, n_comm])
    for k in range(n_comm):
        for l in range(k, n_comm):
            # Sum the adjacency sub-block between communities k and l in one
            # vectorized operation (replaces the per-vertex python loop).
            block_sum = A[np.ix_(comm_idxs[k], comm_idxs[l])].sum()
            if k == l:
                # Within one community every undirected edge appears twice
                # in the symmetric block, so halve the diagonal entries.
                block_sum = block_sum / 2
            edge_cts[k, l] = block_sum
            edge_cts[l, k] = block_sum
    return edge_cts
def comp_tot_cts(comm_cts):
    """
    Computes the maximum number of possible edges between the n_comm communities in a simple graph
    with community occupations comm_cts.
    Used for inference calculations in SBM-type models
    :param comm_cts: length n_comm list of integers, comm_cts[k] is number of vertices in community k
    :return: n_comm x n_comm matrix, tot_cts[k,l] is max number of edges between communities k and l
    """
    occupations = np.asarray(comm_cts, dtype=float)
    # Off-diagonal: every cross-community vertex pair is a potential edge,
    # i.e. the outer product of the occupation numbers.
    tot_cts = np.outer(occupations, occupations)
    # Diagonal: unordered pairs within one community, n*(n-1)/2.
    np.fill_diagonal(tot_cts, occupations * (occupations - 1) / 2)
    return tot_cts
def softmax(log_prob):
    """
    Numerically stable (very, very close) approximation to:
        np.exp(log_prob) / np.sum(np.exp(log_prob))
    :param log_prob: logs of (unnormalized) probability distribution
    :return: vector of (non-negative) reals that sums to 1
    """
    # Shift so the largest log-probability is 0; exp() can then never overflow.
    shifted = log_prob - np.max(log_prob)
    # Entries below exp(-100) (~43 decimal places) carry negligible mass
    # and are left at exactly zero.
    keep = shifted > -100
    expvals = np.exp(shifted[keep])
    result = np.zeros(len(log_prob))
    result[keep] = expvals / expvals.sum()
    return result
| [
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.exp"
] | [((607, 633), 'numpy.zeros', 'np.zeros', (['[n_comm, n_comm]'], {}), '([n_comm, n_comm])\n', (615, 633), True, 'import numpy as np\n'), ((1454, 1480), 'numpy.zeros', 'np.zeros', (['[n_comm, n_comm]'], {}), '([n_comm, n_comm])\n', (1462, 1480), True, 'import numpy as np\n'), ((2415, 2439), 'numpy.exp', 'np.exp', (['rescale[non_neg]'], {}), '(rescale[non_neg])\n', (2421, 2439), True, 'import numpy as np\n'), ((2115, 2131), 'numpy.max', 'np.max', (['log_prob'], {}), '(log_prob)\n', (2121, 2131), True, 'import numpy as np\n'), ((2474, 2495), 'numpy.sum', 'np.sum', (['prob[non_neg]'], {}), '(prob[non_neg])\n', (2480, 2495), True, 'import numpy as np\n'), ((766, 792), 'numpy.sum', 'np.sum', (['A[i, comm_idxs[l]]'], {}), '(A[i, comm_idxs[l]])\n', (772, 792), True, 'import numpy as np\n')] |
import nibabel as nib
import nrrd
import os
import numpy as np
from nipype.interfaces.base import (
BaseInterface, TraitedSpec,
BaseInterfaceInputSpec, traits,
Directory)
from core.utils.filemanip import split_filename
from skimage.transform import resize
from skimage.filters.thresholding import threshold_otsu
from radiants.utils.networks import unet_lung
class LungSegmentationInferenceInputSpec(BaseInterfaceInputSpec):
    """Input specification for LungSegmentationInference."""
    # Stacked 2D image patches to run through the CNN.
    tensor = traits.Array(desc='Tensor to be fed to the network.')
    # Per-image bookkeeping (patches, slices, sizes, indexes, ...) from the
    # preprocessing step; presumably keyed by input image path -- verify
    # against the preprocessing node.
    image_info = traits.Dict(desc='Dictionary with information about the image.')
    # Paths of trained weight files, one per cross-validation fold.
    weights = traits.List(desc='List of network weights.')
    outdir = Directory('segmented', usedefault=True,
                       desc='Folder to store the preprocessing results.')
class LungSegmentationInferenceOutputSpec(TraitedSpec):
    """Output specification for LungSegmentationInference."""
    segmented_lungs = traits.File(exists=True, desc='Segmented lungs')
class LungSegmentationInference(BaseInterface):
    """Nipype interface running a CNN ensemble for lung segmentation.

    The prediction is averaged over all provided weight files
    (cross-validation folds), reassembled from 2D patches back into the
    original image geometry, and written next to the inputs in the same
    format (NRRD or NIfTI).
    """
    input_spec = LungSegmentationInferenceInputSpec
    output_spec = LungSegmentationInferenceOutputSpec

    def _run_interface(self, runtime):
        """Run the CNN inference and save the segmentations to disk."""
        self.image_info = self.inputs.image_info
        outdir = os.path.abspath(self.inputs.outdir)
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        test_set = np.asarray(self.inputs.tensor)
        predictions = []
        model = unet_lung()
        # Re-use a single model instance; only the weights change per fold.
        for i, weight in enumerate(self.inputs.weights):
            print('Segmentation inference fold {}.'.format(i+1))
            model.load_weights(weight)
            predictions.append(model.predict(test_set))
        # float16 keeps the (folds x patches x H x W) stack small in memory.
        predictions = np.asarray(predictions, dtype=np.float16)
        self.prediction = np.mean(predictions, axis=0)  # ensemble mean
        self.segmentation = self.save_inference(outdir)
        return runtime

    def save_inference(self, outdir, binarize=True):
        """Save the segmented masks, one output file per input image.

        :param outdir: directory the masks are written to
        :param binarize: apply Otsu thresholding to the averaged prediction
        :return: path of the last successfully written mask (None if every
            image failed)
        """
        prediction = self.prediction
        z0 = 0  # offset of the current image's patches within `prediction`
        outname = None  # guards the return value if all images fail
        for image in self.image_info:
            try:
                _, basename, ext = split_filename(image)
                patches = self.image_info[image]['patches']
                slices = self.image_info[image]['slices']
                resampled_image_dim = self.image_info[image]['image_dim']
                indexes = self.image_info[image]['indexes']
                deltas = self.image_info[image]['deltas']
                original_image_dim = self.image_info[image]['orig_size']
                # Channel 0 of this image's block of patch predictions.
                im = prediction[z0:z0+(slices*patches), :, :, 0]
                final_prediction = self.inference_reshaping(
                    im, patches, slices, resampled_image_dim, indexes, deltas,
                    original_image_dim, binarize=binarize)
                outname = os.path.join(outdir, basename.split(
                    '_resampled')[0]+'_lung_segmented{}'.format(ext))
                reference = self.image_info[image]['orig_image']
                # Save with the header/affine of the original image so the
                # mask stays registered to it.
                if ext == '.nrrd':
                    _, hd = nrrd.read(reference)
                    nrrd.write(outname, final_prediction, header=hd)
                elif ext == '.nii.gz' or ext == '.nii':
                    ref = nib.load(reference)
                    im2save = nib.Nifti1Image(final_prediction, affine=ref.affine)
                    nib.save(im2save, outname)
                z0 = z0+(slices*patches)
            except Exception:
                # Best-effort: skip images with incomplete metadata or an
                # unreadable reference file.  (Was a bare `except:`, which
                # would also have swallowed KeyboardInterrupt/SystemExit.)
                continue
        return outname

    def inference_reshaping(self, generated_images, patches, slices,
                            dims, indexes, deltas, original_size,
                            binarize=False):
        """Reassemble patch-wise predictions into one 3D volume.

        Overlapping patches are averaged; the volume is resized back to the
        original image size and optionally binarized.
        """
        if patches > 1:
            sl = 0
            # Sentinel value -2 marks voxels not covered by any patch.
            final_image = np.zeros((slices, dims[0], dims[1], patches),
                                   dtype=np.float32)-2
            for n in range(0, generated_images.shape[0], patches):
                k = 0
                for j in indexes[1]:
                    for i in indexes[0]:
                        final_image[sl, i[0]:i[1], j[0]:j[1], k] = (
                            generated_images[n+k, deltas[0]:, deltas[1]:])
                        k += 1
                sl = sl + 1
            # Average overlapping patches, ignoring uncovered voxels.
            final_image[final_image==-2] = np.nan
            final_image = np.nanmean(final_image, axis=-1)
            final_image[np.isnan(final_image)] = 0
        else:
            final_image = generated_images[:, deltas[0]:, deltas[1]:]
        # Reorder from (slice, row, col) to (row, col, slice).
        final_image = np.swapaxes(final_image, 0, 2)
        final_image = np.swapaxes(final_image, 0, 1)
        if final_image.shape != original_size:
            # Nearest-neighbour (order=0) resize keeps mask values crisp.
            final_image = resize(final_image.astype(np.float64), original_size, order=0,
                                 mode='edge', cval=0, anti_aliasing=False)
        if binarize:
            final_image = self.binarization(final_image)
        return final_image

    @staticmethod
    def binarization(image):
        """Binarize `image` in place using Otsu's threshold."""
        th = threshold_otsu(image)
        image[image>=th] = 1
        image[image!=1] = 0
        return image

    def _list_outputs(self):
        """Expose the saved segmentation path to downstream nodes."""
        outputs = self._outputs().get()
        outputs['segmented_lungs'] = self.segmentation
        return outputs
| [
"numpy.isnan",
"numpy.mean",
"numpy.nanmean",
"os.path.abspath",
"nipype.interfaces.base.traits.File",
"nipype.interfaces.base.Directory",
"nibabel.save",
"numpy.swapaxes",
"nipype.interfaces.base.traits.List",
"nrrd.read",
"nibabel.Nifti1Image",
"skimage.filters.thresholding.threshold_otsu",
... | [((456, 509), 'nipype.interfaces.base.traits.Array', 'traits.Array', ([], {'desc': '"""Tensor to be fed to the network."""'}), "(desc='Tensor to be fed to the network.')\n", (468, 509), False, 'from nipype.interfaces.base import BaseInterface, TraitedSpec, BaseInterfaceInputSpec, traits, Directory\n'), ((527, 591), 'nipype.interfaces.base.traits.Dict', 'traits.Dict', ([], {'desc': '"""Dictionary with information about the image."""'}), "(desc='Dictionary with information about the image.')\n", (538, 591), False, 'from nipype.interfaces.base import BaseInterface, TraitedSpec, BaseInterfaceInputSpec, traits, Directory\n'), ((606, 650), 'nipype.interfaces.base.traits.List', 'traits.List', ([], {'desc': '"""List of network weights."""'}), "(desc='List of network weights.')\n", (617, 650), False, 'from nipype.interfaces.base import BaseInterface, TraitedSpec, BaseInterfaceInputSpec, traits, Directory\n'), ((664, 759), 'nipype.interfaces.base.Directory', 'Directory', (['"""segmented"""'], {'usedefault': '(True)', 'desc': '"""Folder to store the preprocessing results."""'}), "('segmented', usedefault=True, desc=\n 'Folder to store the preprocessing results.')\n", (673, 759), False, 'from nipype.interfaces.base import BaseInterface, TraitedSpec, BaseInterfaceInputSpec, traits, Directory\n'), ((863, 911), 'nipype.interfaces.base.traits.File', 'traits.File', ([], {'exists': '(True)', 'desc': '"""Segmented lungs"""'}), "(exists=True, desc='Segmented lungs')\n", (874, 911), False, 'from nipype.interfaces.base import BaseInterface, TraitedSpec, BaseInterfaceInputSpec, traits, Directory\n'), ((1227, 1262), 'os.path.abspath', 'os.path.abspath', (['self.inputs.outdir'], {}), '(self.inputs.outdir)\n', (1242, 1262), False, 'import os\n'), ((1352, 1382), 'numpy.asarray', 'np.asarray', (['self.inputs.tensor'], {}), '(self.inputs.tensor)\n', (1362, 1382), True, 'import numpy as np\n'), ((1424, 1435), 'radiants.utils.networks.unet_lung', 'unet_lung', ([], {}), '()\n', (1433, 
1435), False, 'from radiants.utils.networks import unet_lung\n'), ((1676, 1717), 'numpy.asarray', 'np.asarray', (['predictions'], {'dtype': 'np.float16'}), '(predictions, dtype=np.float16)\n', (1686, 1717), True, 'import numpy as np\n'), ((1744, 1772), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (1751, 1772), True, 'import numpy as np\n'), ((4505, 4535), 'numpy.swapaxes', 'np.swapaxes', (['final_image', '(0)', '(2)'], {}), '(final_image, 0, 2)\n', (4516, 4535), True, 'import numpy as np\n'), ((4558, 4588), 'numpy.swapaxes', 'np.swapaxes', (['final_image', '(0)', '(1)'], {}), '(final_image, 0, 1)\n', (4569, 4588), True, 'import numpy as np\n'), ((4968, 4989), 'skimage.filters.thresholding.threshold_otsu', 'threshold_otsu', (['image'], {}), '(image)\n', (4982, 4989), False, 'from skimage.filters.thresholding import threshold_otsu\n'), ((1278, 1299), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (1291, 1299), False, 'import os\n'), ((1313, 1332), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (1324, 1332), False, 'import os\n'), ((4314, 4346), 'numpy.nanmean', 'np.nanmean', (['final_image'], {'axis': '(-1)'}), '(final_image, axis=-1)\n', (4324, 4346), True, 'import numpy as np\n'), ((2110, 2131), 'core.utils.filemanip.split_filename', 'split_filename', (['image'], {}), '(image)\n', (2124, 2131), False, 'from core.utils.filemanip import split_filename\n'), ((3767, 3830), 'numpy.zeros', 'np.zeros', (['(slices, dims[0], dims[1], patches)'], {'dtype': 'np.float32'}), '((slices, dims[0], dims[1], patches), dtype=np.float32)\n', (3775, 3830), True, 'import numpy as np\n'), ((4371, 4392), 'numpy.isnan', 'np.isnan', (['final_image'], {}), '(final_image)\n', (4379, 4392), True, 'import numpy as np\n'), ((3040, 3060), 'nrrd.read', 'nrrd.read', (['reference'], {}), '(reference)\n', (3049, 3060), False, 'import nrrd\n'), ((3081, 3129), 'nrrd.write', 'nrrd.write', (['outname', 'final_prediction'], 
{'header': 'hd'}), '(outname, final_prediction, header=hd)\n', (3091, 3129), False, 'import nrrd\n'), ((3212, 3231), 'nibabel.load', 'nib.load', (['reference'], {}), '(reference)\n', (3220, 3231), True, 'import nibabel as nib\n'), ((3262, 3314), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['final_prediction'], {'affine': 'ref.affine'}), '(final_prediction, affine=ref.affine)\n', (3277, 3314), True, 'import nibabel as nib\n'), ((3335, 3361), 'nibabel.save', 'nib.save', (['im2save', 'outname'], {}), '(im2save, outname)\n', (3343, 3361), True, 'import nibabel as nib\n')] |
#! /usr/bin/python3
# # # # # #
# Please excuse the chaotic code: it was first developed with a different application in mind,
# and then got modified for this project.
# # #
import numpy as np
from time import time
from datetime import datetime
from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger,\
create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict,\
calculate_points
from general_config import config as cfg
# # # # # # # # # # # # # # # # # # # # # # # # #
# # # Argument parsing # # #
# # # # # # # # # # # # # # # # # # # # # # # # #
from argparse import ArgumentParser, RawTextHelpFormatter
# Command-line interface of the experiment runner.  RawTextHelpFormatter
# keeps the newline in the epilog intact.
parser = ArgumentParser(formatter_class=RawTextHelpFormatter,
                        description="Structured Motion Stimuli for Chicken experiments",
                        epilog="If using ipython3, indicate end of ipython arg parser via '--':\n $ ipython3 play.py -- <args>")
parser.add_argument(dest="stimfile", metavar="stimulus_file.py", type=str,
                    help="python file defining the motion structure (current working directory)")
parser.add_argument("-s", dest="rngseed", metavar="rngseed", default=None, type=int,
                    help="Seed for numpy's random number generator (default: None)")
parser.add_argument("-v", dest="vidfile", metavar="video_file.mp4", default=None, type=str,
                    help="Save video of stimulus to disk (default: None)")
parser.add_argument("-t", dest="tmax", metavar="seconds", default=None, type=float,
                    help="Stimulus duration in seconds, required for -v (default: infinity)")
parser.add_argument("-f", dest="isFullscreen", action='store_true',
                    help="Run in full screen (press ESC to close; default: false)")
parser.add_argument("-T", dest="maxTrials", metavar="num trials", default=None, type=int,
                    help="Maximum number of trials (default: infinity)")
parser.add_argument("-R", dest="repTrials", metavar="num reps", default=None, type=int,
                    help="Trial repetitions (requires -T; leads to T/R unique trials; default: 1)")
parser.add_argument("-g", dest="greeter", metavar="string", default=None, type=str,
                    help="Greeter displayed before first trial")
parser.add_argument("-u", dest="userID", metavar="ID", default=None, type=int,
                    help="Integer-valued ID of the participant")
args = parser.parse_args()
# # # Import motion structure from config file # # #
import os
import sys
stimpath, stimfile = os.path.split(args.stimfile)
sys.path.append(stimpath) # make file accessible to import
stimfilebase = stimfile.split(".py")[0]
cmd = "from " + stimfilebase + " import B, lam, tau_vphi" # import relevant stimulus parameters
# NOTE(review): exec of a user-supplied python file is intentional here
# (the stimulus file IS trusted experiment code), but it will run
# arbitrary code -- never point this at untrusted input.
exec(cmd)
# Optional variables (backward compatible): default to None when the
# stimulus file does not define them.
varlist = ["targets", "f_dW", "phi0", "human_readable_dsl", "disc_color"]
for varname in varlist:
    cmd = "from " + stimfilebase + " import " + varname
    try:
        exec(cmd)
    except:
        globals()[varname] = None
# Prepend the participant ID to the data-set label, if given.
if args.userID is not None:
    hdsl = "uid_%05d" % args.userID
    if human_readable_dsl is not None:
        hdsl += "_" + human_readable_dsl
    human_readable_dsl = hdsl
# Without a data-set label nothing is written to disk (dry run).
DRYRUN = human_readable_dsl is None
if DRYRUN:
    print("\n\n # # # D R Y R U N : No data will be saved! # # #\n\n")
print(" > Motion structure loaded from '%s.py'." % args.stimfile)
if disc_color is not None:
    cfg['display']['disc_color'] = disc_color
# # # Select matplotlib backend # # #
# Video rendering requires a finite duration and a non-interactive backend.
import matplotlib as mpl
if args.vidfile is not None:
    assert args.tmax is not None, "Error: For video rendering, a duration < infinity is required."
    assert not os.path.exists(args.vidfile), "Error: Video output file '%s' already exists." % args.vidfile
    mpl.use(cfg['display']['backend_noninteractive'])
    mpl.interactive(False)
else:
    mpl.use(cfg['display']['backend_interactive'])
    mpl.interactive(False)
print(" > Used backend:", mpl.get_backend())
# Assertions on trials and repetitions
if args.repTrials is None:
    args.repTrials = 1
else:
    assert args.maxTrials is not None, "Option -R requires -T (which was not given)."
    assert args.maxTrials % args.repTrials == 0, "Option -R must divide -T."
# # # # # # # # # # # # # # # # # # # # # # # #
# # #  Import and process parameters     # # #
# # # # # # # # # # # # # # # # # # # # # # # #
DEV = cfg['DEV']
if DEV:
    print(" > DEVELOPER mode turned ON!")
# # # RNG seeds (np and trial reps)
np.random.seed(args.rngseed) # random seed
if args.maxTrials is None:
    seedlist = np.array([], dtype=np.uint32)
else:
    # Each unique trial seed occurs repTrials times; shuffling randomizes
    # the presentation order of the repetitions.
    uniqueTrials = args.maxTrials // args.repTrials
    maxval = np.iinfo(np.uint32).max
    seedlist = np.tile( np.random.randint(0, high=maxval, size=uniqueTrials, dtype=np.uint32), args.repTrials)
    np.random.shuffle(seedlist)
seedlist = np.insert(seedlist, 0, [0]) # Seed for the initial fake trial
seedgenerator = (i for i in seedlist) # Yields the next seed
# # # Max time
tmax = args.tmax # max duration
if tmax is None:
    INFRUN = True
    tmax = 1e10 # 300 years
else:
    INFRUN = False
# # # Import motion related parameters # # #
N,M = B.shape # N dots, M motion components
L = B @ np.diag(lam) # The motion structure matrix
dt = cfg['sim']['dt']
tau_vr = cfg['sim']['tau_vr']
tau_r = cfg['sim']['tau_r']
radial_sigma = cfg['sim']['radial_sigma']
radial_mean= cfg['sim']['radial_mean']
# # # Import display related parameters # # #
fps = cfg['display']['fps']
show_labels = cfg['display']['show_labels']
mpl.rc("figure", dpi=cfg['display']['monitor_dpi']) # set monitor dpi
# # # Print a preview of the motion structure # # #
print(" > The motion structure matrix L looks as follows:")
print(asciiL(L, 3))
print(" > This leads to the following velocity covariance matrix:")
# A scalar tau_vphi applies to all M motion components.
if isinstance(tau_vphi, float):
    tau_vphi = np.array( [tau_vphi]*M )
print(asciiL(1/2. * L@np.diag(tau_vphi)@L.T, 3))
# # # # # # # # # # # # # # # # # # # # # # # #
# # #  Initialize the stimulus generator  # # #
# # # # # # # # # # # # # # # # # # # # # # # #
# # # See also class StructuredMotionStimulus in functions.py # # #
kwargs = dict(L=L, tau_vphi=tau_vphi, tau_r=tau_r, tau_vr=tau_vr, radial_sigma=radial_sigma, radial_mean=radial_mean,
              dt=dt, fps=fps, f_dW=f_dW, phi0=phi0, rngseed=args.rngseed, DEV=DEV)
stim = StructuredMotionStimulus(**kwargs)
frame_wct = [] # wall clock times of rendered frames
archive = dict( # Store stimulus history (unless INFRUN)
    t = [0.], # time points of frames (in sim time)
    Phi = [stim.Phi.copy()], # Angular locations and velocities for all N dots
    R = [stim.R.copy()], # Radial locations and velocities for all N dots
    visible = [np.arange(N)] # Which dots are visible at the time frame
    )
# # # INITIALIZE DATA STORAGE # # #
dsl = create_dsl(human_readable_dsl)
print(" > DSL is: %s" % dsl)
# Create output path and copy config data (so the stimulus parameters of
# every recorded session are archived with the data).
if not DRYRUN:
    outdir = create_outdir(dsl)
    from shutil import copyfile
    from os import path
    copyfile(args.stimfile, path.join(outdir, "config.py"))
    copyfile("general_config.py", path.join(outdir, "general_config.py"))
# # # # # # # # # # # # # # # # # # # # # # # # #
# # # Initialize Figure and Plotting # # #
# # # # # # # # # # # # # # # # # # # # # # # # #
import pylab as pl
# # # First plot, called only once # # #
def init_plot():
    """Create the polar stimulus figure (axes, dots, labels, text).

    Reads the module-level globals fig, archive, cfg, N, show_labels,
    radial_mean and DEV.  Returns the list of artists that change
    between frames (required for blitting in FuncAnimation).
    """
    # # # Axes setup # # #
    fig.set_facecolor(cfg['display']['bg_color'])
    rect = 0.01, 0.01, 0.98, 0.98
    ax = fig.add_axes(rect, projection='polar')
    ax.set_facecolor(cfg['display']['bg_color'])
    ax.set_thetagrids(np.arange(0,360,45))
    # # # Plot the dots # # #
    # Initial dot positions: the most recent archived frame.
    x = archive['Phi'][-1][:N]
    y = archive['R'][-1][:N]
    norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=1.)
    cmap = pl.cm.Paired
    kwargs = dict(marker='o', s=cfg['display']['disc_radius']**2, c=cfg['display']['disc_color'],
                  cmap=cmap, norm=norm, linewidths=0., zorder=2)
    plottedDots = ax.scatter(x, y, animated=False, **kwargs) # Test if animated and blit should be used in non-interactive backends
    plottedDots.set_visible(False) # Initially dots are invisible
    # # # Plot the labels # # #
    labelkwargs = dict(fontsize=cfg['display']['label_fontsize'], color=cfg['display']['label_color'], weight='bold', ha='center', va='center')
    if not show_labels:
        labelkwargs['visible'] = False # no labels? set invisible.
    plottedLabels = []
    # One 1-based text label per dot, placed on the dot itself.
    for n,(xn,yn) in enumerate(zip(x,y)):
        plottedLabels.append( ax.text(xn, yn, str(n+1), **labelkwargs) )
    # # # Text instructions # # #
    plottedText = ax.text(np.pi/2, 0.25, "", weight='bold', size='14', ha="center")
    # # # Axes range and decoration # # #
    ax.set_rmax(cfg['display']['axes_radius'])
    ax.set_rticks(np.array([1.0,]) * radial_mean)
    ax.set_xticks([])
    if cfg['display']['show_grid']:
        ax.grid(True)
        ax.spines['polar'].set_visible(False)
    else:
        ax.grid(False)
        ax.spines['polar'].set_visible(False)
    # Hide tick labels for participants; keep them in developer mode.
    if not DEV:
        ax.set_yticklabels([])
        ax.set_xticklabels([])
    # # # Return a list of variable figure elements (required for blitting) # # #
    return [plottedDots,] + plottedLabels + [plottedText]
# # # The central routine to update each frame # # #
def update_dots(count, archive, plottedObjects):
    """Per-frame FuncAnimation callback: apply phase logic, redraw dots/labels/text,
    advance the stimulus simulation, and archive the new state.

    Args:
        count: frame counter supplied by FuncAnimation (0 is a ghost call, skipped).
        archive: dict of per-frame history lists ('t', 'Phi', 'R', 'visible').
        plottedObjects: [dots scatter] + label texts + [instruction text] (+ cursor dot),
            as built by init_plot; returned unchanged for blitting.

    Relies on module-level state: globalVars, pointbuffer, phaseChanger, args, cfg,
    stim, targets, N, fps, t, t_start, next_report_time, SIMREADY, frame_wct.
    """
    global globalVars
    global pointbuffer
    # # # matplotlib's FuncAnimation does some ghost calls in the beginning which we skip # # #
    if count == 0:
        return plottedObjects
    # # # Unpack variable elements # # #
    # Layout: [dots] + labels + [text, cursor-dot]; text therefore sits at index -2.
    plottedDots, plottedLabels, plottedText = plottedObjects[0], plottedObjects[1:-2], plottedObjects[-2]
    # # # Set Phase specific controls # # #
    frame_in_trial = globalVars.frame_in_trial
    phase = phaseChanger.getPhase(frame_in_trial)
    if globalVars.trial_number == 0: # Start of experiment
        plottedDots.set_visible(False)
    else:
        plottedDots.set_visible(True)
    #print(frame_in_trial, phase)
    # All requested trials done: freeze everything and show the farewell message.
    if (args.maxTrials is not None) and (globalVars.trial_number > args.maxTrials):
        globalVars.PAUSE = True
        globalVars.HIDETARGETS = False
        globalVars.fade_frame_state = 0
        globalVars.cursor.set_visible(False)
        plottedDots.set_visible(False)
        s = "%d points\n" % pointbuffer if pointbuffer is not None else ""
        plottedText.set_text(s + "%d trials completed.\nThank you very much!\nClose with <ESC>" % args.maxTrials)
        globalVars.COMPLETED = True
        return plottedObjects
    if phase == "still":
        globalVars.PAUSE = True
        globalVars.HIDETARGETS = False
        globalVars.MOUSEWASRESET = False
        globalVars.fade_frame_state = 0
        # NOTE(review): both branches set the same empty text — the <25 split looks
        # like a leftover from an earlier on-screen countdown; confirm before removing.
        if frame_in_trial < 25:
            plottedText.set_text("")
        else:
            plottedText.set_text("")
        globalVars.cursor.set_visible(False)
    elif phase == "present":
        globalVars.PAUSE = False
        globalVars.HIDETARGETS = False
        globalVars.fade_frame_state = 0
        plottedText.set_text("")
        globalVars.cursor.set_visible(False)
    elif phase == "fade":
        globalVars.PAUSE = False
        globalVars.HIDETARGETS = True
        plottedText.set_text("")
        globalVars.cursor.set_visible(False)
    elif phase == "track":
        globalVars.PAUSE = False
        globalVars.HIDETARGETS = True
        globalVars.fade_frame_state = cfg['experiment']['fade']['numFrames']
        plottedText.set_text("")
        globalVars.cursor.set_visible(False)
        globalVars.cursor.reset_mouse_position() # always reset mouse pos to prevent glitches
    elif phase == "predict":
        if not globalVars.MOUSEWASRESET:
            globalVars.cursor.reset_mouse_position()
            globalVars.MOUSEWASRESET = True
        if len(globalVars.choicetimes) == 1: # Only trial start time
            # Record the start of the decision period (rounded to this frame).
            globalVars.choicetimes.append(str(datetime.now()))
        globalVars.PAUSE = True
        globalVars.HIDETARGETS = True
        globalVars.fade_frame_state = cfg['experiment']['fade']['numFrames']
        plottedText.set_text("Make your predictions")
        globalVars.cursor.set_visible(True)
    elif phase == "after":
        pointbuffer = calculate_points(globalVars, archive)
        if (not DRYRUN) and (not globalVars.writtenToDisk):
            datadict = build_data_dict(globalVars, archive)
            fname = fname_of_trial(dsl, globalVars.trial_number)
            write_trial_to_file(fname, datadict)
            globalVars.writtenToDisk = True
        # If all trials complete, we automatically start a new trial to clean up.
        if (args.maxTrials is not None) and (globalVars.trial_number == args.maxTrials):
            plottedDots.set_visible(False)
            globalVars.start_new_trial()
        else:
            globalVars.PAUSE = True
            globalVars.HIDETARGETS = False
            globalVars.fade_frame_state = cfg['experiment']['fade']['numFrames']
            s = "%d points\n" % pointbuffer if pointbuffer is not None else ""
            if args.maxTrials is not None:
                nLeft = args.maxTrials - globalVars.trial_number
                s += "%d trials left\n" % nLeft if nLeft > 1 else "%d trial left\n" % nLeft
            plottedText.set_text(s + "<Mouse click> or <space>\nto proceed")
        globalVars.cursor.set_visible(False)
    # # # Some necessary book keeping # # #
    global SIMREADY, t, next_report_time
    assert SIMREADY, "Error: Plotting update called before sim was ready. Too high fps?"
    if (time() - t_start) > next_report_time: # Print progress?
        next_report_time += 1
        print(" > Wall-clock time: %7.3fs, simulation time: %7.3fs, frame number: %5d" % (time() - t_start, t, count))
    # # # Update the figure with latest data # # #
    x = archive['Phi'][-1][:N]
    y = archive['R'][-1][:N]
    plottedDots.set_offsets(np.vstack([x,y]).T)
    for n,(xn,yn) in enumerate(zip(x,y)):
        plottedLabels[n].set_position((xn, yn))
    cmap = plottedDots.get_cmap()
    rgbcolors = [cmap(c) for c in cfg['display']['disc_color']]
    if (targets is not None) and (globalVars.HIDETARGETS is True):
        for i in targets:
            # NOTE(review): fade_frame_state advances once per *target* per frame,
            # so fading speeds up with more targets — confirm this is intended.
            globalVars.fade_frame_state = min(globalVars.fade_frame_state + 1, cfg['experiment']['fade']['numFrames'])
            f_alpha = lambda n: 1 - n/cfg['experiment']['fade']['numFrames']
            rgbcolors[i] = rgbcolors[i][:3] + (f_alpha(globalVars.fade_frame_state),)
    plottedDots.set_color(rgbcolors)
    frame_wct.append(time()) # Store the time of frame drawing
    # # # Integrate the stimulus until the next frame # # #
    SIMREADY = False
    nSteps = 0 if globalVars.PAUSE else None
    t_in_trial, phi, r = stim.advance(nSteps) # See class StructuredMotionStimulus in functions.py for dynamics
    t += 1/fps
    # # # Store the new state # # #
    if t_in_trial > archive['t'][-1]:
        archive['t'] += [t_in_trial]
        archive['Phi'] += [phi]
        archive['R'] += [r]
        visible_dots = np.arange(N).tolist()
        if phase in ("track", "predict", "after"):
            for i in targets:
                visible_dots.pop(visible_dots.index(i))
        archive['visible'] += [visible_dots]
    SIMREADY = True
    # # # Test for end of stimulus presentation (-t option) # # #
    if not INFRUN and (count >= ani_frames - 1):
        print(" > Wall-clock time: %7.3fs, simulation time: %7.3fs, frame number: %5d" % (time() - t_start, t, count))
        if mpl.get_backend() == "TkAgg": # TkAgg has this nasty bug: https://github.com/matplotlib/matplotlib/issues/9856/
            print(" > Done. Please close the figure window.")
        else:
            print(" > Done. Figure window will be closed.")
        pl.close(fig) # Close the figure and thus release the block
    # # # Return the list of variable figure elements (required for blitting) # # #
    if phase != "predict":
        globalVars.frame_in_trial += 1
    return plottedObjects
# # # Initialize figure and 1st plot # # #
if args.isFullscreen:
    pl.matplotlib.rcParams['toolbar'] = 'None'
fig = pl.figure(figsize=cfg['display']['figsize'])
# NOTE(review): canvas.set_window_title is deprecated in newer matplotlib
# (use fig.canvas.manager.set_window_title) — confirm target matplotlib version.
fig.canvas.set_window_title("Structured Motion Stimulus")
# # # Greeter # # #
# Show an optional instruction dialog before the experiment (Qt5 backend only).
if (args.greeter is not None) and mpl.get_backend() == "Qt5Agg":
    from PyQt5 import QtWidgets
    sizeObject = QtWidgets.QDesktopWidget().screenGeometry(-1)
    w, h = sizeObject.width(), sizeObject.height()
    from PyQt5.QtWidgets import QMessageBox
    mbox = QMessageBox()
    mbox.resize(w, h)
    # Use <br> in greeter string for new line
    mbox.information(mbox, "Prediction task", args.greeter)
# # # Init actual plot # # #
plottedObjects = init_plot()
if args.isFullscreen:
    manager = pl.get_current_fig_manager()
    # Fullscreen toggling is backend-specific.
    if mpl.get_backend() == "TkAgg":
        manager.full_screen_toggle()
    elif mpl.get_backend() in ( "Qt4Agg", "Qt5Agg" ):
        manager.window.showFullScreen()
# # # Init time domains # # #
t = 0. # sim time
t_start = time() # wall clock time
next_report_time = 0 # printing of progress (in wall clock time)
SIMREADY = True # A "lock" for security
# # # Number of frames (function calls) for the animation # # #
ani_frames = None if INFRUN else int(round(tmax * fps)) + 1
# # # Inter-frame interval # # #
if args.vidfile is not None:
    interval = 1 # Render to video? As fast as possible
else:
    interval = 1/fps*1000 # Live preview? 1 / frames per second
phaseChanger = PhaseChanger(cfg['experiment'])
# # # # # # # # # # # # # # # # # # # # # # # # #
# # # Main loop # # #
# # # # # # # # # # # # # # # # # # # # # # # # #
# Plain attribute container shared by the animation callback and event handlers.
class Foo:
    pass
globalVars = Foo()
globalVars.PAUSE = False
globalVars.HIDETARGETS = False
globalVars.COMPLETED = False
globalVars.fig = fig
globalVars.cursor = Cursor(ax=fig.get_axes()[0])
globalVars.phaseChanger = phaseChanger
globalVars.fade_frame_state = cfg['experiment']['fade']['numFrames']
globalVars.frame_in_trial = 0
globalVars.trial_number = 0
globalVars.trial_seed = None
# collect the predictions
globalVars.targets = np.copy(targets)
np.random.shuffle(globalVars.targets)
cmap = plottedObjects[0].get_cmap()
globalVars.targetColors = [cmap(c) for c in cfg['display']['disc_color'][globalVars.targets]]
globalVars.prediction = []
globalVars.choicetimes = [str(datetime.now())] # Fmt: [start of trial, start of decision period (rounded to frame), time of 1st choice, time of 2nd choice]
globalVars.f_points = cfg['experiment']['f_points']
nextColor = globalVars.targetColors[0]
globalVars.cursor.set_dotkwargs(color=nextColor, size=cfg['display']['disc_radius'][targets[0]])
connect_event_handlers(globalVars=globalVars)
# Append the cursor dot so blitting also refreshes it each frame.
plottedObjects += [globalVars.cursor.dot]
fig.gca().set_rmax(cfg['display']['axes_radius'])
def start_new_trial():
    """Reset all per-trial state and (re)seed the stimulus for the next trial.

    Side effects (closure over module globals): banks the previous trial's
    points into allPoints, bumps the trial counter, draws the next RNG seed,
    reshuffles targets with the stimulus RNG, resets cursor color, prediction
    and choice-time buffers, and re-initializes the frame archive.
    """
    if (globalVars.trial_number > 0) and (pointbuffer is not None):
        allPoints.append(pointbuffer)
    globalVars.trial_number += 1
    print("\n # # # # New trial (%d) # # # #\n" % globalVars.trial_number)
    try:
        # Draw the next seed; StopIteration means the seed list is exhausted
        # and the stimulus keeps its current RNG state.
        globalVars.trial_seed = next(seedgenerator)
        stim.set_seed(globalVars.trial_seed)
    except StopIteration:
        globalVars.trial_seed = None
        print("No more seeds specified.")
    globalVars.phaseChanger.newTrial()
    globalVars.frame_in_trial = 0
    globalVars.targets = np.copy(targets)
    stim.rng.shuffle(globalVars.targets) # repetition trials have identical order
    cmap = plottedObjects[0].get_cmap()
    globalVars.targetColors = [cmap(c) for c in cfg['display']['disc_color'][globalVars.targets]]
    globalVars.cursor.set_dotkwargs(color=globalVars.targetColors[0])
    globalVars.prediction = []
    globalVars.choicetimes = [str(datetime.now())]
    globalVars.writtenToDisk = False
    stim.reset_states()
    global archive
    archive['t'] = [stim.t_in_trial] # time points of frames (in sim time)
    archive['Phi'] = [stim.Phi.copy()] # Angular locations and velocities for all N dots
    archive['R'] = [stim.R.copy()] # Radial locations and velocities for all N dots
    archive['visible'] = [np.arange(N)] # Which dots are visible at this frame
globalVars.start_new_trial = start_new_trial
start_new_trial()
pointbuffer = None
allPoints = []
# FAKE END OF TRIAL
# NOTE(review): this stanza forces the "after" phase at startup (frame 10000,
# prediction marked made, nothing written) so the experiment begins paused —
# looks like deliberate bootstrap/debug state; confirm before removing.
globalVars.frame_in_trial = 10000
globalVars.phaseChanger.setPredictionMade()
globalVars.writtenToDisk = True
globalVars.trial_number = 0
print(" > Animation starts.")
import matplotlib.animation as animation
# # # This is our diligent worker # # #
ani = animation.FuncAnimation(fig, update_dots, ani_frames, init_func=None,
                        fargs=(archive, plottedObjects), interval=interval, blit=True, repeat=False)
# # # Render the video or live preview # # #
if args.vidfile is not None: # Video? Use ani.save with external encoding library
    Writer = animation.writers[cfg['video']['renderer']]
    writer = Writer(metadata=dict(title="Structured Motion Stimulus", artist="<NAME>"),
                    fps=fps, codec=cfg['video']['codec'], bitrate=cfg['video']['bitrate'])
    ani.save(args.vidfile, dpi=cfg['video']['dpi'], writer=writer)
    print(" > Video saved to file '%s'." % args.vidfile)
else: # Life preview? Display figure and block further execution
    pl.show(block=True)
# Frame rate and frame times
frame_wct = np.array(frame_wct)
dfwct = frame_wct[1:] - frame_wct[:-1] # Evaluate inter-frame intervals. Did the preview run at correct speed?
print(" > Avg frame interval was %.4fs with std deviation ±%.4fs (target was %.4fs)." % (dfwct.mean(), dfwct.std(), 1/fps))
if not DRYRUN:
    fname = path.join(outdir, "frametimes.npy")
    np.save(fname, frame_wct)
# Points
print(" > Total points: %d. Average: %.2f ± %.2f" % (np.sum(allPoints), np.mean(allPoints), np.std(allPoints)) )
if not DRYRUN:
    fname = path.join(outdir, "points.npy")
    np.save(fname, allPoints)
# # # # # # # # # # # # # # # # # # # # # # # # #
# # # Debriefing # # #
# # # # # # # # # # # # # # # # # # # # # # # # #
t_end = time()
print(" > Stimulus presentation complete (wall-clock duration incl overhead: %.3fs)" % (t_end-t_start))
| [
"pylab.close",
"matplotlib.rc",
"functions.write_trial_to_file",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.iinfo",
"matplotlib.animation.FuncAnimation",
"pylab.get_current_fig_manager",
"numpy.random.randint",
"pylab.figure",
"numpy.arange",
"numpy.mean",
"numpy.d... | [((758, 1000), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'formatter_class': 'RawTextHelpFormatter', 'description': '"""Structured Motion Stimuli for Chicken experiments"""', 'epilog': '"""If using ipython3, indicate end of ipython arg parser via \'--\':\n $ ipython3 play.py -- <args>"""'}), '(formatter_class=RawTextHelpFormatter, description=\n \'Structured Motion Stimuli for Chicken experiments\', epilog=\n """If using ipython3, indicate end of ipython arg parser via \'--\':\n $ ipython3 play.py -- <args>"""\n )\n', (772, 1000), False, 'from argparse import ArgumentParser, RawTextHelpFormatter\n'), ((2643, 2671), 'os.path.split', 'os.path.split', (['args.stimfile'], {}), '(args.stimfile)\n', (2656, 2671), False, 'import os\n'), ((2672, 2697), 'sys.path.append', 'sys.path.append', (['stimpath'], {}), '(stimpath)\n', (2687, 2697), False, 'import sys\n'), ((4674, 4702), 'numpy.random.seed', 'np.random.seed', (['args.rngseed'], {}), '(args.rngseed)\n', (4688, 4702), True, 'import numpy as np\n'), ((5082, 5109), 'numpy.insert', 'np.insert', (['seedlist', '(0)', '[0]'], {}), '(seedlist, 0, [0])\n', (5091, 5109), True, 'import numpy as np\n'), ((5953, 6004), 'matplotlib.rc', 'mpl.rc', (['"""figure"""'], {'dpi': "cfg['display']['monitor_dpi']"}), "('figure', dpi=cfg['display']['monitor_dpi'])\n", (5959, 6004), True, 'import matplotlib as mpl\n'), ((6792, 6826), 'functions.StructuredMotionStimulus', 'StructuredMotionStimulus', ([], {}), '(**kwargs)\n', (6816, 6826), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((7332, 7362), 'functions.create_dsl', 'create_dsl', (['human_readable_dsl'], {}), '(human_readable_dsl)\n', (7342, 7362), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, 
write_trial_to_file, build_data_dict, calculate_points\n'), ((16818, 16862), 'pylab.figure', 'pl.figure', ([], {'figsize': "cfg['display']['figsize']"}), "(figsize=cfg['display']['figsize'])\n", (16827, 16862), True, 'import pylab as pl\n'), ((17732, 17738), 'time.time', 'time', ([], {}), '()\n', (17736, 17738), False, 'from time import time\n'), ((18248, 18279), 'functions.PhaseChanger', 'PhaseChanger', (["cfg['experiment']"], {}), "(cfg['experiment'])\n", (18260, 18279), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((18872, 18888), 'numpy.copy', 'np.copy', (['targets'], {}), '(targets)\n', (18879, 18888), True, 'import numpy as np\n'), ((18889, 18926), 'numpy.random.shuffle', 'np.random.shuffle', (['globalVars.targets'], {}), '(globalVars.targets)\n', (18906, 18926), True, 'import numpy as np\n'), ((19438, 19483), 'functions.connect_event_handlers', 'connect_event_handlers', ([], {'globalVars': 'globalVars'}), '(globalVars=globalVars)\n', (19460, 19483), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((21405, 21556), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'update_dots', 'ani_frames'], {'init_func': 'None', 'fargs': '(archive, plottedObjects)', 'interval': 'interval', 'blit': '(True)', 'repeat': '(False)'}), '(fig, update_dots, ani_frames, init_func=None, fargs\n =(archive, plottedObjects), interval=interval, blit=True, repeat=False)\n', (21428, 21556), True, 'import matplotlib.animation as animation\n'), ((22993, 22999), 'time.time', 'time', ([], {}), '()\n', (22997, 22999), False, 'from time import time\n'), ((3953, 4002), 'matplotlib.use', 'mpl.use', (["cfg['display']['backend_noninteractive']"], 
{}), "(cfg['display']['backend_noninteractive'])\n", (3960, 4002), True, 'import matplotlib as mpl\n'), ((4007, 4029), 'matplotlib.interactive', 'mpl.interactive', (['(False)'], {}), '(False)\n', (4022, 4029), True, 'import matplotlib as mpl\n'), ((4040, 4086), 'matplotlib.use', 'mpl.use', (["cfg['display']['backend_interactive']"], {}), "(cfg['display']['backend_interactive'])\n", (4047, 4086), True, 'import matplotlib as mpl\n'), ((4091, 4113), 'matplotlib.interactive', 'mpl.interactive', (['(False)'], {}), '(False)\n', (4106, 4113), True, 'import matplotlib as mpl\n'), ((4802, 4831), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.uint32'}), '([], dtype=np.uint32)\n', (4810, 4831), True, 'import numpy as np\n'), ((5042, 5069), 'numpy.random.shuffle', 'np.random.shuffle', (['seedlist'], {}), '(seedlist)\n', (5059, 5069), True, 'import numpy as np\n'), ((5577, 5589), 'numpy.diag', 'np.diag', (['lam'], {}), '(lam)\n', (5584, 5589), True, 'import numpy as np\n'), ((6164, 6176), 'functions.asciiL', 'asciiL', (['L', '(3)'], {}), '(L, 3)\n', (6170, 6176), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((6293, 6317), 'numpy.array', 'np.array', (['([tau_vphi] * M)'], {}), '([tau_vphi] * M)\n', (6301, 6317), True, 'import numpy as np\n'), ((7463, 7481), 'functions.create_outdir', 'create_outdir', (['dsl'], {}), '(dsl)\n', (7476, 7481), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((8262, 8312), 'pylab.matplotlib.colors.Normalize', 'pl.matplotlib.colors.Normalize', ([], {'vmin': '(0.0)', 'vmax': '(1.0)'}), '(vmin=0.0, vmax=1.0)\n', (8292, 8312), True, 'import pylab as pl\n'), ((17211, 17224), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], 
{}), '()\n', (17222, 17224), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((17451, 17479), 'pylab.get_current_fig_manager', 'pl.get_current_fig_manager', ([], {}), '()\n', (17477, 17479), True, 'import pylab as pl\n'), ((20139, 20155), 'numpy.copy', 'np.copy', (['targets'], {}), '(targets)\n', (20146, 20155), True, 'import numpy as np\n'), ((22156, 22175), 'pylab.show', 'pl.show', ([], {'block': '(True)'}), '(block=True)\n', (22163, 22175), True, 'import pylab as pl\n'), ((22225, 22244), 'numpy.array', 'np.array', (['frame_wct'], {}), '(frame_wct)\n', (22233, 22244), True, 'import numpy as np\n'), ((3856, 3884), 'os.path.exists', 'os.path.exists', (['args.vidfile'], {}), '(args.vidfile)\n', (3870, 3884), False, 'import os\n'), ((4144, 4161), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (4159, 4161), True, 'import matplotlib as mpl\n'), ((4903, 4922), 'numpy.iinfo', 'np.iinfo', (['np.uint32'], {}), '(np.uint32)\n', (4911, 4922), True, 'import numpy as np\n'), ((4951, 5020), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'maxval', 'size': 'uniqueTrials', 'dtype': 'np.uint32'}), '(0, high=maxval, size=uniqueTrials, dtype=np.uint32)\n', (4968, 5020), True, 'import numpy as np\n'), ((7566, 7596), 'os.path.join', 'path.join', (['outdir', '"""config.py"""'], {}), "(outdir, 'config.py')\n", (7575, 7596), False, 'from os import path\n'), ((7632, 7670), 'os.path.join', 'path.join', (['outdir', '"""general_config.py"""'], {}), "(outdir, 'general_config.py')\n", (7641, 7670), False, 'from os import path\n'), ((8138, 8159), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(45)'], {}), '(0, 360, 45)\n', (8147, 8159), True, 'import numpy as np\n'), ((15186, 15192), 'time.time', 'time', ([], {}), '()\n', (15190, 15192), False, 'from time import time\n'), ((16979, 16996), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (16994, 16996), True, 'import matplotlib as mpl\n'), ((17487, 17504), 'matplotlib.get_backend', 
'mpl.get_backend', ([], {}), '()\n', (17502, 17504), True, 'import matplotlib as mpl\n'), ((19114, 19128), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19126, 19128), False, 'from datetime import datetime\n'), ((20953, 20965), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (20962, 20965), True, 'import numpy as np\n'), ((22532, 22567), 'os.path.join', 'path.join', (['outdir', '"""frametimes.npy"""'], {}), "(outdir, 'frametimes.npy')\n", (22541, 22567), False, 'from os import path\n'), ((22576, 22601), 'numpy.save', 'np.save', (['fname', 'frame_wct'], {}), '(fname, frame_wct)\n', (22583, 22601), True, 'import numpy as np\n'), ((22767, 22798), 'os.path.join', 'path.join', (['outdir', '"""points.npy"""'], {}), "(outdir, 'points.npy')\n", (22776, 22798), False, 'from os import path\n'), ((22807, 22832), 'numpy.save', 'np.save', (['fname', 'allPoints'], {}), '(fname, allPoints)\n', (22814, 22832), True, 'import numpy as np\n'), ((7221, 7233), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (7230, 7233), True, 'import numpy as np\n'), ((9390, 9405), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (9398, 9405), True, 'import numpy as np\n'), ((14178, 14184), 'time.time', 'time', ([], {}), '()\n', (14182, 14184), False, 'from time import time\n'), ((14545, 14562), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (14554, 14562), True, 'import numpy as np\n'), ((16177, 16194), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (16192, 16194), True, 'import matplotlib as mpl\n'), ((16445, 16458), 'pylab.close', 'pl.close', (['fig'], {}), '(fig)\n', (16453, 16458), True, 'import pylab as pl\n'), ((17059, 17085), 'PyQt5.QtWidgets.QDesktopWidget', 'QtWidgets.QDesktopWidget', ([], {}), '()\n', (17083, 17085), False, 'from PyQt5 import QtWidgets\n'), ((17563, 17580), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (17578, 17580), True, 'import matplotlib as mpl\n'), ((20528, 20542), 'datetime.datetime.now', 
'datetime.now', ([], {}), '()\n', (20540, 20542), False, 'from datetime import datetime\n'), ((6340, 6357), 'numpy.diag', 'np.diag', (['tau_vphi'], {}), '(tau_vphi)\n', (6347, 6357), True, 'import numpy as np\n'), ((15704, 15716), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (15713, 15716), True, 'import numpy as np\n'), ((22673, 22690), 'numpy.sum', 'np.sum', (['allPoints'], {}), '(allPoints)\n', (22679, 22690), True, 'import numpy as np\n'), ((22692, 22710), 'numpy.mean', 'np.mean', (['allPoints'], {}), '(allPoints)\n', (22699, 22710), True, 'import numpy as np\n'), ((22712, 22729), 'numpy.std', 'np.std', (['allPoints'], {}), '(allPoints)\n', (22718, 22729), True, 'import numpy as np\n'), ((14375, 14381), 'time.time', 'time', ([], {}), '()\n', (14379, 14381), False, 'from time import time\n'), ((16137, 16143), 'time.time', 'time', ([], {}), '()\n', (16141, 16143), False, 'from time import time\n'), ((12844, 12881), 'functions.calculate_points', 'calculate_points', (['globalVars', 'archive'], {}), '(globalVars, archive)\n', (12860, 12881), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((12965, 13001), 'functions.build_data_dict', 'build_data_dict', (['globalVars', 'archive'], {}), '(globalVars, archive)\n', (12980, 13001), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((13022, 13066), 'functions.fname_of_trial', 'fname_of_trial', (['dsl', 'globalVars.trial_number'], {}), '(dsl, globalVars.trial_number)\n', (13036, 13066), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), 
((13079, 13115), 'functions.write_trial_to_file', 'write_trial_to_file', (['fname', 'datadict'], {}), '(fname, datadict)\n', (13098, 13115), False, 'from functions import asciiL, StructuredMotionStimulus, connect_event_handlers, Cursor, PhaseChanger, create_dsl, create_outdir, fname_of_trial, write_trial_to_file, build_data_dict, calculate_points\n'), ((12533, 12547), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12545, 12547), False, 'from datetime import datetime\n')] |
import argparse
import cv2
import sounddevice as sd
import numpy as np
from wrapify.connect.wrapper import MiddlewareCommunicator
"""
Camera and Microphone listener + publisher
Here we demonstrate
1. Using the Image and AudioChunk messages
2. Single return wrapper functionality in conjunction with synchronous callbacks
3. The spawning of multiple processes specifying different functionality for listeners and publishers
Run:
# Alternative 1
# On machine 1 (or process 1): The audio stream publishing
python3 cam_mic.py --mode publish --stream audio --aud-source 0
# On machine 2 (or process 2): The video stream publishing
python3 cam_mic.py --mode publish --stream video --img-source 0
# On machine 3 (or process 3): The audio stream listening
python3 cam_mic.py --mode listen --stream audio
# On machine 4 (or process 4): The video stream listening
python3 cam_mic.py --mode listen --stream video
# Alternative 2 (concurrent audio and video publishing)
# On machine 1 (or process 1): The audio/video stream publishing
python3 cam_mic.py --mode publish --stream audio video --img-source 0 --aud-source 0
# On machine 2 (or process 2): The audio/video stream listening
python3 cam_mic.py --mode listen --stream audio video
"""
class CamMic(MiddlewareCommunicator):
    """Capture webcam frames and microphone chunks and expose them as middleware topics.

    The decorated methods ``collect_cam`` / ``collect_mic`` act as publishers or
    listeners depending on ``activate_communication`` mode set by the caller.
    """

    def __init__(self, *args, stream=("audio", "video"), aud_source=0,
                 aud_rate=44100, aud_chunk=10000, aud_channels=1, img_source=0,
                 img_width=320, img_height=240, **kwargs):
        """Store capture parameters and open the video device if requested.

        Args:
            stream: iterable of enabled streams, any of "audio" / "video".
            aud_source: sounddevice input device id.
            aud_rate, aud_chunk, aud_channels: audio sampling parameters.
            img_source: cv2 capture device id.
            img_width, img_height: published image size.
        """
        # BUGFIX: the original called super(MiddlewareCommunicator, self).__init__(),
        # which resolves to the class *after* MiddlewareCommunicator in the MRO
        # (object) and therefore skipped MiddlewareCommunicator's own initializer.
        super(CamMic, self).__init__()
        self.aud_source = aud_source
        self.aud_rate = aud_rate
        self.aud_chunk = aud_chunk
        self.aud_channels = aud_channels
        self.img_source = img_source
        self.img_width = img_width
        self.img_height = img_height
        self.enable_audio = "audio" in stream
        if "video" in stream:
            # Open the capture device up front so collect_cam can poll it.
            self.vid_cap = cv2.VideoCapture(img_source)
            self.enable_video = True
        else:
            self.enable_video = False

    @MiddlewareCommunicator.register("Image", "CamMic", "/cam_mic/cam_feed",
                                     carrier="", width="$img_width", height="$img_height", rgb=True)
    def collect_cam(self, img_width=320, img_height=240):
        """Grab one frame from the webcam; fall back to random noise on failure.

        Returns:
            One-element tuple with an HxWx3 image array.
        """
        if self.vid_cap.isOpened():
            # capture the video stream from the webcam
            grabbed, img = self.vid_cap.read()
            if not grabbed:
                print("video not grabbed")
                img = np.random.random((img_width, img_height, 3)) * 255
            else:
                print("video grabbed")
        else:
            print("video capturer not opened")
            img = np.random.random((img_width, img_height, 3)) * 255
        return img,

    @MiddlewareCommunicator.register("AudioChunk", "CamMic", "/cam_mic/audio_feed",
                                     carrier="", rate="$aud_rate", chunk="$aud_chunk", channels="$aud_channels")
    def collect_mic(self, aud=None, aud_rate=44100, aud_chunk=int(44100/5), aud_channels=1):
        """Package an audio buffer with its sample rate for publishing.

        Returns:
            One-element tuple containing (samples, sample_rate).
        """
        aud = aud, aud_rate
        return aud,

    def capture_cam_mic(self):
        """Run the capture loop: audio via sounddevice callback (which also grabs
        video frames), or a video-only busy loop when audio is disabled."""
        if self.enable_audio:
            # capture the audio stream from the microphone; __mic_callback__
            # fires per audio block and drives both publishers
            with sd.InputStream(device=self.aud_source, channels=self.aud_channels, callback=self.__mic_callback__,
                                blocksize=self.aud_chunk,
                                samplerate=self.aud_rate):
                while True:
                    pass
        elif self.enable_video:
            while True:
                self.collect_cam()

    def __mic_callback__(self, audio, frames, time, status):
        """sounddevice InputStream callback: publish a video frame (if enabled)
        and the incoming audio chunk."""
        if self.enable_video:
            self.collect_cam(img_width=self.img_width, img_height=self.img_height)
        self.collect_mic(audio, aud_rate=self.aud_rate, aud_chunk=self.aud_chunk, aud_channels=self.aud_channels)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUGFIX: only release the capture device when it exists; the original
        # accessed self.vid_cap unconditionally and raised AttributeError for
        # audio-only instances.
        if self.enable_video:
            self.vid_cap.release()
def parse_args():
    """Build the command-line interface for the cam/mic demo and parse argv.

    Returns:
        argparse.Namespace with mode, stream, and the image/audio settings.
    """
    # Integer-valued options: (flag, default, help text).
    int_options = (
        ("--img-source", 0, "The video capture device id (int camera id)"),
        ("--img-width", 320, "The image width"),
        ("--img-height", 240, "The image height"),
        ("--aud-source", 0, "The audio capture device id (int micrphone id)"),
        ("--aud-rate", 44100, "The audio sampling rate"),
        ("--aud-channels", 1, "The audio channels"),
        ("--aud-chunk", 10000, "The transmitted audio chunk size"),
    )
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str, default="publish",
                        choices={"publish", "listen"}, help="The transmission mode")
    parser.add_argument("--stream", nargs="+", default=["video", "audio"],
                        choices={"video", "audio"}, help="The streamed sensor data")
    for flag, fallback, description in int_options:
        parser.add_argument(flag, type=int, default=fallback, help=description)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    cam_mic = CamMic(stream=args.stream)
    if args.mode == "publish":
        # Publisher: register both feeds and enter the blocking capture loop.
        for method in ("collect_cam", "collect_mic"):
            cam_mic.activate_communication(method, mode="publish")
        cam_mic.capture_cam_mic()
    if args.mode == "listen":
        # Listener: register both feeds, then poll and display whatever arrives.
        for method in ("collect_cam", "collect_mic"):
            cam_mic.activate_communication(method, mode="listen")
        while True:
            aud = None
            if "audio" in args.stream:
                aud, = cam_mic.collect_mic(aud_source=args.aud_source, aud_rate=args.aud_rate,
                                           aud_chunk=args.aud_chunk, aud_channels=args.aud_channels)
            img = None
            if "video" in args.stream:
                img, = cam_mic.collect_cam(img_source=args.img_source, img_width=args.img_width,
                                           img_height=args.img_height)
            if img is not None:
                cv2.imshow("Received Image", img)
                cv2.waitKey(1)
            if aud is not None:
                print(aud)
                sd.play(aud[0].flatten(), samplerate=aud[1])
                sd.wait(1)
| [
"argparse.ArgumentParser",
"cv2.waitKey",
"wrapify.connect.wrapper.MiddlewareCommunicator.register",
"cv2.VideoCapture",
"sounddevice.InputStream",
"numpy.random.random",
"sounddevice.wait",
"cv2.imshow"
] | [((2167, 2306), 'wrapify.connect.wrapper.MiddlewareCommunicator.register', 'MiddlewareCommunicator.register', (['"""Image"""', '"""CamMic"""', '"""/cam_mic/cam_feed"""'], {'carrier': '""""""', 'width': '"""$img_width"""', 'height': '"""$img_height"""', 'rgb': '(True)'}), "('Image', 'CamMic', '/cam_mic/cam_feed',\n carrier='', width='$img_width', height='$img_height', rgb=True)\n", (2198, 2306), False, 'from wrapify.connect.wrapper import MiddlewareCommunicator\n'), ((2906, 3068), 'wrapify.connect.wrapper.MiddlewareCommunicator.register', 'MiddlewareCommunicator.register', (['"""AudioChunk"""', '"""CamMic"""', '"""/cam_mic/audio_feed"""'], {'carrier': '""""""', 'rate': '"""$aud_rate"""', 'chunk': '"""$aud_chunk"""', 'channels': '"""$aud_channels"""'}), "('AudioChunk', 'CamMic',\n '/cam_mic/audio_feed', carrier='', rate='$aud_rate', chunk='$aud_chunk',\n channels='$aud_channels')\n", (2937, 3068), False, 'from wrapify.connect.wrapper import MiddlewareCommunicator\n'), ((4134, 4159), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4157, 4159), False, 'import argparse\n'), ((2043, 2071), 'cv2.VideoCapture', 'cv2.VideoCapture', (['img_source'], {}), '(img_source)\n', (2059, 2071), False, 'import cv2\n'), ((2829, 2873), 'numpy.random.random', 'np.random.random', (['(img_width, img_height, 3)'], {}), '((img_width, img_height, 3))\n', (2845, 2873), True, 'import numpy as np\n'), ((3377, 3532), 'sounddevice.InputStream', 'sd.InputStream', ([], {'device': 'self.aud_source', 'channels': 'self.aud_channels', 'callback': 'self.__mic_callback__', 'blocksize': 'self.aud_chunk', 'samplerate': 'self.aud_rate'}), '(device=self.aud_source, channels=self.aud_channels, callback\n =self.__mic_callback__, blocksize=self.aud_chunk, samplerate=self.aud_rate)\n', (3391, 3532), True, 'import sounddevice as sd\n'), ((6126, 6159), 'cv2.imshow', 'cv2.imshow', (['"""Received Image"""', 'img'], {}), "('Received Image', img)\n", (6136, 6159), False, 'import cv2\n'), 
((6176, 6190), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6187, 6190), False, 'import cv2\n'), ((6327, 6337), 'sounddevice.wait', 'sd.wait', (['(1)'], {}), '(1)\n', (6334, 6337), True, 'import sounddevice as sd\n'), ((2642, 2686), 'numpy.random.random', 'np.random.random', (['(img_width, img_height, 3)'], {}), '((img_width, img_height, 3))\n', (2658, 2686), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from PIL import Image
import os
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
# Containers for image tensors and their class indices.
data = []
labels = []
classes = 43  # GTSRB-style dataset: 43 traffic-sign categories, folders '0'..'42'
cur_path = os.getcwd()
# Retrieving the images and their labels
for i in range(classes):
    path = os.path.join(cur_path, 'train', str(i))
    images = os.listdir(path)
    for a in images:
        try:
            # BUGFIX: join with os.path.join instead of a hard-coded Windows
            # '\\' separator, so the script also runs on POSIX systems.
            image = Image.open(os.path.join(path, a))
            image = image.resize((30, 30))  # fixed 30x30 input for the CNN
            image = np.array(image)
            data.append(image)
            labels.append(i)
        except Exception:
            # BUGFIX: catch Exception instead of a bare except so that
            # KeyboardInterrupt/SystemExit still abort the loader.
            print("Error loading image")
# Converting lists into numpy arrays
data = np.array(data)
labels = np.array(labels)
print(data.shape, labels.shape)
print(data.shape, labels.shape)
#Splitting training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
#Converting the labels into one hot encoding
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
#Building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation='softmax'))
#Compilation of the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
epochs = 15
history = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_test, y_test))
model.save("my_model.h5")
#plotting graphs for accuracy
plt.figure(0)
plt.plot(history.history['accuracy'], label='training accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
plt.figure(1)
plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()
#testing accuracy on test dataset
from sklearn.metrics import accuracy_score
y_test = pd.read_csv('Test.csv')
labels = y_test["ClassId"].values
imgs = y_test["Path"].values
data=[]
for img in imgs:
image = Image.open(img)
image = image.resize((30,30))
data.append(np.array(image))
X_test=np.array(data)
pred = model.predict_classes(X_test)
#Accuracy with the test data
from sklearn.metrics import accuracy_score
print(accuracy_score(labels, pred))
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"keras.layers.MaxPool2D",
"matplotlib.pyplot.figure",
"keras.layers.Flatten",
"keras.utils.to_categorical",
"matplotlib.pyplot.show",
"keras.layers.Dropout",
"matplotlib.py... | [((410, 421), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (419, 421), False, 'import os\n'), ((950, 964), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (958, 964), True, 'import numpy as np\n'), ((975, 991), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (983, 991), True, 'import numpy as np\n'), ((1104, 1166), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, labels, test_size=0.2, random_state=42)\n', (1120, 1166), False, 'from sklearn.model_selection import train_test_split\n'), ((1293, 1320), 'keras.utils.to_categorical', 'to_categorical', (['y_train', '(43)'], {}), '(y_train, 43)\n', (1307, 1320), False, 'from keras.utils import to_categorical\n'), ((1331, 1357), 'keras.utils.to_categorical', 'to_categorical', (['y_test', '(43)'], {}), '(y_test, 43)\n', (1345, 1357), False, 'from keras.utils import to_categorical\n'), ((1390, 1402), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1400, 1402), False, 'from keras.models import Sequential, load_model\n'), ((2289, 2302), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (2299, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2368), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {'label': '"""training accuracy"""'}), "(history.history['accuracy'], label='training accuracy')\n", (2312, 2368), True, 'import matplotlib.pyplot as plt\n'), ((2370, 2433), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {'label': '"""val accuracy"""'}), "(history.history['val_accuracy'], label='val accuracy')\n", (2378, 2433), True, 'import matplotlib.pyplot as plt\n'), ((2435, 2456), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy"""'], {}), "('Accuracy')\n", (2444, 2456), True, 'import matplotlib.pyplot as plt\n'), ((2458, 2478), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", 
(2468, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (2490, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2504, 2516), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2514, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2518, 2528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2526, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2532, 2545), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2542, 2545), True, 'import matplotlib.pyplot as plt\n'), ((2547, 2603), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {'label': '"""training loss"""'}), "(history.history['loss'], label='training loss')\n", (2555, 2603), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2660), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {'label': '"""val loss"""'}), "(history.history['val_loss'], label='val loss')\n", (2613, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2662, 2679), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (2671, 2679), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2691, 2701), True, 'import matplotlib.pyplot as plt\n'), ((2703, 2721), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2713, 2721), True, 'import matplotlib.pyplot as plt\n'), ((2723, 2735), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2733, 2735), True, 'import matplotlib.pyplot as plt\n'), ((2737, 2747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2745, 2747), True, 'import matplotlib.pyplot as plt\n'), ((2841, 2864), 'pandas.read_csv', 'pd.read_csv', (['"""Test.csv"""'], {}), "('Test.csv')\n", (2852, 2864), True, 'import pandas as pd\n'), ((3071, 3085), 'numpy.array', 'np.array', (['data'], {}), 
'(data)\n', (3079, 3085), True, 'import numpy as np\n'), ((556, 572), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (566, 572), False, 'import os\n'), ((1414, 1507), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5, 5)', 'activation': '"""relu"""', 'input_shape': 'X_train.shape[1:]'}), "(filters=32, kernel_size=(5, 5), activation='relu', input_shape=\n X_train.shape[1:])\n", (1420, 1507), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1514, 1571), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5, 5)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(5, 5), activation='relu')\n", (1520, 1571), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1583, 1610), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1592, 1610), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1623, 1641), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (1630, 1641), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1654, 1711), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1660, 1711), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1724, 1781), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1730, 1781), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1794, 1821), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1803, 1821), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1834, 1852), 'keras.layers.Dropout', 'Dropout', 
([], {'rate': '(0.25)'}), '(rate=0.25)\n', (1841, 1852), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1865, 1874), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1872, 1874), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1887, 1916), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1892, 1916), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1929, 1946), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (1936, 1946), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1959, 1990), 'keras.layers.Dense', 'Dense', (['(43)'], {'activation': '"""softmax"""'}), "(43, activation='softmax')\n", (1964, 1990), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((2976, 2991), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (2986, 2991), False, 'from PIL import Image\n'), ((3209, 3237), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'pred'], {}), '(labels, pred)\n', (3223, 3237), False, 'from sklearn.metrics import accuracy_score\n'), ((3044, 3059), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3052, 3059), True, 'import numpy as np\n'), ((632, 659), 'PIL.Image.open', 'Image.open', (["(path + '\\\\' + a)"], {}), "(path + '\\\\' + a)\n", (642, 659), False, 'from PIL import Image\n'), ((723, 738), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (731, 738), True, 'import numpy as np\n')] |
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical
from hyperactive import SimulatedAnnealingOptimizer
# Load MNIST; only a 6000-sample subset is used to keep the search fast.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
size = 6000
X_train = X_train[0:size]
y_train = y_train[0:size]
# Reshape to (N, 28, 28, 1) as expected by Conv2D, and one-hot the labels.
X_train = X_train.reshape(size, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# this defines the structure of the model and the search space in each layer
# Keys apparently follow hyperactive's "<module>.<callable>.<position>"
# convention; each value maps a constructor argument to candidate values.
# NOTE(review): this is the legacy (pre-v1) hyperactive API — confirm the
# installed library version supports a search_config dict.
search_config = {
    "keras.compile.0": {"loss": ["categorical_crossentropy"], "optimizer": ["adam"]},
    "keras.fit.0": {"epochs": [3], "batch_size": [500], "verbose": [0]},
    "keras.layers.Conv2D.1": {
        "filters": [32, 64, 128],
        "kernel_size": range(3, 4),
        "activation": ["relu"],
        "input_shape": [(28, 28, 1)],
    },
    "keras.layers.MaxPooling2D.2": {"pool_size": [(2, 2)]},
    "keras.layers.Conv2D.3": {
        "filters": [32, 64, 128],
        "kernel_size": [3],
        "activation": ["relu"],
    },
    "keras.layers.MaxPooling2D.4": {"pool_size": [(2, 2)]},
    "keras.layers.Conv2D.5": {
        "filters": [32, 64, 128],
        "kernel_size": [3],
        "activation": ["relu"],
        "input_shape": [(28, 28, 1)],
    },
    "keras.layers.MaxPooling2D.6": {"pool_size": [(2, 2)]},
    "keras.layers.Flatten.7": {},
    "keras.layers.Dense.8": {"units": range(30, 200, 10), "activation": ["softmax"]},
    "keras.layers.Dropout.9": {"rate": list(np.arange(0.4, 0.8, 0.1))},
    "keras.layers.Dense.10": {"units": [10], "activation": ["softmax"]},
}
# A single concrete configuration used to warm-start the annealing search.
start_point = {
    "keras.compile.0": {"loss": ["categorical_crossentropy"], "optimizer": ["adam"]},
    "keras.fit.0": {"epochs": [3], "batch_size": [500], "verbose": [0]},
    "keras.layers.Conv2D.1": {
        "filters": [64],
        "kernel_size": [3],
        "activation": ["relu"],
        "input_shape": [(28, 28, 1)],
    },
    "keras.layers.MaxPooling2D.2": {"pool_size": [(2, 2)]},
    "keras.layers.Conv2D.3": {
        "filters": [32],
        "kernel_size": [3],
        "activation": ["relu"],
        "input_shape": [(28, 28, 1)],
    },
    "keras.layers.MaxPooling2D.4": {"pool_size": [(2, 2)]},
    "keras.layers.Conv2D.5": {
        "filters": [32],
        "kernel_size": [3],
        "activation": ["relu"],
        "input_shape": [(28, 28, 1)],
    },
    "keras.layers.MaxPooling2D.6": {"pool_size": [(2, 2)]},
    "keras.layers.Flatten.7": {},
    "keras.layers.Dense.8": {"units": [50], "activation": ["softmax"]},
    "keras.layers.Dropout.9": {"rate": [0.4]},
    "keras.layers.Dense.10": {"units": [10], "activation": ["softmax"]},
}
opt = SimulatedAnnealingOptimizer(search_config, n_iter=3, warm_start=start_point)
# search best hyperparameter for given data
opt.fit(X_train, y_train)
# predict from test data
prediction = opt.predict(X_test)
# calculate accuracy score
score = opt.score(X_test, y_test)
| [
"hyperactive.SimulatedAnnealingOptimizer",
"keras.datasets.mnist.load_data",
"numpy.arange",
"keras.utils.to_categorical"
] | [((185, 202), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (200, 202), False, 'from keras.datasets import mnist\n'), ((366, 389), 'keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (380, 389), False, 'from keras.utils import to_categorical\n'), ((399, 421), 'keras.utils.to_categorical', 'to_categorical', (['y_test'], {}), '(y_test)\n', (413, 421), False, 'from keras.utils import to_categorical\n'), ((2681, 2757), 'hyperactive.SimulatedAnnealingOptimizer', 'SimulatedAnnealingOptimizer', (['search_config'], {'n_iter': '(3)', 'warm_start': 'start_point'}), '(search_config, n_iter=3, warm_start=start_point)\n', (2708, 2757), False, 'from hyperactive import SimulatedAnnealingOptimizer\n'), ((1502, 1526), 'numpy.arange', 'np.arange', (['(0.4)', '(0.8)', '(0.1)'], {}), '(0.4, 0.8, 0.1)\n', (1511, 1526), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import numpy as np
import shutil
import os
import os.path as osp
import imageio
from skimage import transform
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402
# Public names exported by this module.
__all__ = ['visualize_ranked_results']
# NOTE(review): GRID_SPACING and QUERY_EXTRA_SPACING are not referenced by
# the visible code — presumably left over from an image-grid variant; confirm.
GRID_SPACING = 10
QUERY_EXTRA_SPACING = 90
BW = 5  # border width
# NOTE(review): GREEN/RED are also unused here; RED == (0, 0, 255) suggests
# OpenCV-style BGR ordering — confirm before reusing.
GREEN = (0, 255, 0)
RED = (0, 0, 255)
def visualize_ranked_results(
    distmat, query, width=128, height=256, save_dir='', topk=10, resize=True
):
    """Visualizes ranked results.

    For each query a single figure is saved: the query image on the left,
    followed by its top-k nearest matches. Matches with the same person id
    get a green border, mismatches a red one.

    Args:
        distmat (numpy.ndarray): square distance matrix of shape
            (num_query, num_query); the query set is matched against itself.
        query (list of dict): each element must provide the keys
            'impath' (image path) and 'pid' (person id).
        width (int, optional): resized image width. Default is 128.
        height (int, optional): resized image height. Default is 256.
        save_dir (str): directory to save output images.
        topk (int, optional): denoting top-k images in the rank list to be
            visualized. Default is 10.
        resize (bool, optional): resize every image to (height, width).
    """
    assert distmat.shape[0] == distmat.shape[1]
    print('distmat shape', distmat.shape)
    num_q = distmat.shape[0]
    os.makedirs(save_dir, exist_ok=True)

    print('# query: {}'.format(num_q))
    print('Visualizing top-{} ranks ...'.format(topk))
    assert num_q == len(query)

    # For each query row: gallery indices sorted by ascending distance.
    indices = np.argsort(distmat, axis=1)

    for q_idx in range(num_q):
        qimg_path, qpid = query[q_idx]['impath'], query[q_idx]['pid']
        qimg_path_name = qimg_path
        qimg = imageio.imread(qimg_path)
        if resize:
            # skimage's output_shape is (rows, cols), i.e. (height, width);
            # the original passed (width, height) and produced transposed
            # dimensions.
            qimg = transform.resize(qimg, (height, width), order=3, anti_aliasing=True)

        ncols = topk + 1
        fig, ax = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols * 4, 4))
        ax[0].imshow(qimg)
        ax[0].set_title('Query {}'.format(qpid[:25]))
        ax[0].axis('off')

        rank_idx = 1
        # indices[q_idx, 0] is the query itself (distance 0), so skip it.
        for g_idx in indices[q_idx, 1:]:
            gimg_path, gpid = query[g_idx]['impath'], query[g_idx]['pid']
            matched = gpid == qpid
            border_color = 'green' if matched else 'red'
            gimg = imageio.imread(gimg_path)
            if resize:
                gimg = transform.resize(
                    gimg, (height, width), order=3, anti_aliasing=True
                )
            ax[rank_idx].imshow(gimg)
            ax[rank_idx].set_title('{}'.format(gpid[:25]))
            # Hide ticks and labels but keep the spines, so the coloured
            # match/mismatch border stays visible.
            ax[rank_idx].tick_params(
                axis='both',
                which='both',
                bottom=False,
                top=False,
                labelbottom=False,
                right=False,
                left=False,
                labelleft=False,
            )
            for spine in ax[rank_idx].spines.values():
                spine.set_color(border_color)
                spine.set_linewidth(BW)
            rank_idx += 1
            if rank_idx > topk:
                break

        # Save figure, named after the query image file.
        fig_name = osp.basename(osp.splitext(qimg_path_name)[0])
        fig_path = osp.join(save_dir, fig_name + '.jpg')
        fig.savefig(fig_path, format='jpg', dpi=100, bbox_inches='tight', facecolor='w')
        plt.close(fig)

        if (q_idx + 1) % 10 == 0:
            print('- done {}/{}'.format(q_idx + 1, num_q))
        # Hard cap: only the first 101 queries are visualized.
        if q_idx >= 100:
            break

    print('Done. Images have been saved to "{}" ...'.format(save_dir))
| [
"os.makedirs",
"os.path.join",
"os.path.basename",
"matplotlib.pyplot.close",
"imageio.imread",
"numpy.argsort",
"matplotlib.use",
"skimage.transform.resize",
"os.path.splitext",
"matplotlib.pyplot.subplots",
"shutil.copy"
] | [((208, 229), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (222, 229), False, 'import matplotlib\n'), ((1278, 1314), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (1289, 1314), False, 'import os\n'), ((1457, 1484), 'numpy.argsort', 'np.argsort', (['distmat'], {'axis': '(1)'}), '(distmat, axis=1)\n', (1467, 1484), True, 'import numpy as np\n'), ((2554, 2579), 'imageio.imread', 'imageio.imread', (['qimg_path'], {}), '(qimg_path)\n', (2568, 2579), False, 'import imageio\n'), ((2730, 2788), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': 'ncols', 'figsize': '(ncols * 4, 4)'}), '(nrows=1, ncols=ncols, figsize=(ncols * 4, 4))\n', (2742, 2788), True, 'import matplotlib.pyplot as plt\n'), ((4049, 4086), 'os.path.join', 'osp.join', (['save_dir', "(fig_name + '.jpg')"], {}), "(save_dir, fig_name + '.jpg')\n", (4057, 4086), True, 'import os.path as osp\n'), ((4184, 4198), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4193, 4198), True, 'import matplotlib.pyplot as plt\n'), ((2114, 2145), 'os.makedirs', 'os.makedirs', (['dst'], {'exist_ok': '(True)'}), '(dst, exist_ok=True)\n', (2125, 2145), False, 'import os\n'), ((2379, 2400), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (2390, 2400), False, 'import shutil\n'), ((2618, 2686), 'skimage.transform.resize', 'transform.resize', (['qimg', '(width, height)'], {'order': '(3)', 'anti_aliasing': '(True)'}), '(qimg, (width, height), order=3, anti_aliasing=True)\n', (2634, 2686), False, 'from skimage import transform\n'), ((3146, 3171), 'imageio.imread', 'imageio.imread', (['gimg_path'], {}), '(gimg_path)\n', (3160, 3171), False, 'import imageio\n'), ((2195, 2221), 'shutil.copy', 'shutil.copy', (['img_path', 'dst'], {}), '(img_path, dst)\n', (2206, 2221), False, 'import shutil\n'), ((3218, 3286), 'skimage.transform.resize', 'transform.resize', (['gimg', '(width, height)'], {'order': 
'(3)', 'anti_aliasing': '(True)'}), '(gimg, (width, height), order=3, anti_aliasing=True)\n', (3234, 3286), False, 'from skimage import transform\n'), ((3997, 4025), 'os.path.splitext', 'osp.splitext', (['qimg_path_name'], {}), '(qimg_path_name)\n', (4009, 4025), True, 'import os.path as osp\n'), ((2335, 2352), 'os.path.basename', 'osp.basename', (['src'], {}), '(src)\n', (2347, 2352), True, 'import os.path as osp\n')] |
import dateutil.parser as dp
import datetime
import requests
import math
import json
import tqdm
import numpy as np
import os
import dateutil.parser as dp
import tensorflow as tf
from keras.models import Sequential
from keras.models import Model, load_model
from keras.layers import Dense
import json
import math
from sklearn.preprocessing import MinMaxScaler
from keras import backend as K
import keras.losses
import keras
import pywt
import time
import pandas as pd
# 240 hourly samples (10 days) — presumably the model's input window; the
# prediction loop below slices in chunks of 240 as well.
WINDOW_SIZE = 24 * 10
model_path = '../models/model_all_full.h5'
# Maximum candles requested per GDAX API call.
MAX_NUM_SLOTS = 30
TIME_ZONE = '+05:30'  # IST offset string
gdaxBaseUrl = 'https://api.gdax.com/products/{}/candles?start={}&end={}&granularity={}'
cmcBaseUrl = 'https://coinmarketcap.com/currencies/bitcoin/historical-data/?'
# exist_ok avoids the check-then-create race of the original
# "if not os.path.exists(...): os.makedirs(...)" pattern.
os.makedirs('exchange_rates', exist_ok=True)
out_file = 'exchange_rates/{}.csv'
def convertToTimestamp(isoFormat):
    """Convert an ISO-8601 date/time string to a Unix timestamp (seconds).

    Uses datetime.timestamp() instead of strftime('%s'): '%s' is an
    undocumented, glibc-only directive that fails on Windows and ignores
    tzinfo, whereas timestamp() handles both naive (local time) and aware
    datetimes correctly.
    """
    parsed_time = dp.parse(isoFormat)
    return int(parsed_time.timestamp())
def convertTimeToMidnight(timestamp):
    """Floor a Unix timestamp to local midnight of the same (local) day."""
    moment = datetime.datetime.fromtimestamp(timestamp)
    seconds_past_midnight = (moment.hour * 60 + moment.minute) * 60 + moment.second
    return timestamp - seconds_past_midnight
def convertToIso(timestamp):
    """Return the ISO-8601 string for a Unix timestamp (local time)."""
    return datetime.datetime.fromtimestamp(timestamp).isoformat()
def convertIsoToStandardDate(date):
    """Format a Unix timestamp (int or numeric string) as 'YYYYMMDD' in local time.

    NOTE(review): despite the name, the input is a Unix timestamp, not an
    ISO string — int(date) would fail on ISO input.
    """
    moment = datetime.datetime.fromtimestamp(int(date))
    return moment.strftime('%Y%m%d')
def getHistoricalDataFromGdax(product, start, end, granularity):
    """Download candles for `product` from the GDAX REST API over [start, end].

    The interval is split into requests of at most MAX_NUM_SLOTS candles.
    For every candle the leading timestamp column is dropped and the day's
    CoinMarketCap market cap is appended, so each returned row is
    [low, high, open, close, volume, market_cap].

    NOTE(review): `volume` below actually holds the result of
    getMcapFromCoinMarketCap (a pandas Series, not a scalar), and stays
    unbound if the very first day's lookup fails — verify downstream use.
    """
    data = []
    start_timestamp = convertToTimestamp(start)
    end_timestamp = convertToTimestamp(end)
    # Total number of candles requested, then split into API-sized chunks.
    num_data = (end_timestamp-start_timestamp)/granularity
    # print(num_data)
    num_slots = math.ceil(num_data/MAX_NUM_SLOTS)
    # print(num_slots)
    print("Started Retrieving Data from APIs")
    for index in range(num_slots):
        # ISO bounds of this chunk, clamped to the overall end.
        cur_start = convertToIso(start_timestamp + index*granularity*MAX_NUM_SLOTS)
        cur_end = convertToIso(min(start_timestamp + (index+1)*granularity*MAX_NUM_SLOTS, end_timestamp))
        # print(cur_start,cur_end)
        url = gdaxBaseUrl.format(product,cur_start,cur_end,granularity)
        #print(url)
        response = requests.get(url)
        if response.status_code == 200:
            s = json.loads(response.content.decode())
            #print(len(s))
            previousDate = 0
            # GDAX returns newest-first; iterate oldest-first.
            for row in s[::-1]:
                currentDate = convertIsoToStandardDate(row[0])
                print(currentDate)
                # Fetch the market cap only once per calendar day; rows of
                # the same day reuse the previous `volume` value.
                if currentDate != previousDate:
                    cur_cap = getMcapFromCoinMarketCap(currentDate)
                    if cur_cap is not None:
                        volume = cur_cap
                # row[0] = convertToIso(row[0] - 19800)
                del row[0]
                row.append(volume)
                # print(len(row))
                data.append(row)
                previousDate = currentDate
        else:
            # Failed chunks are silently skipped.
            pass
        # print("Current End : " + cur_end)
        # print("End : " + end)
        if cur_end >= end:
            print("Finished Retrieving Data from APIs")
            return data;
    return data
def getMcapFromCoinMarketCap(currentDate):
    """Scrape the Market Cap for `currentDate` ('YYYYMMDD') from CoinMarketCap.

    Returns the 'Market Cap' column (a pandas Series) of the single-day
    historical table, or None if pandas raises a ValueError while parsing.

    NOTE(review): the `if ...values == '-':` check truth-tests a numpy
    array — fine for the expected single-row table, but it raises for
    multi-row results; confirm the endpoint always returns one row.
    """
    try:
        # get market info for bitcoin from the start of 2016 to the current day
        bitcoin_market_info = pd.read_html(cmcBaseUrl + 'start=' + currentDate + '&end=' + currentDate)[0]
        # convert the date string to the correct date format
        print('bitcoin market info')
        print(bitcoin_market_info['Date'])
        bitcoin_market_info = bitcoin_market_info.assign(Date=pd.to_datetime(bitcoin_market_info['Date']))
        # when Volume is equal to '-' convert it to 0
        # print('-----------------------------------')
        # print(bitcoin_market_info['Market Cap'].values=='-')
        #TODO
        if bitcoin_market_info['Market Cap'].values=='-':
            bitcoin_market_info.loc[bitcoin_market_info['Market Cap'].values=='-','Market Cap']=0
        # if bitcoin_market_info['Market Cap'].isnull():
        #     bitcoin_market_info.loc[bitcoin_market_info['Market Cap'].isnull(),'Market Cap']=0
        # convert to int
        bitcoin_market_info['Market Cap'] = bitcoin_market_info['Market Cap'].astype('int64')
        # look at the first row
        # print(bitcoin_market_info.shape[0])
        return bitcoin_market_info['Market Cap']
    except ValueError:
        print('======================Value error from market cap api===================')
        return None
# Fetch the last ~10 days of hourly BTC-USD candles, shifted back 10 hours.
# NOTE(review): the fixed -10*60*60 offset looks like a timezone/lag
# adjustment (TIME_ZONE above is IST) — confirm intent.
cur_time = time.time()-10*60*60
start = convertToIso(cur_time-10*24*60*60-10*60*60)
end = convertToIso(cur_time)
granularity = 3600  # one candle per hour
#TODO: get in ist 00:00 from the api
print('Getting BTC exchange rates...')
rates_btc = getHistoricalDataFromGdax('BTC-USD',start,end,granularity)
# np.savetxt(out_file.format('bitcoin'),rates_btc,delimiter=',',header='time, low, high, open, close, volume')
# np.savetxt(out_file.format('etherium'),rates_btc,delimiter=',',header='time, low, high, open, close, volume')
#make prediction
def fitScaler(data):
    """Fit a fresh MinMaxScaler on `data` and return it (fit returns self)."""
    return MinMaxScaler().fit(data)
def normailize_data(data, scaler):
    """Apply a fitted scaler's transform to `data` and return the result.

    NOTE(review): the name misspells 'normalize'; kept as-is so existing
    callers keep working.
    """
    return scaler.transform(data)
def custom_objective(y_true, y_pred):
    """Asymmetric Keras loss built on backend graph ops.

    When prediction and target disagree in sign (y_true * y_pred < 0) a
    heavy alpha-weighted penalty is used; otherwise plain squared error.
    Returns the per-sample mean over the last axis.
    """
    alpha = 100  # weight of the wrong-sign penalty term
    # K.switch(cond, then, else) evaluates element-wise on the tensors.
    loss = K.switch(K.less(y_true*y_pred,0),\
        alpha*y_pred**2-K.sign(y_true)*y_pred+K.abs(y_true),\
        K.square(y_true-y_pred))
    return K.mean(loss,axis=1)
# Register the custom loss under keras.losses so load_model can resolve it
# by name when deserializing the saved model.
keras.losses.custom_objective = custom_objective

rates_btc = np.array(rates_btc)
rates = rates_btc[:, :]

# Re-fit the scaler on the same historical slice used at training time so
# live predictions are normalised consistently.
df_data = pd.read_csv('../data/bitcoin_historical_hourly_data.csv', sep=',')
# DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# to_numpy() is the supported replacement.
data = df_data.to_numpy()
print('data shape')
print(data.shape)
TRAIN_TEST_SPLIT = 0.8
train_index = math.floor(TRAIN_TEST_SPLIT * len(data))
# NOTE(review): column 0 (presumably a timestamp) is excluded from
# scaling — confirm against the CSV header.
scaler = fitScaler(data[:train_index + WINDOW_SIZE, 1:])

# Load the trained model (needs custom_objective registered above).
model = load_model(model_path)
def makeDwtFeatures(rates_window):
    """Build a flat feature list from a level-1 'db1' wavelet transform.

    For each column of the 2-D array `rates_window`, the approximation and
    detail coefficients are concatenated and appended to one flat list.
    """
    features = []
    for col_idx in range(len(rates_window[0])):
        approx, detail = pywt.dwt(rates_window[:, col_idx], 'db1')
        features.extend(np.concatenate((approx, detail), axis=-1).tolist())
    return features
# Normalise the freshly fetched rates with the scaler fitted above, then
# roll the model forward autoregressively: each prediction is appended to
# the input window for the next step.
rates_norm = scaler.transform(rates)
all_rates = rates_norm.copy()
num_predictions = 24*7  # predict one week ahead, hour by hour
preds_arr = []
timestamp_arr = []
for index in range(num_predictions):
    print('shape of rates')
    print(rates.shape)
    # Trim the window to a multiple of 240 rows (WINDOW_SIZE) before the
    # DWT. NOTE(review): this leaves a multiple of 240 rows, not exactly
    # 240 — confirm that matches the model's expected input length.
    start_index = rates_norm.shape[0]%240
    dwt = makeDwtFeatures(rates_norm[start_index:])
    # print('dwt.shape')
    # print(dwt.shape)
    # Use the model for predictions
    preds = model.predict(np.array([dwt]))[0]
    # preds = [cur_time+index*60*60] + preds
    preds_arr.append(preds)
    # print(preds)
    # Append the prediction and slide the window forward one step.
    all_rates = np.concatenate((all_rates,[preds]),axis=0)
    print('all rates shape')
    print(all_rates.shape)
    rates_norm = all_rates[index+1:,:]
    timestamp_arr.append(cur_time+60*60*index)
timestamp_arr = np.expand_dims(timestamp_arr,axis=1)
# print(timestamp_arr)
# Undo the MinMax scaling and prepend the hourly timestamps as column 0.
preds_arr_denormalised = scaler.inverse_transform(preds_arr)
preds_arr_denormalised = np.concatenate((timestamp_arr,preds_arr_denormalised),axis=-1)
arr_to_write = []
for el in preds_arr_denormalised:
    tmp = []
    # Human-readable local time for the first column.
    tmp.append(datetime.datetime.fromtimestamp(el[0]).strftime('%Y-%m-%d %H:%M:%S'))
    for el_1 in el[1:]:
        tmp.append(el_1)
    arr_to_write.append(tmp)
# print(len(preds_arr))
df_preds = pd.DataFrame(arr_to_write)
df_preds.to_csv('predictions.csv',sep=',',header=['timestamp','low(USD)','high(USD)','open(USD)','close(USD)','volume(USD)','market_cap'],index=0)
# np.savetxt('predictions.csv',preds_arr_denormalised,delimiter=',',header='timestamp,low,high,open,close,volume,market_cap') | [
"keras.models.load_model",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"keras.backend.abs",
"keras.backend.less",
"pandas.DataFrame",
"pywt.dwt",
"os.path.exists",
"requests.get",
"dateutil.parser.parse",
"math.ceil",
"pandas.to_datetime",
"datetime.datetime.fromtimestamp",
"n... | [((5015, 5034), 'numpy.array', 'np.array', (['rates_btc'], {}), '(rates_btc)\n', (5023, 5034), True, 'import numpy as np\n'), ((5146, 5212), 'pandas.read_csv', 'pd.read_csv', (['"""../data/bitcoin_historical_hourly_data.csv"""'], {'sep': '""","""'}), "('../data/bitcoin_historical_hourly_data.csv', sep=',')\n", (5157, 5212), True, 'import pandas as pd\n'), ((5431, 5453), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (5441, 5453), False, 'from keras.models import Model, load_model\n'), ((6398, 6435), 'numpy.expand_dims', 'np.expand_dims', (['timestamp_arr'], {'axis': '(1)'}), '(timestamp_arr, axis=1)\n', (6412, 6435), True, 'import numpy as np\n'), ((6544, 6608), 'numpy.concatenate', 'np.concatenate', (['(timestamp_arr, preds_arr_denormalised)'], {'axis': '(-1)'}), '((timestamp_arr, preds_arr_denormalised), axis=-1)\n', (6558, 6608), True, 'import numpy as np\n'), ((6853, 6879), 'pandas.DataFrame', 'pd.DataFrame', (['arr_to_write'], {}), '(arr_to_write)\n', (6865, 6879), True, 'import pandas as pd\n'), ((804, 836), 'os.path.exists', 'os.path.exists', (['"""exchange_rates"""'], {}), "('exchange_rates')\n", (818, 836), False, 'import os\n'), ((839, 868), 'os.makedirs', 'os.makedirs', (['"""exchange_rates"""'], {}), "('exchange_rates')\n", (850, 868), False, 'import os\n'), ((955, 974), 'dateutil.parser.parse', 'dp.parse', (['isoFormat'], {}), '(isoFormat)\n', (963, 974), True, 'import dateutil.parser as dp\n'), ((1059, 1101), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (1090, 1101), False, 'import datetime\n'), ((1212, 1254), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (1243, 1254), False, 'import datetime\n'), ((1644, 1679), 'math.ceil', 'math.ceil', (['(num_data / MAX_NUM_SLOTS)'], {}), '(num_data / MAX_NUM_SLOTS)\n', (1653, 1679), False, 'import math\n'), ((4020, 4031), 'time.time', 'time.time', 
([], {}), '()\n', (4029, 4031), False, 'import time\n'), ((4566, 4580), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4578, 4580), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4913, 4933), 'keras.backend.mean', 'K.mean', (['loss'], {'axis': '(1)'}), '(loss, axis=1)\n', (4919, 4933), True, 'from keras import backend as K\n'), ((6208, 6252), 'numpy.concatenate', 'np.concatenate', (['(all_rates, [preds])'], {'axis': '(0)'}), '((all_rates, [preds]), axis=0)\n', (6222, 6252), True, 'import numpy as np\n'), ((2077, 2094), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2089, 2094), False, 'import requests\n'), ((4789, 4815), 'keras.backend.less', 'K.less', (['(y_true * y_pred)', '(0)'], {}), '(y_true * y_pred, 0)\n', (4795, 4815), True, 'from keras import backend as K\n'), ((4877, 4902), 'keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (4885, 4902), True, 'from keras import backend as K\n'), ((5585, 5610), 'pywt.dwt', 'pywt.dwt', (['data_col', '"""db1"""'], {}), "(data_col, 'db1')\n", (5593, 5610), False, 'import pywt\n'), ((5622, 5655), 'numpy.concatenate', 'np.concatenate', (['(cA, cD)'], {'axis': '(-1)'}), '((cA, cD), axis=-1)\n', (5636, 5655), True, 'import numpy as np\n'), ((2932, 3005), 'pandas.read_html', 'pd.read_html', (["(cmcBaseUrl + 'start=' + currentDate + '&end=' + currentDate)"], {}), "(cmcBaseUrl + 'start=' + currentDate + '&end=' + currentDate)\n", (2944, 3005), True, 'import pandas as pd\n'), ((4857, 4870), 'keras.backend.abs', 'K.abs', (['y_true'], {}), '(y_true)\n', (4862, 4870), True, 'from keras import backend as K\n'), ((6092, 6107), 'numpy.array', 'np.array', (['[dwt]'], {}), '([dwt])\n', (6100, 6107), True, 'import numpy as np\n'), ((3188, 3231), 'pandas.to_datetime', 'pd.to_datetime', (["bitcoin_market_info['Date']"], {}), "(bitcoin_market_info['Date'])\n", (3202, 3231), True, 'import pandas as pd\n'), ((6682, 6720), 'datetime.datetime.fromtimestamp', 
'datetime.datetime.fromtimestamp', (['el[0]'], {}), '(el[0])\n', (6713, 6720), False, 'import datetime\n'), ((4835, 4849), 'keras.backend.sign', 'K.sign', (['y_true'], {}), '(y_true)\n', (4841, 4849), True, 'from keras import backend as K\n')] |
# Classes and functions to read Apollo SouthBay dataset
import os
import numpy as np
import csv
from typing import List
import third_party.pypcd as pypcd
import misc.poses as poses
from misc.point_clouds import PointCloudLoader
class GroundTruthPoses:
    """Index of ground-truth SE(3) poses loaded from a space-separated file.

    Each file row is: ndx ts x y z qx qy qz qr.  Parsed poses are stored in
    self.pose_ndx as {ndx: (4x4 pose matrix, timestamp)}.
    """
    def __init__(self, pose_filepath):
        assert os.path.isfile(pose_filepath), f'Cannot access pose file: {pose_filepath}'
        self.pose_filepath = pose_filepath
        self.pose_ndx = {}
        self.read_poses()

    def read_poses(self):
        """Parse the pose file and populate self.pose_ndx."""
        with open(self.pose_filepath) as fh:
            for row_num, fields in enumerate(csv.reader(fh, delimiter=' ')):
                assert len(fields) == 9, f'Incorrect format of row {row_num}: {fields}'
                cloud_ndx = int(fields[0])
                ts, x, y, z, qx, qy, qz, qr = (float(v) for v in fields[1:])
                transform = np.eye(4, dtype=np.float64)
                # poses.q2r is fed the quaternion in (w, x, y, z) order.
                transform[0:3, 0:3] = poses.q2r((qr, qx, qy, qz))
                transform[0:3, 3] = np.array([x, y, z])
                self.pose_ndx[cloud_ndx] = (transform, ts)  # (pose, timestamp)
class PointCloud:
    """One point cloud scan: relative file path, SE3 pose and timestamp."""

    # Global id counter shared by every PointCloud instance; each new
    # instance takes the current value and advances it.
    id: int = 0

    def __init__(self, rel_scan_filepath: str, pose: np.ndarray, timestamp: float):
        self.rel_scan_filepath = rel_scan_filepath
        self.pose = pose
        self.timestamp = timestamp
        # Relative point cloud ids start from 1 in each subfolder/traversal;
        # they are encoded in the numeric file name (e.g. '.../17.pcd' -> 17)
        basename = os.path.basename(rel_scan_filepath)
        self.rel_id = int(os.path.splitext(basename)[0])
        # Claim the next global id
        self.id = PointCloud.id
        PointCloud.id += 1
class SouthBayDataset:
    """Index of the Apollo SouthBay dataset.

    Walks the dataset tree (MapData/TestData/TrainData splits, each containing
    location folders) and registers every point cloud found, keyed by the
    globally unique id that the PointCloud class assigns.
    """
    def __init__(self, dataset_root):
        # dataset_root: folder containing the MapData/TestData/TrainData splits
        assert os.path.isdir(dataset_root), f'Cannot access directory: {dataset_root}'
        self.dataset_root = dataset_root
        self.splits = ['MapData', 'TestData', 'TrainData']
        self.pcd_extension = '.pcd' # Point cloud extension
        # location_ndx[split][location] = [... list of global ids in this location ...]
        self.location_ndx = {}
        # global_ndx[global_id] = PointCloud
        self.global_ndx = {}
        for split in self.splits:
            self.location_ndx[split] = {}
            self.index_split(split)
    def index_split(self, split):
        # Index every location folder directly under <dataset_root>/<split>.
        path = os.path.join(self.dataset_root, split)
        assert os.path.isdir(path), f"Missing split: {split}"
        # Index locations (sorted for a deterministic id assignment order)
        locations = os.listdir(path)
        locations = [f for f in locations if os.path.isdir(os.path.join(path, f))]
        locations.sort()
        for loc in locations:
            # Locations may contain multiple subfolders hierachy
            # All point clouds in all subfolders are stored as one list
            rel_working_path = os.path.join(split, loc)
            self.location_ndx[split][loc] = []
            self.index_location(split, loc, rel_working_path)
    def index_location(self, split, loc, rel_working_path):
        # Recursively index one folder of a location. A folder containing both
        # a 'pcds' and a 'poses' child is a traversal: every pose index e in
        # poses/gt_poses.txt corresponds to the scan pcds/<e>.pcd.
        working_path = os.path.join(self.dataset_root, rel_working_path)
        subfolders = os.listdir(working_path)
        if 'pcds' in subfolders and 'poses' in subfolders:
            # Process point clouds and poses
            rel_pcds_path = os.path.join(rel_working_path, 'pcds')
            poses_path = os.path.join(working_path, 'poses')
            poses_filepath = os.path.join(poses_path, 'gt_poses.txt')
            assert os.path.isfile(poses_filepath), f'Missing poses file: {poses_filepath}'
            tp = GroundTruthPoses(poses_filepath)
            for e in tp.pose_ndx:
                se3, ts = tp.pose_ndx[e]
                rel_pcd_filepath = os.path.join(rel_pcds_path, str(e) + self.pcd_extension)
                pcd_filepath = os.path.join(self.dataset_root, rel_pcd_filepath)
                if not os.path.exists(pcd_filepath):
                    # NOTE(review): a missing .pcd file is only reported; the
                    # cloud is still registered below — confirm this is intended
                    print(f'Missing pcd file: {pcd_filepath}')
                pc = PointCloud(rel_pcd_filepath, se3, ts)
                self.global_ndx[pc.id] = pc
                self.location_ndx[split][loc].append(pc.id)
        elif 'pcds' in subfolders or 'poses' in subfolders:
            assert False, 'Something wrong. Either pcds or poses folder is missing'
        # Recursively process other subfolders - check if they contain point data
        rel_subfolders = [os.path.join(rel_working_path, p) for p in subfolders]
        rel_subfolders = [p for p in rel_subfolders if os.path.isdir(os.path.join(self.dataset_root, p))]
        for sub in rel_subfolders:
            self.index_location(split, loc, sub)
    def print_info(self):
        # Print a human-readable summary: splits, locations and cloud counts.
        print(f'Dataset root: {self.dataset_root}')
        print(f"Splits: {self.splits}")
        for split in self.location_ndx:
            locations = self.location_ndx[split].keys()
            print(f"Locations in {split}: {locations}")
            for loc in locations:
                pc_list = self.location_ndx[split][loc]
                print(f"{len(pc_list)} point clouds in location {split} - {loc}")
            print("")
        print(f'Last point cloud id: {PointCloud.id - 1}')
    def get_poses(self, split, location=None):
        # Get ids and poses of all point clouds from the given split and optionally within a location
        if location is None:
            locations = list(self.location_ndx[split])
        else:
            locations = [location]
        # Count point clouds
        count_pc = 0
        for loc in locations:
            count_pc += len(self.location_ndx[split][loc])
        # Point cloud global ids
        pc_ids = np.zeros(count_pc, dtype=np.int64)
        # Poses (one 4x4 SE3 matrix per cloud, aligned with pc_ids)
        pc_poses = np.zeros((count_pc, 4, 4), dtype=np.float64)
        # Fill ids and pose tables
        n = 0
        for loc in locations:
            for pc_id in self.location_ndx[split][loc]:
                pc = self.global_ndx[pc_id]
                pc_ids[n] = pc_id
                pc_poses[n] = pc.pose
                n += 1
        return pc_ids, pc_poses
    def get_poses2(self, splits: List[str]):
        # Get ids and poses of all point clouds from the given splits.
        # Assumes every split shares the location set of splits[0].
        locations = list(self.location_ndx[splits[0]])
        print(f"Locations: {locations}")
        # Count point clouds
        count_pc = 0
        for split in splits:
            for loc in locations:
                count_pc += len(self.location_ndx[split][loc])
        # Point cloud global ids
        # NOTE(review): get_poses uses np.int64 here while this method uses
        # np.int32 — confirm the narrower dtype is intentional
        pc_ids = np.zeros(count_pc, dtype=np.int32)
        # Poses (one 4x4 SE3 matrix per cloud, aligned with pc_ids)
        pc_poses = np.zeros((count_pc, 4, 4), dtype=np.float64)
        # Fill ids and pose tables
        n = 0
        for split in splits:
            for loc in locations:
                for pc_id in self.location_ndx[split][loc]:
                    pc = self.global_ndx[pc_id]
                    pc_ids[n] = pc_id
                    pc_poses[n] = pc.pose
                    n += 1
        return pc_ids, pc_poses
class SouthbayPointCloudLoader(PointCloudLoader):
    """PointCloudLoader for SouthBay .pcd scans (read via pypcd)."""

    def set_properties(self):
        # Set point cloud properties, such as ground_plane_level.
        # Must be defined in inherited classes.
        self.ground_plane_level = -1.6

    def read_pc(self, file_pathname):
        """Read a .pcd file and return an (N, 3) array of x/y/z coordinates.

        NaN points are replaced with the origin instead of being dropped, so
        the returned array keeps one row per point in the source cloud.

        :param file_pathname: path to a .pcd file readable by pypcd
        :return: (N, 3) float64 numpy array
        """
        pc = pypcd.PointCloud.from_path(file_pathname)
        # pc.pc_data holds the points as a structured array;
        # pc.fields, pc.count, etc. hold the metadata
        pc = np.stack([pc.pc_data['x'], pc.pc_data['y'], pc.pc_data['z']], axis=1)
        # Replace NaN points with all-zero coords.
        # Fix: the deprecated alias np.float was removed in NumPy 1.24 —
        # use np.float64, which is what np.float resolved to.
        nan_mask = np.isnan(pc).any(axis=1)
        pc[nan_mask] = np.array([0., 0., 0.], dtype=np.float64)
        return pc
| [
"numpy.stack",
"misc.poses.q2r",
"csv.reader",
"os.path.isdir",
"third_party.pypcd.PointCloud.from_path",
"numpy.zeros",
"os.path.exists",
"numpy.isnan",
"os.path.isfile",
"numpy.array",
"os.path.splitext",
"numpy.eye",
"os.path.split",
"os.path.join",
"os.listdir"
] | [((310, 339), 'os.path.isfile', 'os.path.isfile', (['pose_filepath'], {}), '(pose_filepath)\n', (324, 339), False, 'import os\n'), ((1949, 1976), 'os.path.isdir', 'os.path.isdir', (['dataset_root'], {}), '(dataset_root)\n', (1962, 1976), False, 'import os\n'), ((2540, 2578), 'os.path.join', 'os.path.join', (['self.dataset_root', 'split'], {}), '(self.dataset_root, split)\n', (2552, 2578), False, 'import os\n'), ((2594, 2613), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2607, 2613), False, 'import os\n'), ((2688, 2704), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2698, 2704), False, 'import os\n'), ((3229, 3278), 'os.path.join', 'os.path.join', (['self.dataset_root', 'rel_working_path'], {}), '(self.dataset_root, rel_working_path)\n', (3241, 3278), False, 'import os\n'), ((3300, 3324), 'os.listdir', 'os.listdir', (['working_path'], {}), '(working_path)\n', (3310, 3324), False, 'import os\n'), ((5792, 5826), 'numpy.zeros', 'np.zeros', (['count_pc'], {'dtype': 'np.int64'}), '(count_pc, dtype=np.int64)\n', (5800, 5826), True, 'import numpy as np\n'), ((5862, 5906), 'numpy.zeros', 'np.zeros', (['(count_pc, 4, 4)'], {'dtype': 'np.float64'}), '((count_pc, 4, 4), dtype=np.float64)\n', (5870, 5906), True, 'import numpy as np\n'), ((6656, 6690), 'numpy.zeros', 'np.zeros', (['count_pc'], {'dtype': 'np.int32'}), '(count_pc, dtype=np.int32)\n', (6664, 6690), True, 'import numpy as np\n'), ((6726, 6770), 'numpy.zeros', 'np.zeros', (['(count_pc, 4, 4)'], {'dtype': 'np.float64'}), '((count_pc, 4, 4), dtype=np.float64)\n', (6734, 6770), True, 'import numpy as np\n'), ((7410, 7451), 'third_party.pypcd.PointCloud.from_path', 'pypcd.PointCloud.from_path', (['file_pathname'], {}), '(file_pathname)\n', (7436, 7451), True, 'import third_party.pypcd as pypcd\n'), ((7574, 7643), 'numpy.stack', 'np.stack', (["[pc.pc_data['x'], pc.pc_data['y'], pc.pc_data['z']]"], {'axis': '(1)'}), "([pc.pc_data['x'], pc.pc_data['y'], pc.pc_data['z']], axis=1)\n", (7582, 
7643), True, 'import numpy as np\n'), ((7756, 7797), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'np.float'}), '([0.0, 0.0, 0.0], dtype=np.float)\n', (7764, 7797), True, 'import numpy as np\n'), ((577, 605), 'csv.reader', 'csv.reader', (['h'], {'delimiter': '""" """'}), "(h, delimiter=' ')\n", (587, 605), False, 'import csv\n'), ((1643, 1675), 'os.path.split', 'os.path.split', (['rel_scan_filepath'], {}), '(rel_scan_filepath)\n', (1656, 1675), False, 'import os\n'), ((3011, 3035), 'os.path.join', 'os.path.join', (['split', 'loc'], {}), '(split, loc)\n', (3023, 3035), False, 'import os\n'), ((3457, 3495), 'os.path.join', 'os.path.join', (['rel_working_path', '"""pcds"""'], {}), "(rel_working_path, 'pcds')\n", (3469, 3495), False, 'import os\n'), ((3521, 3556), 'os.path.join', 'os.path.join', (['working_path', '"""poses"""'], {}), "(working_path, 'poses')\n", (3533, 3556), False, 'import os\n'), ((3586, 3626), 'os.path.join', 'os.path.join', (['poses_path', '"""gt_poses.txt"""'], {}), "(poses_path, 'gt_poses.txt')\n", (3598, 3626), False, 'import os\n'), ((3646, 3676), 'os.path.isfile', 'os.path.isfile', (['poses_filepath'], {}), '(poses_filepath)\n', (3660, 3676), False, 'import os\n'), ((4548, 4581), 'os.path.join', 'os.path.join', (['rel_working_path', 'p'], {}), '(rel_working_path, p)\n', (4560, 4581), False, 'import os\n'), ((1068, 1095), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float64'}), '(4, dtype=np.float64)\n', (1074, 1095), True, 'import numpy as np\n'), ((1187, 1214), 'misc.poses.q2r', 'poses.q2r', (['(qr, qx, qy, qz)'], {}), '((qr, qx, qy, qz))\n', (1196, 1214), True, 'import misc.poses as poses\n'), ((1245, 1264), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1253, 1264), True, 'import numpy as np\n'), ((1781, 1807), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1797, 1807), False, 'import os\n'), ((3966, 4015), 'os.path.join', 'os.path.join', (['self.dataset_root', 
'rel_pcd_filepath'], {}), '(self.dataset_root, rel_pcd_filepath)\n', (3978, 4015), False, 'import os\n'), ((7708, 7720), 'numpy.isnan', 'np.isnan', (['pc'], {}), '(pc)\n', (7716, 7720), True, 'import numpy as np\n'), ((2764, 2785), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (2776, 2785), False, 'import os\n'), ((4039, 4067), 'os.path.exists', 'os.path.exists', (['pcd_filepath'], {}), '(pcd_filepath)\n', (4053, 4067), False, 'import os\n'), ((4672, 4706), 'os.path.join', 'os.path.join', (['self.dataset_root', 'p'], {}), '(self.dataset_root, p)\n', (4684, 4706), False, 'import os\n')] |
import numpy as np
def features_extract_func(task):
    """Basic feature vector for a task: cpu, memory, duration, queue length."""
    cfg = task.task_config
    return [cfg.cpu, cfg.memory, cfg.duration,
            task.waiting_task_instances_number]


def features_extract_func_ac(task):
    """Extended (actor-critic) feature vector: the basic features plus
    the configured instance count and the running/finished instance counts."""
    extra = [task.task_config.instances_number,
             len(task.running_task_instances),
             len(task.finished_task_instances)]
    return features_extract_func(task) + extra
def features_normalize_func(x):
    """Standardize a 6-d feature vector using precomputed mean/scale constants."""
    mean = np.array([0, 0, 0.65, 0.009, 74.0, 80.3])
    scale = np.array([64, 1, 0.23, 0.005, 108.0, 643.5])
    return (np.array(x) - mean) / scale
def features_normalize_func_ac(x):
    """Standardize a 9-d (actor-critic) feature vector using precomputed
    mean/scale constants."""
    mean = np.array([0, 0, 0.65, 0.009, 74.0, 80.3, 80.3, 80.3, 80.3])
    scale = np.array([64, 1, 0.23, 0.005, 108.0, 643.5, 643.5, 643.5, 643.5])
    return (np.array(x) - mean) / scale
| [
"numpy.array"
] | [((516, 560), 'numpy.array', 'np.array', (['[64, 1, 0.23, 0.005, 108.0, 643.5]'], {}), '([64, 1, 0.23, 0.005, 108.0, 643.5])\n', (524, 560), True, 'import numpy as np\n'), ((697, 762), 'numpy.array', 'np.array', (['[64, 1, 0.23, 0.005, 108.0, 643.5, 643.5, 643.5, 643.5]'], {}), '([64, 1, 0.23, 0.005, 108.0, 643.5, 643.5, 643.5, 643.5])\n', (705, 762), True, 'import numpy as np\n'), ((457, 468), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (465, 468), True, 'import numpy as np\n'), ((471, 512), 'numpy.array', 'np.array', (['[0, 0, 0.65, 0.009, 74.0, 80.3]'], {}), '([0, 0, 0.65, 0.009, 74.0, 80.3])\n', (479, 512), True, 'import numpy as np\n'), ((620, 631), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (628, 631), True, 'import numpy as np\n'), ((634, 693), 'numpy.array', 'np.array', (['[0, 0, 0.65, 0.009, 74.0, 80.3, 80.3, 80.3, 80.3]'], {}), '([0, 0, 0.65, 0.009, 74.0, 80.3, 80.3, 80.3, 80.3])\n', (642, 693), True, 'import numpy as np\n')] |
#Detect the Aerobic Bacteria
#Blue and red indicator dyes in the plate color the colonies. Count all
#colonies regardless of their size or color intensity.
#result print in the blue dot. total numbers in the consol
#Author <NAME> 2021 Aug
#<EMAIL>
#product code 6478
import cv2 as cv
import numpy as np
# Load the plate photo and show the original for reference
img = cv.imread('ABC_BR.png')
cv.imshow('origin', img)

# Crop to keep the center of the plate (drop the dish rim)
cropped_image = img[15:650, 15:650]
cv.imshow('crop', cropped_image)

# Blur to suppress pixel noise before feature detection
cropped_image = cv.GaussianBlur(cropped_image, (5, 5), cv.BORDER_DEFAULT)

# Colonies are stained blue/red, so work on the green channel
b, g, r = cv.split(cropped_image)
cv.imshow('g', g)

# Detect up to 490000 strong corners; each detected point marks a colony
corners = cv.goodFeaturesToTrack(g, 490000, 0.02, 8)
# Fix: np.int0 was a deprecated alias removed in NumPy 2.0;
# np.intp is the equivalent platform-sized integer type
corners = np.intp(corners)

# Draw a dot on every detection and count them
count = 0
for i in corners:
    x, y = i.ravel()
    cv.circle(cropped_image, (x, y), 3, 255, -1)
    count = count + 1

cv.imshow('detected', cropped_image)
print("Total count: {}".format(count))
cv.waitKey(0)
| [
"cv2.GaussianBlur",
"cv2.circle",
"numpy.int0",
"cv2.waitKey",
"cv2.imread",
"cv2.split",
"cv2.goodFeaturesToTrack",
"cv2.imshow"
] | [((312, 335), 'cv2.imread', 'cv.imread', (['"""ABC_BR.png"""'], {}), "('ABC_BR.png')\n", (321, 335), True, 'import cv2 as cv\n'), ((336, 360), 'cv2.imshow', 'cv.imshow', (['"""origin"""', 'img'], {}), "('origin', img)\n", (345, 360), True, 'import cv2 as cv\n'), ((422, 454), 'cv2.imshow', 'cv.imshow', (['"""crop"""', 'cropped_image'], {}), "('crop', cropped_image)\n", (431, 454), True, 'import cv2 as cv\n'), ((470, 527), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['cropped_image', '(5, 5)', 'cv.BORDER_DEFAULT'], {}), '(cropped_image, (5, 5), cv.BORDER_DEFAULT)\n', (485, 527), True, 'import cv2 as cv\n'), ((534, 557), 'cv2.split', 'cv.split', (['cropped_image'], {}), '(cropped_image)\n', (542, 557), True, 'import cv2 as cv\n'), ((558, 575), 'cv2.imshow', 'cv.imshow', (['"""g"""', 'g'], {}), "('g', g)\n", (567, 575), True, 'import cv2 as cv\n'), ((586, 628), 'cv2.goodFeaturesToTrack', 'cv.goodFeaturesToTrack', (['g', '(490000)', '(0.02)', '(8)'], {}), '(g, 490000, 0.02, 8)\n', (608, 628), True, 'import cv2 as cv\n'), ((636, 652), 'numpy.int0', 'np.int0', (['corners'], {}), '(corners)\n', (643, 652), True, 'import numpy as np\n'), ((769, 805), 'cv2.imshow', 'cv.imshow', (['"""detected"""', 'cropped_image'], {}), "('detected', cropped_image)\n", (778, 805), True, 'import cv2 as cv\n'), ((844, 857), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (854, 857), True, 'import cv2 as cv\n'), ((706, 750), 'cv2.circle', 'cv.circle', (['cropped_image', '(x, y)', '(3)', '(255)', '(-1)'], {}), '(cropped_image, (x, y), 3, 255, -1)\n', (715, 750), True, 'import cv2 as cv\n')] |
import unittest
from numpy import arange, cos, array, all, isclose, ones
from classification import FastNeuralNetwork as nn
class FastNeuralNetworkTest(unittest.TestCase):
    """Tests for FastNeuralNetwork: forward propagation, cost and gradient.

    The fixture is a tiny 2-input / 2-hidden / 4-output network with a fixed
    flat parameter vector, three training samples, and hand-computed expected
    intermediate values (th1, th2, z2, a2, a3) shared by all tests.
    """
    def setUp(self):
        # Flat parameter vector 0.1 .. 1.8, unrolled into th1 (2x3) and th2 (4x3)
        self.theta = arange(1, 19) / 10.0
        self.il = 2  # input layer size
        self.hl = 2  # hidden layer size
        self.nl = 4  # output layer size (number of labels)
        X = cos([[1, 2], [3, 4], [5, 6]])
        self.X = nn.reshape_training_set(X)
        # Labels shifted to 0-based class indices before encoding
        y = (array([4, 2, 3]) - 1) % 4
        self.y = nn.reshape_labels(y)
        # Expected weight matrices unrolled from theta
        self.th1 = array([[0.1, 0.3, 0.5],
                          [0.2, 0.4, 0.6]])
        self.th2 = array([[0.7, 1.1, 1.5],
                          [0.8, 1.2, 1.6],
                          [0.9, 1.3, 1.7],
                          [1., 1.4, 1.8]])
        # Expected hidden-layer pre-activations for the fixture inputs
        self.z2 = array([[0.05401727, 0.16643282],
                         [-0.52381956, -0.58818317],
                         [0.6651838, 0.88956705]])
        # Expected hidden-layer activations (without the bias column)
        self.a2 = array([[0.51350103, 0.54151242],
                         [0.37195952, 0.35705182],
                         [0.66042389, 0.70880081]])
        # Expected output-layer activations
        self.a3 = array([[0.8886593, 0.9074274, 0.9233049, 0.9366493],
                         [0.8381779, 0.8602820, 0.8797997, 0.8969177],
                         [0.9234142, 0.9385775, 0.9508982, 0.9608506]])
        self.nn = nn()
    def tearDown(self):
        # Drop all fixture references between tests
        self.theta = None
        self.il = None
        self.hl = None
        self.nl = None
        self.X = None
        self.y = None
        self.a2 = None
        self.a3 = None
        self.z2 = None
        self.th1 = None
        self.th2 = None
    def testForwardPropagation(self):
        # Forward pass must reproduce the precomputed th1/th2/z2/a2/a3 values
        th1, th2, z2, aa2, a3 \
            = self.nn.forward_propagation(self.theta, self.il, self.hl,
                                          self.nl, self.X)
        self.assertTrue(all(isclose(z2, self.z2)))
        self.assertTrue(all(isclose(th2, self.th2)))
        self.assertTrue(all(isclose(th1, self.th1)))
        self.assertTrue(all(isclose(aa2[:, 1:], self.a2)))
        self.assertTrue(all(isclose(a3, self.a3)))
    def testCost(self):
        # Unregularized cost (lambda = 0)
        j = self.nn.cost(self.a3, self.y, self.th1, self.th2, 0)
        self.assertTrue(all(isclose([j], [7.4069])))
    def testCostRegularization(self):
        # Regularized cost (lambda = 4.0)
        j = self.nn.cost(self.a3, self.y, self.th1, self.th2, 4.0)
        self.assertTrue(all(isclose([j], [19.473636522732416])))
    def testGradient(self):
        # Unregularized gradient; aa2 is a2 with a bias column of ones prepended
        aa2 = ones((self.a2.shape[0], self.a2.shape[1]+1))
        aa2[:, 1:] = self.a2
        expected = array([0.766138369630136, 0.979896866040661,
                          -0.027539615885635, -0.035844208951086,
                          -0.024928782740987, -0.053861693972528,
                          0.883417207679397, 0.568762344914511,
                          0.584667662135129, 0.598139236978449,
                          0.459313549296372, 0.344618182524694,
                          0.256313331202455, 0.311885062878785,
                          0.478336623152867, 0.368920406686281,
                          0.259770621934099, 0.322330889109923])
        actual = self.nn.gradient(self.X, self.y, 0, self.th1, self.th2, self.a3, aa2, self.z2)
        self.assertTrue(all(isclose(actual, expected)))
    def testGradientRegularization(self):
        # Regularized gradient (lambda = 4.0); bias-column entries stay unregularized
        aa2 = ones((self.a2.shape[0], self.a2.shape[1]+1))
        aa2[:, 1:] = self.a2
        expected = array([0.766138369630136, 0.979896866040661,
                          0.372460384114365, 0.497489124382247,
                          0.641737883925680, 0.746138306027472,
                          0.883417207679397, 0.568762344914511,
                          0.584667662135129, 0.598139236978449,
                          1.925980215963038, 1.944618182524693,
                          1.989646664535788, 2.178551729545452,
                          2.478336623152867, 2.502253740019614,
                          2.526437288600766, 2.722330889109923])
        actual = self.nn.gradient(self.X, self.y, 4.0, self.th1, self.th2, self.a3, aa2, self.z2)
        self.assertTrue(all(isclose(actual, expected)))
    def testCostFunction(self):
        # cost_function must return the same (cost, gradient) pair as the
        # individual cost/gradient calls with lambda = 0
        self.nn.l = 0
        self.nn.il = self.il
        self.nn.hl = self.hl
        self.nn.nl = self.nl
        self.nn.X = self.X
        self.nn.y = self.y
        expected = array([0.766138369630136, 0.979896866040661,
                          -0.027539615885635, -0.035844208951086,
                          -0.024928782740987, -0.053861693972528,
                          0.883417207679397, 0.568762344914511,
                          0.584667662135129, 0.598139236978449,
                          0.459313549296372, 0.344618182524694,
                          0.256313331202455, 0.311885062878785,
                          0.478336623152867, 0.368920406686281,
                          0.259770621934099, 0.322330889109923])
        j, grad = self.nn.cost_function(self.theta)
        self.assertTrue(all(isclose([j], [7.4069])))
        self.assertTrue(all(isclose(grad, expected)))
    def testCostFunctionWRegularization(self):
        # cost_function with regularization (lambda = 4.0)
        self.nn.l = 4.0
        self.nn.il = self.il
        self.nn.hl = self.hl
        self.nn.nl = self.nl
        self.nn.X = self.X
        self.nn.y = self.y
        expected = array([0.766138369630136, 0.979896866040661,
                          0.372460384114365, 0.497489124382247,
                          0.641737883925680, 0.746138306027472,
                          0.883417207679397, 0.568762344914511,
                          0.584667662135129, 0.598139236978449,
                          1.925980215963038, 1.944618182524693,
                          1.989646664535788, 2.178551729545452,
                          2.478336623152867, 2.502253740019614,
                          2.526437288600766, 2.722330889109923])
        j, grad = self.nn.cost_function(self.theta)
        self.assertTrue(all(isclose([j], [19.473636522732416])))
        self.assertTrue(all(isclose(grad, expected)))
| [
"classification.FastNeuralNetwork.reshape_training_set",
"numpy.ones",
"numpy.isclose",
"numpy.array",
"classification.FastNeuralNetwork",
"numpy.cos",
"numpy.arange",
"classification.FastNeuralNetwork.reshape_labels"
] | [((309, 338), 'numpy.cos', 'cos', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (312, 338), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((356, 382), 'classification.FastNeuralNetwork.reshape_training_set', 'nn.reshape_training_set', (['X'], {}), '(X)\n', (379, 382), True, 'from classification import FastNeuralNetwork as nn\n'), ((439, 459), 'classification.FastNeuralNetwork.reshape_labels', 'nn.reshape_labels', (['y'], {}), '(y)\n', (456, 459), True, 'from classification import FastNeuralNetwork as nn\n'), ((479, 520), 'numpy.array', 'array', (['[[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]'], {}), '([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n', (484, 520), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((570, 645), 'numpy.array', 'array', (['[[0.7, 1.1, 1.5], [0.8, 1.2, 1.6], [0.9, 1.3, 1.7], [1.0, 1.4, 1.8]]'], {}), '([[0.7, 1.1, 1.5], [0.8, 1.2, 1.6], [0.9, 1.3, 1.7], [1.0, 1.4, 1.8]])\n', (575, 645), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((753, 844), 'numpy.array', 'array', (['[[0.05401727, 0.16643282], [-0.52381956, -0.58818317], [0.6651838, 0.88956705]]'], {}), '([[0.05401727, 0.16643282], [-0.52381956, -0.58818317], [0.6651838, \n 0.88956705]])\n', (758, 844), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((908, 998), 'numpy.array', 'array', (['[[0.51350103, 0.54151242], [0.37195952, 0.35705182], [0.66042389, 0.70880081]]'], {}), '([[0.51350103, 0.54151242], [0.37195952, 0.35705182], [0.66042389, \n 0.70880081]])\n', (913, 998), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((1062, 1211), 'numpy.array', 'array', (['[[0.8886593, 0.9074274, 0.9233049, 0.9366493], [0.8381779, 0.860282, \n 0.8797997, 0.8969177], [0.9234142, 0.9385775, 0.9508982, 0.9608506]]'], {}), '([[0.8886593, 0.9074274, 0.9233049, 0.9366493], [0.8381779, 0.860282, \n 0.8797997, 0.8969177], [0.9234142, 0.9385775, 0.9508982, 0.9608506]])\n', (1067, 1211), 
False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((1276, 1280), 'classification.FastNeuralNetwork', 'nn', ([], {}), '()\n', (1278, 1280), True, 'from classification import FastNeuralNetwork as nn\n'), ((2385, 2431), 'numpy.ones', 'ones', (['(self.a2.shape[0], self.a2.shape[1] + 1)'], {}), '((self.a2.shape[0], self.a2.shape[1] + 1))\n', (2389, 2431), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((2478, 2856), 'numpy.array', 'array', (['[0.766138369630136, 0.979896866040661, -0.027539615885635, -\n 0.035844208951086, -0.024928782740987, -0.053861693972528, \n 0.883417207679397, 0.568762344914511, 0.584667662135129, \n 0.598139236978449, 0.459313549296372, 0.344618182524694, \n 0.256313331202455, 0.311885062878785, 0.478336623152867, \n 0.368920406686281, 0.259770621934099, 0.322330889109923]'], {}), '([0.766138369630136, 0.979896866040661, -0.027539615885635, -\n 0.035844208951086, -0.024928782740987, -0.053861693972528, \n 0.883417207679397, 0.568762344914511, 0.584667662135129, \n 0.598139236978449, 0.459313549296372, 0.344618182524694, \n 0.256313331202455, 0.311885062878785, 0.478336623152867, \n 0.368920406686281, 0.259770621934099, 0.322330889109923])\n', (2483, 2856), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((3250, 3296), 'numpy.ones', 'ones', (['(self.a2.shape[0], self.a2.shape[1] + 1)'], {}), '((self.a2.shape[0], self.a2.shape[1] + 1))\n', (3254, 3296), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((3343, 3716), 'numpy.array', 'array', (['[0.766138369630136, 0.979896866040661, 0.372460384114365, 0.497489124382247,\n 0.64173788392568, 0.746138306027472, 0.883417207679397, \n 0.568762344914511, 0.584667662135129, 0.598139236978449, \n 1.925980215963038, 1.944618182524693, 1.989646664535788, \n 2.178551729545452, 2.478336623152867, 2.502253740019614, \n 2.526437288600766, 2.722330889109923]'], {}), '([0.766138369630136, 0.979896866040661, 0.372460384114365, \n 
0.497489124382247, 0.64173788392568, 0.746138306027472, \n 0.883417207679397, 0.568762344914511, 0.584667662135129, \n 0.598139236978449, 1.925980215963038, 1.944618182524693, \n 1.989646664535788, 2.178551729545452, 2.478336623152867, \n 2.502253740019614, 2.526437288600766, 2.722330889109923])\n', (3348, 3716), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((4270, 4648), 'numpy.array', 'array', (['[0.766138369630136, 0.979896866040661, -0.027539615885635, -\n 0.035844208951086, -0.024928782740987, -0.053861693972528, \n 0.883417207679397, 0.568762344914511, 0.584667662135129, \n 0.598139236978449, 0.459313549296372, 0.344618182524694, \n 0.256313331202455, 0.311885062878785, 0.478336623152867, \n 0.368920406686281, 0.259770621934099, 0.322330889109923]'], {}), '([0.766138369630136, 0.979896866040661, -0.027539615885635, -\n 0.035844208951086, -0.024928782740987, -0.053861693972528, \n 0.883417207679397, 0.568762344914511, 0.584667662135129, \n 0.598139236978449, 0.459313549296372, 0.344618182524694, \n 0.256313331202455, 0.311885062878785, 0.478336623152867, \n 0.368920406686281, 0.259770621934099, 0.322330889109923])\n', (4275, 4648), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((5223, 5596), 'numpy.array', 'array', (['[0.766138369630136, 0.979896866040661, 0.372460384114365, 0.497489124382247,\n 0.64173788392568, 0.746138306027472, 0.883417207679397, \n 0.568762344914511, 0.584667662135129, 0.598139236978449, \n 1.925980215963038, 1.944618182524693, 1.989646664535788, \n 2.178551729545452, 2.478336623152867, 2.502253740019614, \n 2.526437288600766, 2.722330889109923]'], {}), '([0.766138369630136, 0.979896866040661, 0.372460384114365, \n 0.497489124382247, 0.64173788392568, 0.746138306027472, \n 0.883417207679397, 0.568762344914511, 0.584667662135129, \n 0.598139236978449, 1.925980215963038, 1.944618182524693, \n 1.989646664535788, 2.178551729545452, 2.478336623152867, \n 2.502253740019614, 2.526437288600766, 
2.722330889109923])\n', (5228, 5596), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((216, 229), 'numpy.arange', 'arange', (['(1)', '(19)'], {}), '(1, 19)\n', (222, 229), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((396, 412), 'numpy.array', 'array', (['[4, 2, 3]'], {}), '([4, 2, 3])\n', (401, 412), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((1789, 1809), 'numpy.isclose', 'isclose', (['z2', 'self.z2'], {}), '(z2, self.z2)\n', (1796, 1809), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((1840, 1862), 'numpy.isclose', 'isclose', (['th2', 'self.th2'], {}), '(th2, self.th2)\n', (1847, 1862), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((1893, 1915), 'numpy.isclose', 'isclose', (['th1', 'self.th1'], {}), '(th1, self.th1)\n', (1900, 1915), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((1946, 1974), 'numpy.isclose', 'isclose', (['aa2[:, 1:]', 'self.a2'], {}), '(aa2[:, 1:], self.a2)\n', (1953, 1974), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((2005, 2025), 'numpy.isclose', 'isclose', (['a3', 'self.a3'], {}), '(a3, self.a3)\n', (2012, 2025), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((2146, 2168), 'numpy.isclose', 'isclose', (['[j]', '[7.4069]'], {}), '([j], [7.4069])\n', (2153, 2168), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((2305, 2339), 'numpy.isclose', 'isclose', (['[j]', '[19.473636522732416]'], {}), '([j], [19.473636522732416])\n', (2312, 2339), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((3164, 3189), 'numpy.isclose', 'isclose', (['actual', 'expected'], {}), '(actual, expected)\n', (3171, 3189), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((4027, 4052), 'numpy.isclose', 'isclose', (['actual', 'expected'], {}), '(actual, expected)\n', (4034, 4052), False, 'from numpy import 
arange, cos, array, all, isclose, ones\n'), ((4912, 4934), 'numpy.isclose', 'isclose', (['[j]', '[7.4069]'], {}), '([j], [7.4069])\n', (4919, 4934), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((4965, 4988), 'numpy.isclose', 'isclose', (['grad', 'expected'], {}), '(grad, expected)\n', (4972, 4988), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((5861, 5895), 'numpy.isclose', 'isclose', (['[j]', '[19.473636522732416]'], {}), '([j], [19.473636522732416])\n', (5868, 5895), False, 'from numpy import arange, cos, array, all, isclose, ones\n'), ((5926, 5949), 'numpy.isclose', 'isclose', (['grad', 'expected'], {}), '(grad, expected)\n', (5933, 5949), False, 'from numpy import arange, cos, array, all, isclose, ones\n')] |
from graphgen.weighted_undirected_graph_generator import GenerateWeightedUndirectedGraph
from graphgen.unweighted_undirected_graph_generator import GenerateUnweightedUndirectedGraph
from graphgen.weighted_directed_graph_generator import GenerateWeightedDirectedGraph
from graphgen.unweighted_directed_graph_generator import GenerateUnweightedDirectedGraph
import networkx as nx
import numpy as np
import inspect
# Default dtypes applied to the returned weight arrays (DEFAULT_FLOAT) and
# edge arrays (DEFAULT_INT) when the caller does not request a dtype.
DEFAULT_FLOAT = np.float32
DEFAULT_INT = np.int64
def weighted_undirected_lfr_graph(num_nodes, average_k, max_degree, mut,
                                  muw, com_size_min, com_size_max, seed, beta=1.5,
                                  tau=2.0, tau2=1.0, overlapping_nodes=0,
                                  overlap_membership=0, fixed_range=True,
                                  excess=False, defect=False, randomf=False,
                                  avg_clustering=0.0, edge_dtype=None,
                                  weight_dtype=None):
    """Generate a weighted undirected LFR benchmark graph.

    Node ids start at 0 and are contiguous.

    :param num_nodes: Number of nodes in the network (ids start at 0)
    :param average_k: average degree of the nodes
    :param max_degree: largest degree of the nodes
    :param mut: mixing parameter, fraction of bridges
    :param muw: weight mixing parameter
    :param beta: minus exponent for weight distribution
    :param com_size_min: smallest community size
    :param com_size_max: largest community size
    :param seed: for rng
    :param tau: minus exponent for degree sequence
    :param tau2: minus exponent for community size distribution
    :param overlapping_nodes: number of overlapping nodes
    :param overlap_membership: number of memberships of overlapping nodes
    :param fixed_range: if True, uses com_size_min/max; otherwise the
        distribution determines the range
    :param excess: -
    :param defect: -
    :param randomf: -
    :param avg_clustering: the average clustering coefficient
    :param edge_dtype: dtype of the edge array (default DEFAULT_INT)
    :param weight_dtype: dtype of the weights (default DEFAULT_FLOAT)
    :return: (edge_array, community_memberships, weights) where edge_array is
        Ex2 in row-major order ([edge#][0]=tail, [edge#][1]=head),
        community_memberships is a tuple of per-node membership tuples, and
        weights is an E-length array aligned with the edge rows (the ith
        weight belongs to the ith edge).
    """
    edge_dtype = DEFAULT_INT if edge_dtype is None else edge_dtype
    weight_dtype = DEFAULT_FLOAT if weight_dtype is None else weight_dtype
    edge_array, community_memberships, weights = GenerateWeightedUndirectedGraph(
        num_nodes, average_k, max_degree, mut, muw, com_size_min, com_size_max,
        seed, tau, tau2, overlapping_nodes, overlap_membership, fixed_range,
        excess, defect, randomf, beta, avg_clustering)
    # astype(copy=False) returns the array unchanged when the dtype already
    # matches, so this is equivalent to converting only on mismatch.
    edge_array = edge_array.astype(edge_dtype, copy=False)
    weights = weights.astype(weight_dtype, copy=False)
    return edge_array, community_memberships, weights
def weighted_undirected_lfr_as_nx(*args, **kwargs):
    """
    Nodes start at 0 and are contiguous.
    Calls weighted_undirected_lfr_graph and converts the result to a networkx
    graph with per-node 'communities'/'com_level' attributes and per-edge
    'weight' attributes.
    :return: networkx graph
    """
    edge_array, community_memberships, weights = weighted_undirected_lfr_graph(*args, **kwargs)
    nx_graph = nx.Graph()
    # Add nodes and attributes to graph
    nodes_and_memberships = []
    for node, node_memberships in enumerate(community_memberships):
        attributes = {'communities': node_memberships}
        for com_level, membership in enumerate(node_memberships):
            # NOTE(review): this overwrites 'com_level' on every iteration, so
            # only the last membership survives — confirm that is intended
            attributes['com_level'] = membership
        nodes_and_memberships.append((node, attributes))
    nx_graph.add_nodes_from(nodes_and_memberships)
    # Add edges together with their weights.
    # Fix: the old call nx.set_edge_attributes(G, name, values) uses the
    # networkx 1.x argument order; networkx >= 2.0 changed the signature to
    # (G, values, name), so that call fails on modern networkx. Attaching the
    # weight dict in add_edges_from works in both versions.
    nx_graph.add_edges_from((edge[0], edge[1], {'weight': weights[i]})
                            for i, edge in enumerate(edge_array))
    return nx_graph
def weighted_undirected_lfr_as_adj(*args, **kwargs):
    """
    Calls weighted_undirected_lfr_graph and converts the result to a numpy
    matrix. Keyword arguments are routed to whichever callee (the generator
    or the converter) declares them in its signature.
    :param transpose: transpose the matrix representation
    :return: NxN float32 numpy array, and community membership
        adj matrix: axis1 (minor) is tail, axis2 (major) is head
        or (transpose): axis1 is head, axis2 is tail
    """
    generator_args = set(inspect.getfullargspec(weighted_undirected_lfr_graph).args)
    converter_args = set(inspect.getfullargspec(convert_weighted_to_numpy_matrix).args)
    graph_pars = {k: v for k, v in kwargs.items() if k in generator_args}
    converter_pars = {k: v for k, v in kwargs.items() if k in converter_args}
    edge_array, community_membership, weights = weighted_undirected_lfr_graph(
        *args, **graph_pars)
    adj = convert_weighted_to_numpy_matrix(edge_array, weights=weights,
                                           **converter_pars)
    return adj, community_membership
def weighted_directed_lfr_graph(num_nodes, average_k, max_degree, mut,
                                muw, com_size_min, com_size_max, seed, beta=1.5,
                                tau=2.0, tau2=1.0, overlapping_nodes=0,
                                overlap_membership=0, fixed_range=True,
                                excess=False, defect=False, randomf=False,
                                edge_dtype=None, weight_dtype=None):
    """
    Generate a weighted, directed LFR benchmark graph (nodes start at 0
    and are contiguous).

    :param num_nodes: number of nodes in the network
    :param average_k: average node degree
    :param max_degree: maximum node degree
    :param mut: topology mixing parameter (fraction of bridge edges)
    :param muw: weight mixing parameter
    :param com_size_min: smallest community size
    :param com_size_max: largest community size
    :param seed: RNG seed
    :param beta: minus exponent for the weight distribution
    :param tau: minus exponent for the degree sequence
    :param tau2: minus exponent for the community size distribution
    :param overlapping_nodes: number of overlapping nodes
    :param overlap_membership: number of memberships per overlapping node
    :param fixed_range: if True use com_size_min/max, else the distribution
                        determines the range
    :param excess: -
    :param defect: -
    :param randomf: -
    :param edge_dtype: dtype of the edge array. Default: DEFAULT_INT
    :param weight_dtype: dtype of the weights. Default: DEFAULT_FLOAT
    :return: (Ex2 numpy edge array with row-major [edge#][0]=tail and
              [edge#][1]=head, tuple of per-node community memberships,
              length-E numpy weight array)
    """
    edge_dtype = DEFAULT_INT if edge_dtype is None else edge_dtype
    weight_dtype = DEFAULT_FLOAT if weight_dtype is None else weight_dtype
    edges, memberships, edge_weights = GenerateWeightedDirectedGraph(
        num_nodes, average_k, max_degree, mut, muw, com_size_min, com_size_max,
        seed, tau, tau2, overlapping_nodes, overlap_membership, fixed_range,
        excess, defect, randomf, beta)
    # Cast only when the generator produced a different dtype.
    if edges.dtype != edge_dtype:
        edges = edges.astype(edge_dtype)
    if edge_weights.dtype != weight_dtype:
        edge_weights = edge_weights.astype(weight_dtype)
    return edges, memberships, edge_weights
def weighted_directed_lfr_as_nx(*args, **kwargs):
    """
    Build a networkx digraph from weighted_directed_lfr_graph output
    (nodes start at 0 and are contiguous).

    :return: networkx DiGraph with per-node community attributes and a
             per-edge 'weight' attribute
    """
    edges, memberships, edge_weights = weighted_directed_lfr_graph(*args, **kwargs)
    graph = nx.DiGraph()
    # One (node, attribute-dict) pair per node.
    node_data = []
    for node_id, node_coms in enumerate(memberships):
        attrs = {'communities': node_coms}
        for level, com in enumerate(node_coms):
            # NOTE: mirrors the original -- every level writes the same key,
            # so only the last level's membership is kept.
            attrs['com_level'] = com
        node_data.append((node_id, attrs))
    graph.add_nodes_from(node_data)
    graph.add_edges_from(edges)
    weight_map = {tuple(e): edge_weights[i] for i, e in enumerate(edges)}
    # networkx 1.x calling convention: (graph, attribute name, value dict).
    nx.set_edge_attributes(graph, 'weight', weight_map)
    return graph
def weighted_directed_lfr_as_adj(*args, **kwargs):
    """
    Build a dense adjacency matrix from weighted_directed_lfr_graph output.

    :param transpose: transpose the matrix representation
    :return: (NxN float numpy array, community membership);
             axis1 (minor) is tail, axis2 (major) is head,
             or the reverse when transposed
    """
    # Route each keyword argument to whichever callee accepts it.
    accepted_graph = inspect.getfullargspec(weighted_directed_lfr_graph).args
    accepted_conv = inspect.getfullargspec(convert_weighted_to_numpy_matrix).args
    graph_kwargs = {k: v for k, v in kwargs.items() if k in accepted_graph}
    conv_kwargs = {k: v for k, v in kwargs.items() if k in accepted_conv}
    edges, membership, edge_weights = weighted_directed_lfr_graph(*args, **graph_kwargs)
    adj = convert_weighted_to_numpy_matrix(edges, weights=edge_weights, **conv_kwargs)
    return adj, membership
def unweighted_undirected_lfr_graph(num_nodes, average_k, max_degree, mu,
                                    com_size_min, com_size_max, seed, tau=2.0,
                                    tau2=1.0, overlapping_nodes=0,
                                    overlap_membership=0, fixed_range=True,
                                    excess=False, defect=False, randomf=False,
                                    avg_clustering=0.0, edge_dtype=None):
    """
    Generate an unweighted, undirected LFR benchmark graph (nodes start
    at 0 and are contiguous).

    :param num_nodes: number of nodes in the network
    :param average_k: average node degree
    :param max_degree: maximum node degree
    :param mu: mixing parameter (fraction of bridge edges)
    :param com_size_min: smallest community size
    :param com_size_max: largest community size
    :param seed: RNG seed
    :param tau: minus exponent for the degree sequence
    :param tau2: minus exponent for the community size distribution
    :param overlapping_nodes: number of overlapping nodes
    :param overlap_membership: number of memberships per overlapping node
    :param fixed_range: if True use com_size_min/max, else the distribution
                        determines the range
    :param excess: -
    :param defect: -
    :param randomf: -
    :param avg_clustering: average clustering coefficient
    :param edge_dtype: dtype of the edge array. Default: DEFAULT_INT
    :return: (Ex2 numpy edge array, tuple of per-node community memberships)
    """
    edge_dtype = DEFAULT_INT if edge_dtype is None else edge_dtype
    edges, memberships = GenerateUnweightedUndirectedGraph(
        num_nodes, average_k, max_degree, mu, com_size_min, com_size_max, seed,
        tau, tau2, overlapping_nodes, overlap_membership, fixed_range, excess,
        defect, randomf, avg_clustering)
    # Cast only when the generator produced a different dtype.
    if edges.dtype != edge_dtype:
        edges = edges.astype(edge_dtype)
    return edges, memberships
def unweighted_undirected_lfr_as_nx(*args, **kwargs):
    """
    Build a networkx graph from unweighted_undirected_lfr_graph output
    (nodes start at 0 and are contiguous).

    :return: networkx Graph with per-node community attributes
    """
    edges, memberships = unweighted_undirected_lfr_graph(*args, **kwargs)
    graph = nx.Graph()
    # One (node, attribute-dict) pair per node.
    node_data = []
    for node_id, node_coms in enumerate(memberships):
        attrs = {'communities': node_coms}
        for level, com in enumerate(node_coms):
            # NOTE: mirrors the original -- every level writes the same key,
            # so only the last level's membership is kept.
            attrs['com_level'] = com
        node_data.append((node_id, attrs))
    graph.add_nodes_from(node_data)
    graph.add_edges_from(edges)
    return graph
def unweighted_undirected_lfr_as_adj(*args, **kwargs):
    """
    Build a dense adjacency matrix from unweighted_undirected_lfr_graph
    output (nodes start at 0 and are contiguous).

    :param transpose: transpose the matrix representation
    :return: (NxN adjacency matrix, community memberships);
             axis1 (minor) is tail, axis2 (major) is head,
             or the reverse when transposed
    """
    # Route each keyword argument to whichever callee accepts it.
    accepted_graph = inspect.getfullargspec(unweighted_undirected_lfr_graph).args
    accepted_conv = inspect.getfullargspec(convert_unweighted_to_numpy_matrix).args
    graph_kwargs = {k: v for k, v in kwargs.items() if k in accepted_graph}
    conv_kwargs = {k: v for k, v in kwargs.items() if k in accepted_conv}
    edges, memberships = unweighted_undirected_lfr_graph(*args, **graph_kwargs)
    adj = convert_unweighted_to_numpy_matrix(edges, **conv_kwargs)
    return adj, memberships
def unweighted_directed_lfr_graph(num_nodes, average_k, max_degree, mu,
                                  com_size_min, com_size_max, seed, tau=2.0,
                                  tau2=1.0, overlapping_nodes=0,
                                  overlap_membership=0, fixed_range=True,
                                  excess=False, defect=False, randomf=False,
                                  edge_dtype=None):
    """
    Generate an unweighted, directed LFR benchmark graph (nodes start
    at 0 and are contiguous).

    :param num_nodes: number of nodes in the network
    :param average_k: average node degree
    :param max_degree: maximum node degree
    :param mu: mixing parameter (fraction of bridge edges)
    :param com_size_min: smallest community size
    :param com_size_max: largest community size
    :param seed: RNG seed
    :param tau: minus exponent for the degree sequence
    :param tau2: minus exponent for the community size distribution
    :param overlapping_nodes: number of overlapping nodes
    :param overlap_membership: number of memberships per overlapping node
    :param fixed_range: if True use com_size_min/max, else the distribution
                        determines the range
    :param excess: -
    :param defect: -
    :param randomf: -
    :param edge_dtype: dtype of the edge array. Default: DEFAULT_INT
    :return: (Ex2 numpy edge array with row-major [edge#][0]=tail and
              [edge#][1]=head, tuple of per-node community memberships)
    """
    edge_dtype = DEFAULT_INT if edge_dtype is None else edge_dtype
    edges, memberships = GenerateUnweightedDirectedGraph(
        num_nodes, average_k, max_degree, mu, com_size_min, com_size_max, seed,
        tau, tau2, overlapping_nodes, overlap_membership, fixed_range, excess,
        defect, randomf)
    # Cast only when the generator produced a different dtype.
    if edges.dtype != edge_dtype:
        edges = edges.astype(edge_dtype)
    return edges, memberships
def unweighted_directed_lfr_as_nx(*args, **kwargs):
    """
    Build a networkx digraph from unweighted_directed_lfr_graph output
    (nodes start at 0 and are contiguous).

    :return: networkx DiGraph with per-node community attributes
    """
    edges, memberships = unweighted_directed_lfr_graph(*args, **kwargs)
    graph = nx.DiGraph()
    # One (node, attribute-dict) pair per node.
    node_data = []
    for node_id, node_coms in enumerate(memberships):
        attrs = {'communities': node_coms}
        for level, com in enumerate(node_coms):
            # NOTE: mirrors the original -- every level writes the same key,
            # so only the last level's membership is kept.
            attrs['com_level'] = com
        node_data.append((node_id, attrs))
    graph.add_nodes_from(node_data)
    graph.add_edges_from(edges)
    return graph
def unweighted_directed_lfr_as_adj(*args, **kwargs):
    """
    Build a dense adjacency matrix from unweighted_directed_lfr_graph
    output (nodes start at 0 and are contiguous).

    :param transpose: transpose the matrix representation
    :return: (NxN adjacency matrix, community memberships);
             axis1 (minor) is tail, axis2 (major) is head,
             or the reverse when transposed
    """
    # Route each keyword argument to whichever callee accepts it.
    accepted_graph = inspect.getfullargspec(unweighted_directed_lfr_graph).args
    accepted_conv = inspect.getfullargspec(convert_unweighted_to_numpy_matrix).args
    graph_kwargs = {k: v for k, v in kwargs.items() if k in accepted_graph}
    conv_kwargs = {k: v for k, v in kwargs.items() if k in accepted_conv}
    edges, memberships = unweighted_directed_lfr_graph(*args, **graph_kwargs)
    adj = convert_unweighted_to_numpy_matrix(edges, **conv_kwargs)
    return adj, memberships
def convert_weighted_to_numpy_matrix(edge_array, num_nodes, weights, transpose=False,
                                     weight_dtype=None):
    """
    Build a dense weighted adjacency matrix from an edge list.

    :param edge_array: Ex2 numpy array, [i][0]=tail, [i][1]=head
    :param num_nodes: N, the output matrix dimension
    :param weights: length-E array; weights[i] belongs to edge_array[i]
    :param transpose: transposes output matrix to reverse representation order
                     default: False
    :param weight_dtype: dtype of return matrix (default: DEFAULT_FLOAT)
    :return: NxN matrix of weight_dtype
    """
    if weight_dtype is None:
        weight_dtype = DEFAULT_FLOAT
    matrix = np.zeros((num_nodes, num_nodes), dtype=weight_dtype)
    edge_array = np.asarray(edge_array)
    if edge_array.size:
        # Vectorized scatter instead of a per-edge Python loop. With
        # duplicate edges the last weight wins, matching the original loop
        # (NumPy assignment with repeated indices keeps the last value).
        matrix[edge_array[:, 0], edge_array[:, 1]] = weights
    if transpose:
        return matrix.transpose().copy()
    return matrix
def convert_unweighted_to_numpy_matrix(edge_array, num_nodes, transpose=False,
                                       edge_dtype=None):
    """
    Build a dense 0/1 adjacency matrix from an edge list.

    :param edge_array: Ex2 numpy array, [i][0]=tail, [i][1]=head
    :param num_nodes: N, the output matrix dimension
    :param transpose: transposes output matrix to reverse representation order
                     default: False
    :param edge_dtype: dtype of return matrix (default: DEFAULT_INT)
    :return: NxN matrix of edge_dtype with 1 at every (tail, head) entry
    """
    if edge_dtype is None:
        edge_dtype = DEFAULT_INT
    matrix = np.zeros((num_nodes, num_nodes), dtype=edge_dtype)
    edge_array = np.asarray(edge_array)
    if edge_array.size:
        # Vectorized scatter instead of a per-edge Python loop; duplicate
        # edges still produce a single 1, matching the original behavior.
        matrix[edge_array[:, 0], edge_array[:, 1]] = 1
    if transpose:
        return matrix.transpose().copy()
    return matrix
if __name__ == '__main__':
    """
    """
    # Library-only module: the generators above are meant to be imported,
    # so there is deliberately no command-line behavior here.
    pass
| [
"inspect.getfullargspec",
"numpy.zeros",
"graphgen.unweighted_undirected_graph_generator.GenerateUnweightedUndirectedGraph",
"networkx.Graph",
"graphgen.weighted_directed_graph_generator.GenerateWeightedDirectedGraph",
"graphgen.unweighted_directed_graph_generator.GenerateUnweightedDirectedGraph",
"grap... | [((2686, 2917), 'graphgen.weighted_undirected_graph_generator.GenerateWeightedUndirectedGraph', 'GenerateWeightedUndirectedGraph', (['num_nodes', 'average_k', 'max_degree', 'mut', 'muw', 'com_size_min', 'com_size_max', 'seed', 'tau', 'tau2', 'overlapping_nodes', 'overlap_membership', 'fixed_range', 'excess', 'defect', 'randomf', 'beta', 'avg_clustering'], {}), '(num_nodes, average_k, max_degree, mut, muw,\n com_size_min, com_size_max, seed, tau, tau2, overlapping_nodes,\n overlap_membership, fixed_range, excess, defect, randomf, beta,\n avg_clustering)\n', (2717, 2917), False, 'from graphgen.weighted_undirected_graph_generator import GenerateWeightedUndirectedGraph\n'), ((3485, 3495), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3493, 3495), True, 'import networkx as nx\n'), ((7298, 7507), 'graphgen.weighted_directed_graph_generator.GenerateWeightedDirectedGraph', 'GenerateWeightedDirectedGraph', (['num_nodes', 'average_k', 'max_degree', 'mut', 'muw', 'com_size_min', 'com_size_max', 'seed', 'tau', 'tau2', 'overlapping_nodes', 'overlap_membership', 'fixed_range', 'excess', 'defect', 'randomf', 'beta'], {}), '(num_nodes, average_k, max_degree, mut, muw,\n com_size_min, com_size_max, seed, tau, tau2, overlapping_nodes,\n overlap_membership, fixed_range, excess, defect, randomf, beta)\n', (7327, 7507), False, 'from graphgen.weighted_directed_graph_generator import GenerateWeightedDirectedGraph\n'), ((8150, 8162), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (8160, 8162), True, 'import networkx as nx\n'), ((11513, 11730), 'graphgen.unweighted_undirected_graph_generator.GenerateUnweightedUndirectedGraph', 'GenerateUnweightedUndirectedGraph', (['num_nodes', 'average_k', 'max_degree', 'mu', 'com_size_min', 'com_size_max', 'seed', 'tau', 'tau2', 'overlapping_nodes', 'overlap_membership', 'fixed_range', 'excess', 'defect', 'randomf', 'avg_clustering'], {}), '(num_nodes, average_k, max_degree, mu,\n com_size_min, com_size_max, seed, tau, tau2, 
overlapping_nodes,\n overlap_membership, fixed_range, excess, defect, randomf, avg_clustering)\n', (11546, 11730), False, 'from graphgen.unweighted_undirected_graph_generator import GenerateUnweightedUndirectedGraph\n'), ((12205, 12215), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (12213, 12215), True, 'import networkx as nx\n'), ((15411, 15610), 'graphgen.unweighted_directed_graph_generator.GenerateUnweightedDirectedGraph', 'GenerateUnweightedDirectedGraph', (['num_nodes', 'average_k', 'max_degree', 'mu', 'com_size_min', 'com_size_max', 'seed', 'tau', 'tau2', 'overlapping_nodes', 'overlap_membership', 'fixed_range', 'excess', 'defect', 'randomf'], {}), '(num_nodes, average_k, max_degree, mu,\n com_size_min, com_size_max, seed, tau, tau2, overlapping_nodes,\n overlap_membership, fixed_range, excess, defect, randomf)\n', (15442, 15610), False, 'from graphgen.unweighted_directed_graph_generator import GenerateUnweightedDirectedGraph\n'), ((16081, 16093), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (16091, 16093), True, 'import networkx as nx\n'), ((18118, 18170), 'numpy.zeros', 'np.zeros', (['(num_nodes, num_nodes)'], {'dtype': 'weight_dtype'}), '((num_nodes, num_nodes), dtype=weight_dtype)\n', (18126, 18170), True, 'import numpy as np\n'), ((18818, 18868), 'numpy.zeros', 'np.zeros', (['(num_nodes, num_nodes)'], {'dtype': 'edge_dtype'}), '((num_nodes, num_nodes), dtype=edge_dtype)\n', (18826, 18868), True, 'import numpy as np\n'), ((4632, 4685), 'inspect.getfullargspec', 'inspect.getfullargspec', (['weighted_undirected_lfr_graph'], {}), '(weighted_undirected_lfr_graph)\n', (4654, 4685), False, 'import inspect\n'), ((4791, 4847), 'inspect.getfullargspec', 'inspect.getfullargspec', (['convert_weighted_to_numpy_matrix'], {}), '(convert_weighted_to_numpy_matrix)\n', (4813, 4847), False, 'import inspect\n'), ((9295, 9346), 'inspect.getfullargspec', 'inspect.getfullargspec', (['weighted_directed_lfr_graph'], {}), '(weighted_directed_lfr_graph)\n', (9317, 9346), 
False, 'import inspect\n'), ((9452, 9508), 'inspect.getfullargspec', 'inspect.getfullargspec', (['convert_weighted_to_numpy_matrix'], {}), '(convert_weighted_to_numpy_matrix)\n', (9474, 9508), False, 'import inspect\n'), ((13233, 13288), 'inspect.getfullargspec', 'inspect.getfullargspec', (['unweighted_undirected_lfr_graph'], {}), '(unweighted_undirected_lfr_graph)\n', (13255, 13288), False, 'import inspect\n'), ((13394, 13452), 'inspect.getfullargspec', 'inspect.getfullargspec', (['convert_unweighted_to_numpy_matrix'], {}), '(convert_unweighted_to_numpy_matrix)\n', (13416, 13452), False, 'import inspect\n'), ((17107, 17160), 'inspect.getfullargspec', 'inspect.getfullargspec', (['unweighted_directed_lfr_graph'], {}), '(unweighted_directed_lfr_graph)\n', (17129, 17160), False, 'import inspect\n'), ((17266, 17324), 'inspect.getfullargspec', 'inspect.getfullargspec', (['convert_unweighted_to_numpy_matrix'], {}), '(convert_unweighted_to_numpy_matrix)\n', (17288, 17324), False, 'import inspect\n')] |
from typing import Tuple, Union
import numpy as np
import torch
from torchsparse.utils import make_ntuple
__all__ = ['get_kernel_offsets']
def get_kernel_offsets(size: Union[int, Tuple[int, ...]],
                       stride: Union[int, Tuple[int, ...]] = 1,
                       dilation: Union[int, Tuple[int, ...]] = 1,
                       device: str = 'cpu') -> torch.Tensor:
    """Return the integer coordinate offsets of a 3D convolution kernel.

    :param size: kernel size, an int or a per-axis tuple (expanded to 3 dims)
    :param stride: kernel stride, an int or a per-axis tuple
    :param dilation: kernel dilation, an int or a per-axis tuple
    :param device: device the returned tensor is placed on
    :return: (prod(size), 3) int tensor of (x, y, z) offsets; for odd sizes
        each axis's offsets are centered on 0
    """
    size = make_ntuple(size, ndim=3)
    stride = make_ntuple(stride, ndim=3)
    dilation = make_ntuple(dilation, ndim=3)
    # Per-axis offset ranges, scaled by stride and dilation.
    offsets = [(np.arange(-size[k] // 2 + 1, size[k] // 2 + 1) * stride[k]
                * dilation[k]) for k in range(3)]
    # This condition check is only to make sure that our weight layout is
    # compatible with `MinkowskiEngine`.
    if np.prod(size) % 2 == 1:
        # Odd kernel volume: enumerate with x varying fastest.
        offsets = [[x, y, z] for z in offsets[2] for y in offsets[1]
                   for x in offsets[0]]
    else:
        # Even kernel volume: enumerate with z varying fastest.
        offsets = [[x, y, z] for x in offsets[0] for y in offsets[1]
                   for z in offsets[2]]
    offsets = torch.tensor(offsets, dtype=torch.int, device=device)
    return offsets
| [
"numpy.arange",
"numpy.prod",
"torch.tensor",
"torchsparse.utils.make_ntuple"
] | [((404, 429), 'torchsparse.utils.make_ntuple', 'make_ntuple', (['size'], {'ndim': '(3)'}), '(size, ndim=3)\n', (415, 429), False, 'from torchsparse.utils import make_ntuple\n'), ((443, 470), 'torchsparse.utils.make_ntuple', 'make_ntuple', (['stride'], {'ndim': '(3)'}), '(stride, ndim=3)\n', (454, 470), False, 'from torchsparse.utils import make_ntuple\n'), ((486, 515), 'torchsparse.utils.make_ntuple', 'make_ntuple', (['dilation'], {'ndim': '(3)'}), '(dilation, ndim=3)\n', (497, 515), False, 'from torchsparse.utils import make_ntuple\n'), ((1032, 1085), 'torch.tensor', 'torch.tensor', (['offsets'], {'dtype': 'torch.int', 'device': 'device'}), '(offsets, dtype=torch.int, device=device)\n', (1044, 1085), False, 'import torch\n'), ((765, 778), 'numpy.prod', 'np.prod', (['size'], {}), '(size)\n', (772, 778), True, 'import numpy as np\n'), ((533, 579), 'numpy.arange', 'np.arange', (['(-size[k] // 2 + 1)', '(size[k] // 2 + 1)'], {}), '(-size[k] // 2 + 1, size[k] // 2 + 1)\n', (542, 579), True, 'import numpy as np\n')] |
import numpy as np
import utils
import glob
from natsort import natsorted
import pandas as pd
from scipy.io.wavfile import read
from splices2npz import load_video, process_audio
"""
Pre-processes data considering already spliced video and audio only
Synchronize video and audio
"""
__author__ = "<NAME>"
# Toggle between the small test dataset and the full DEAP data tree.
is_test = False
if is_test:
    ROOT = utils.project_dir_name() + 'data/deap/test_data2/'
else:
    ROOT = utils.project_dir_name() + 'data/deap30frames/mp4/'
# Pre-processing configuration shared by the functions below.
params = {
    'fps': 10,  # video frame rate used for the splices
    'root': ROOT,  # input data directory
    'new_size': 100,  # new frame size (100x100)
    'sr': 16000,  # audio sample rate in Hz
    'audio_len': 48000,  # padded audio length in samples (3 s at 16 kHz)
    'results_dir': ROOT,  # where the .npz archives are written
    'seconds': 3,  # duration of each splice in seconds
}
def save_npz(videos, type='train', audio_type='instrumental', emotion_dim='1D', emotion_root='emotion/',
             text_root='text/', include_audio=True):
    """
    Load spliced video, audio, emotion and text data for the given DEAP
    videos and save everything into compressed .npz archives under
    params['results_dir'].

    :param videos: iterable of video ids (each converted to str to build
        the per-video directory name under params['root']).
    :param type: split tag used in the output filename (e.g. 'train'/'test').
        NOTE(review): shadows the builtin `type`; kept for API compatibility.
    :param audio_type: 'orig' selects the original audio splices, anything
        else selects the wav2mid2wav (instrumental) splices.
    :param emotion_dim: emotion dimensionality tag used in CSV and npz names.
    :param emotion_root: subdirectory holding the participant ratings CSV.
    :param text_root: subdirectory holding the text CSV.
    :param include_audio: if True, archives with audio (raw and padded)
        are written; otherwise a single no-audio archive is written.
    """
    print(videos)
    seconds = params['seconds']
    frame_hsv_arr, audio_arr, emotion_arr, text_arr = [], [], [], []
    for v_int in videos:
        v = str(v_int)
        # data_path = params['root'] + "Video_emotion_" + v + "_noText/"
        data_path = params['root'] + v + "/"
        # Load video and corresponding audio
        # video_path = data_path + "selected_avi/*.avi"
        video_path = data_path + "video_splices_{}secs/*.mp4".format(seconds)
        video_filenames = glob.glob(video_path)
        # natsorted keeps splice order aligned between video and audio files.
        video_filenames = natsorted(video_filenames)
        # Load corresponding audio
        # audio_path = data_path + "selected_wav_eq/*.wav"
        if audio_type == 'orig':
            audio_path = data_path + "audio_splices_{}secs_16000_c1_16bits/*.wav".format(seconds)
        else:
            audio_path = data_path + "audio_splices_{}secs_wav2mid2wav_16000_c1_16bits/*.wav".format(seconds)
        audio_filenames = glob.glob(audio_path)
        audio_filenames = natsorted(audio_filenames)
        # Load corresponding emotion
        emotion_csv = pd.read_csv(data_path + "{}participant_ratings_{}_splices_{}secs.csv".format(emotion_root, emotion_dim, seconds))
        emotion_data = emotion_csv['emotion']
        # Load corresponding text
        text_csv = pd.read_csv(data_path + "{}text_splices_{}secs.csv".format(text_root, seconds))
        text_data = text_csv['text']
        for v_filename, a_filename, emotion, text in zip(video_filenames, audio_filenames, emotion_data, text_data):
            # Missing text cells come back from pandas as float NaN.
            text = "" if isinstance(text, float) else text
            print('Video {}: {}, audio: {}, emotion: {}, text: {}'.
                  format(v, v_filename.split('/')[-1], a_filename.split('/')[-1], emotion, text))
            frame_hsv = load_video(v_filename, params_substitute=params)
            if 'deap_raw' in ROOT:
                # Make sure the max frame is 25 (for splices of 3 secs with 10fps)
                frame_hsv_container = np.zeros( (25, params['new_size'], params['new_size'], 3) )
                frame_hsv_container[:np.shape(frame_hsv)[0], :, :, :] = np.array(frame_hsv[:25])
                frame_hsv = frame_hsv_container
                #frame_hsv = frame_hsv[:25] # np.array(frame_hsv)[:25, :, :, :]
            frame_hsv_arr.append(frame_hsv)
            rate, audio = read(a_filename)  # int numbers -> necessary for SAMPLERNN and CNNSEQ2SEQ models
            # print(rate) # 16000 OKAY
            audio_arr.append(audio)
            emotion_arr.append(emotion)
            text_arr.append(text)
    # Transpose from (N, 30, 100, 100, 3) to (N, 30, 3, 100, 100)
    # frame_hsv_arr = np.array(frame_hsv_arr)
    #if 'deap_raw' in ROOT:
    #    #frame_hsv_arr = np.concatenate(frame_hsv_arr, axis=0)
    #    frame_hsv_arr = np.stack(frame_hsv_arr, axis=0)
    #    # frame_hsv_arr = list(map(list, zip(*frame_hsv_arr)))  # (16, N, 100, 100, 3)
    #    s = np.shape(frame_hsv_arr)
    #    if s.__len__ == 1:
    #        frame_hsv_arr = frame_hsv_arr[0] #np.squeeze(frame_hsv_arr, axis=0)
    #    frame_hsv_arr_transpose = np.transpose(frame_hsv_arr, (1, 0, 4, 2, 3))
    #else:
    frame_hsv_arr_transpose = np.transpose(frame_hsv_arr, (0, 1, 4, 2, 3))
    # Pad audio to audio_len if not already
    audio_arr_padded = process_audio(audio_arr, pad_size=params['audio_len'])
    print("Shapes - video: {}/{}, audio: {}/{}".format(np.shape(frame_hsv_arr), np.shape(frame_hsv_arr_transpose),
                                                       np.shape(audio_arr), np.shape(audio_arr_padded)))
    # Save in .npz
    utils.ensure_dir(params['results_dir'])
    save_npz_filename_root = '{}video_feats_HSL_{}fps_{}secs'.format(params['results_dir'], params['fps'], seconds)
    if include_audio:
        if audio_type == 'orig':
            save_npz_filename = save_npz_filename_root + '_origAudio_intAudio_{}_{}.npz'.format(emotion_dim, type)
        else:
            save_npz_filename = save_npz_filename_root + '_intAudio_{}_{}.npz'.format(emotion_dim, type)
        np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr_transpose, audio=audio_arr, emotion=emotion_arr,
                            text=text_arr)
    else:
        save_npz_filename = save_npz_filename_root + '_{}_{}_noAudio.npz'.format(emotion_dim, type)
        np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr_transpose, emotion=emotion_arr, text=text_arr)
    # Padded audio
    if include_audio:
        if audio_type == 'orig':
            save_npz_filename = save_npz_filename_root + '_origAudio_intAudio_pad_{}_{}.npz'.format(emotion_dim, type)
        else:
            save_npz_filename = save_npz_filename_root + '_intAudio_pad_{}_{}.npz'.format(emotion_dim, type)
        np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr_transpose, audio=audio_arr_padded,
                            emotion=emotion_arr, text=text_arr)
if __name__ == '__main__':
    # audio_type = 'orig'
    audio_type = 'instrumental'
    type = 'test' if is_test else 'train'
    # Videos 1-16 and 19-40 (17 and 18 are excluded from the ranges).
    videos = np.concatenate((range(1, 16+1), range(19, 40+1)))
    #videos = range(3, 5 + 1)
    save_npz(videos, type=type, audio_type=audio_type, emotion_dim='1D', include_audio=True,
             emotion_root='', text_root='')
| [
"utils.ensure_dir",
"numpy.transpose",
"utils.project_dir_name",
"numpy.zeros",
"splices2npz.process_audio",
"scipy.io.wavfile.read",
"numpy.shape",
"numpy.savez_compressed",
"numpy.array",
"splices2npz.load_video",
"glob.glob",
"natsort.natsorted"
] | [((4001, 4045), 'numpy.transpose', 'np.transpose', (['frame_hsv_arr', '(0, 1, 4, 2, 3)'], {}), '(frame_hsv_arr, (0, 1, 4, 2, 3))\n', (4013, 4045), True, 'import numpy as np\n'), ((4113, 4167), 'splices2npz.process_audio', 'process_audio', (['audio_arr'], {'pad_size': "params['audio_len']"}), "(audio_arr, pad_size=params['audio_len'])\n", (4126, 4167), False, 'from splices2npz import load_video, process_audio\n'), ((4412, 4451), 'utils.ensure_dir', 'utils.ensure_dir', (["params['results_dir']"], {}), "(params['results_dir'])\n", (4428, 4451), False, 'import utils\n'), ((357, 381), 'utils.project_dir_name', 'utils.project_dir_name', ([], {}), '()\n', (379, 381), False, 'import utils\n'), ((425, 449), 'utils.project_dir_name', 'utils.project_dir_name', ([], {}), '()\n', (447, 449), False, 'import utils\n'), ((1307, 1328), 'glob.glob', 'glob.glob', (['video_path'], {}), '(video_path)\n', (1316, 1328), False, 'import glob\n'), ((1355, 1381), 'natsort.natsorted', 'natsorted', (['video_filenames'], {}), '(video_filenames)\n', (1364, 1381), False, 'from natsort import natsorted\n'), ((1758, 1779), 'glob.glob', 'glob.glob', (['audio_path'], {}), '(audio_path)\n', (1767, 1779), False, 'import glob\n'), ((1806, 1832), 'natsort.natsorted', 'natsorted', (['audio_filenames'], {}), '(audio_filenames)\n', (1815, 1832), False, 'from natsort import natsorted\n'), ((4865, 4994), 'numpy.savez_compressed', 'np.savez_compressed', (['save_npz_filename'], {'HSL_data': 'frame_hsv_arr_transpose', 'audio': 'audio_arr', 'emotion': 'emotion_arr', 'text': 'text_arr'}), '(save_npz_filename, HSL_data=frame_hsv_arr_transpose,\n audio=audio_arr, emotion=emotion_arr, text=text_arr)\n', (4884, 4994), True, 'import numpy as np\n'), ((5137, 5249), 'numpy.savez_compressed', 'np.savez_compressed', (['save_npz_filename'], {'HSL_data': 'frame_hsv_arr_transpose', 'emotion': 'emotion_arr', 'text': 'text_arr'}), '(save_npz_filename, HSL_data=frame_hsv_arr_transpose,\n emotion=emotion_arr, 
text=text_arr)\n', (5156, 5249), True, 'import numpy as np\n'), ((5571, 5707), 'numpy.savez_compressed', 'np.savez_compressed', (['save_npz_filename'], {'HSL_data': 'frame_hsv_arr_transpose', 'audio': 'audio_arr_padded', 'emotion': 'emotion_arr', 'text': 'text_arr'}), '(save_npz_filename, HSL_data=frame_hsv_arr_transpose,\n audio=audio_arr_padded, emotion=emotion_arr, text=text_arr)\n', (5590, 5707), True, 'import numpy as np\n'), ((2591, 2639), 'splices2npz.load_video', 'load_video', (['v_filename'], {'params_substitute': 'params'}), '(v_filename, params_substitute=params)\n', (2601, 2639), False, 'from splices2npz import load_video, process_audio\n'), ((3152, 3168), 'scipy.io.wavfile.read', 'read', (['a_filename'], {}), '(a_filename)\n', (3156, 3168), False, 'from scipy.io.wavfile import read\n'), ((4223, 4246), 'numpy.shape', 'np.shape', (['frame_hsv_arr'], {}), '(frame_hsv_arr)\n', (4231, 4246), True, 'import numpy as np\n'), ((4248, 4281), 'numpy.shape', 'np.shape', (['frame_hsv_arr_transpose'], {}), '(frame_hsv_arr_transpose)\n', (4256, 4281), True, 'import numpy as np\n'), ((4338, 4357), 'numpy.shape', 'np.shape', (['audio_arr'], {}), '(audio_arr)\n', (4346, 4357), True, 'import numpy as np\n'), ((4359, 4385), 'numpy.shape', 'np.shape', (['audio_arr_padded'], {}), '(audio_arr_padded)\n', (4367, 4385), True, 'import numpy as np\n'), ((2796, 2853), 'numpy.zeros', 'np.zeros', (["(25, params['new_size'], params['new_size'], 3)"], {}), "((25, params['new_size'], params['new_size'], 3))\n", (2804, 2853), True, 'import numpy as np\n'), ((2928, 2952), 'numpy.array', 'np.array', (['frame_hsv[:25]'], {}), '(frame_hsv[:25])\n', (2936, 2952), True, 'import numpy as np\n'), ((2893, 2912), 'numpy.shape', 'np.shape', (['frame_hsv'], {}), '(frame_hsv)\n', (2901, 2912), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
import scipy.signal as signal
import scipy.optimize as optimize
import numpy as np
import operator
import warnings
# TODO: DOCUMENT
__all__ = [
'LineScan'
]
class LineScan(list):
    """
    A line scan is a one dimensional signal pulled from the intensity
    of a series of pixels in an image. LineScan allows you to do a
    series of operations just like on an image class object. You can
    also treat the line scan as a python list object. A LineScan object
    is automatically generated by calling ImageClass.get_line_scan on an
    image. You can also roll your own by declaring a LineScan object
    and passing the constructor a 1xN list of values.

    Examples:
    >>> import matplotlib.pyplot as plt
    >>> img = Image('lena')
    >>> s = img.get_linescan(y=128)
    >>> ss = s.smooth()
    >>> plt.plot(s)
    >>> plt.plot(ss)
    >>> plt.show()
    """
    # (x, y) image coordinates for each sample of the scan.
    point_loc = None
    # Source image the scan was extracted from (None for hand-built scans).
    image = None
    def __init__(self, args, **kwargs):
        """
        Build a LineScan from a 1xN sequence of intensity values.

        :param args: list or 1D numpy array of sample values.
        :param kwargs: optional metadata (image, pt1, pt2, row, col,
                       channel, ...) copied onto matching attributes.
        """
        if isinstance(args, np.ndarray):
            args = args.tolist()
        super(LineScan, self).__init__(args)
        # Metadata describing where the scan came from.
        self.image = None
        self.pt1 = None
        self.pt2 = None
        self.row = None
        self.col = None
        self.channel = -1
        # Only keys already present as instance attributes are accepted.
        # NOTE(review): 'point_loc' is not in self.__dict__ at this point,
        # so a point_loc kwarg is silently dropped -- verify intent.
        for key in kwargs:
            if key in self.__dict__:
                self.__dict__[key] = kwargs[key]
        if self.point_loc is None:
            # Default: sample i maps to image point (i, i).
            # NOTE(review): zip() is lazy on Python 3; this code presumably
            # targets Python 2 where zip returns a list -- confirm.
            self.point_loc = zip(range(0, len(self)), range(0, len(self)))
def _update(self, obj):
"""
Update LineScan instance object.
:param obj: LineScan instance.
:return: None.
"""
self.image = obj.image
self.pt1 = obj.pt1
self.pt2 = obj.pt2
self.row = obj.row
self.col = obj.col
self.channel = obj.channel
self.point_loc = obj.point_loc
def __getitem__(self, key):
"""
:param key: index or slice.
:return: a LineScan sliced.
"""
item = super(LineScan, self).__getitem__(key)
if isinstance(key, slice):
return LineScan(item)
else:
return item
def __sub__(self, other):
if len(self) == len(other):
ret = LineScan(map(operator.sub, self, other))
else:
print("Size mismatch.")
return None
ret._update(self)
return ret
def __add__(self, other):
if len(self) == len(other):
ret = LineScan(map(operator.add, self, other))
else:
print("Size mismatch.")
return None
ret._update(self)
return ret
def __mul__(self, other):
if len(self) == len(other):
ret = LineScan(map(operator.mul, self, other))
else:
print("Size mismatch.")
return None
ret._update(self)
return ret
def __div__(self, other):
if len(self) == len(other):
try:
ret = LineScan(map(operator.div, self, other))
except ZeroDivisionError:
print("Second LineScan contains zeros.")
return None
else:
print("Size mismatch.")
return None
ret._update(self)
    def smooth(self, degree=3):
        """
        Perform a Gaussian simple smoothing operation on the signal.

        :param degree: degree of the fitting function. Higher degree means
         more smoothing.
        :return: a smoothed LineScan object of the same length as the input.

        Notes:
        Cribbed from
        http://www.swharden.com/blog/2008-11-17-linear-data-smoothing-in-python/
        """
        # Window is 2*degree - 1 samples, always odd, so it has a center.
        window = degree * 2 - 1
        weight = np.array([1.0] * window)
        weight_gauss = []
        for i in range(window):
            # Re-center i so the window runs from -(degree-1) to degree-1.
            i = i - degree + 1
            frac = i / float(window)
            # Gaussian-shaped taper over the window.
            gauss = 1 / np.exp((4 * frac) ** 2)
            weight_gauss.append(gauss)
        weight = np.array(weight_gauss) * weight
        # Normalized weighted moving average over the interior samples.
        smoothed = [0.0] * (len(self) - window)
        for i in range(len(smoothed)):
            smoothed[i] = sum(np.array(self[i:i + window]) * weight) / sum(weight)
        # Pad both borders with raw samples (degree-1 in front, degree at the
        # back) so the result has exactly the original length.
        front = self[0:degree - 1]
        front += smoothed
        front += self[-1 * degree:]
        ret = LineScan(front, image=self.image, point_loc=self.point_loc,
                       pt1=self.pt1, pt2=self.pt2)
        ret._update(self)
        return ret
def normalize(self):
"""
Normalize the signal so the maximum value is scaled to one.
:return: a normalized ScanLine object.
"""
tmp = np.array(self, dtype='float32')
tmp /= np.max(tmp)
ret = LineScan(list(tmp[:]), image=self.image,
point_loc=self.point_loc, pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def scale(self, val_range=(0, 1)):
"""
Scale the signal so the max and min values are all scaled to the values
in _val_range. This is handy if you want to compare the shape of tow
signals that are scaled to different ranges.
:param val_range: a tuple that provides the range of output signal.
:return: a scaled LineScan object.
"""
tmp = np.array(self, dtype='float32')
vmax = np.max(tmp)
vmin = np.min(tmp)
a = np.min(val_range)
b = np.max(val_range)
tmp = (((b - a) / (vmax - vmin)) * (tmp - vmin)) + a
ret = LineScan(list(tmp[:]), image=self.image,
point_loc=self.point_loc, pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def minima(self):
"""
Global minima in the line scan.
:return: a list of tuples of the format: (LineScanIndex, MinimaValue,
(image_position_x, image_position_y))
"""
minvalue = np.min(self)
idxs = np.where(np.array(self) == minvalue)[0]
minvalue = np.ones((1, len(idxs))) * minvalue
minvalue = minvalue[0]
pts = np.array(self.point_loc)
pts = pts[idxs]
pts = [(p[0], p[1]) for p in pts]
return zip(idxs, minvalue, pts)
def maxima(self):
"""
Global maxima in the line scan.
:return: a list of tuples of the format: (LineScanIndex, MaximaValue,
(image_position_x, image_position_y))
"""
maxvalue = np.max(self)
idxs = np.where(np.array(self) == maxvalue)[0]
maxvalue = np.ones((1, len(idxs))) * maxvalue
maxvalue = maxvalue[0]
pts = np.array(self.point_loc)
pts = pts[idxs]
pts = [(p[0], p[1]) for p in pts]
return zip(idxs, maxvalue, pts)
def derivative(self):
"""
Finds the discrete derivative of the signal. The discrete derivative
is simply the difference between each successive samples. A good use of
this function is edge detection.
:return: a LineScan object.
"""
tmp = np.array(self, dtype='float32')
d = [0]
d += list(tmp[1:] - tmp[0:-1])
ret = LineScan(d, image=self, point_loc=self.point_loc,
pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def local_minima(self):
"""
Local minima are defined as points that are less than their neighbors
to the left and to the right.
:return: a list of tuples of the format: (LineScanIndex, MaximaValue,
(image_position_x, image_position_y))
"""
tmp = np.array(self)
idx = np.r_[True, tmp[1:] < tmp[:-1]] & np.r_[tmp[:-1] < tmp[1:], True]
i = np.where(idx is True)[0]
values = tmp[i]
pts = np.array(self.point_loc)
pts = pts[i]
pts = [(p[0], p[1]) for p in pts]
return zip(i, values, pts)
def local_maxmima(self):
"""
Local minima are defined as points that are less than their neighbors
to the left and to the right.
:return: a list of tuples of the format: (LineScanIndex, MaximaValue,
(image_position_x, image_position_y))
"""
tmp = np.array(self)
idx = np.r_[True, tmp[1:] > tmp[:-1]] & np.r_[tmp[:-1] > tmp[1:], True]
i = np.where(idx is True)[0]
values = tmp[i]
pts = np.array(self.point_loc)
pts = pts[i]
pts = [(p[0], p[1]) for p in pts]
return zip(i, values, pts)
def resample(self, n=100):
"""
Re-sample the signal to fit into n samples. This method is handy
if you would like to resize multiple signals so that they fit
together nice. Note that using n < len(LineScan) can cause data loss.
:param n: number of samples to reshape to.
:return: a LineScan object of length n.
"""
sig = signal.resample(self, n)
pts = np.array(self.point_loc)
x = np.linspace(pts[0, 0], pts[-1, 0], n)
y = np.linspace(pts[0, 1], pts[-1, 1], n)
pts = zip(x, y)
ret = LineScan(list(sig), image=self.image, point_loc=self.point_loc,
pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def fit2model(self, func, p0=None):
"""
Fit the data to the provided model. This can be any
arbitrary 2D signal.
:param func: a function of the form func(x_values, p0, p1, ... pn)
where p is parameter for the model.
:param p0: a list of the initial guess for the model parameters.
:return: a LineScan object where the fitted model data replaces
the actual data.
"""
yvals = np.array(self, dtype='float32')
xvals = range(0, len(yvals), 1)
popt, pcov = optimize.curve_fit(func, xvals, yvals, p0=p0)
yvals = func(xvals, *popt)
ret = LineScan(list(yvals), image=self.image,
point_loc=self.point_loc, pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def get_model_params(self, func, p0=None):
"""
Fit a model to the data and then return.
:param func: a function of the form func(x_values, p0, p1, ... pn)
where p is parameter for the model.
:param p0: a list of the initial guess for the model parameters.
:return: The model parameters as a list.
"""
yvals = np.array(self, dtype='float32')
xvals = range(0, len(yvals), 1)
popt, pcov = optimize.curve_fit(func, xvals, yvals, p0=p0)
return popt
def convolve(self, kernel):
"""
Convolve the line scan with a one dimensional kernel stored as
a list. Allows you to create an arbitrary filter for the signal.
:param kernel: an Nx1 list or np.array that defines the kernel.
:return: a LineScan features with the kernel applied. We crop
the fiddly bits at the end and the begging of the kernel
so everything lines up nicely.
"""
out = np.convolve(self, np.array(kernel, dtype='float32'), 'same')
ret = LineScan(out, image=self.image, point_loc=self.point_loc,
pt1=self.pt1, pt2=self.pt2, channel=self.channel)
return ret
def fft(self):
"""
Perform a Fast Fourier Transform on the line scan and return
the FFT output and the frequency of each value.
:return: the FFT as a numpy array of irrational numbers and a one
dimensional list of frequency values.
"""
sig = np.array(self, dtype='float32')
fft = np.fft.fft(sig)
freq = np.fft.fftfreq(len(sig))
return fft, freq
def ifft(self, fft):
"""
Perform a inverse Fast Fourier Transform on the provided irrationally
valued signal and return the results as a LineScan.
:param fft: a one dimensional numpy array of irrational values upon
which we will perform the IFFT.
:return: a LineScan object of the reconstructed signal.
"""
sig = np.fft.ifft(fft)
ret = LineScan(sig.real)
ret.image = self.image
ret.point_loc = self.point_loc
return ret
def lut(val=-1):
"""
Create an empty look up table(LUT)
:param val: If default value is what the lut is initially filled with
if val == 0
the array is all zeros.
if val > 0
the array is set to default value. Clipped to 255.
if val < 0
the array is set to the range [0,255]
if val is a tuple of two values:
we set stretch the range of 0 to 255 to match the range provided.
:return: a LUT.
"""
lut = None
if isinstance(val, list) or isinstance(val, tuple):
start = np.clip(val[0], 0, 255)
stop = np.clip(val[1], 0, 255)
lut = np.around(np.linsapce(start, stop, 256), 0)
lut = np.array(lut, dtype='uint8')
lut = lut.tolist()
elif val == 0:
lut = np.zeros([1, 256]).tolist()[0]
elif val > 0:
val = np.clip(val, 1, 255)
lut = np.ones([1, 256]) * val
lut = np.array(lut, dtype='uint8')
lut = lut.tolist()
elif val < 0:
lut = np.linspace(0, 256, 256)
lut = np.array(lut, dtype='uint8')
lut = lut.tolist()
return lut
def fill_lut(self, lut, idxs, value=255):
"""
Fill up an existing LUT at the indexes specified by idxs
with the value specified by value. This is useful for picking
out specific values.
:param lut: an existing LUT (just a list of 255 values).
:param idxs: the indexes of the LUT to fill with the value.
This can also be a sample swatch of an image.
:param value: the value to set the LUT[idx] to.
:return: an updated LUT.
"""
if idxs.__class__.__name__ == 'Image':
npg = idxs.getGrayNumpy()
npg = npg.reshape([npg.shape[0] * npg.shape[1]])
idxs = npg.tolist()
val = np.clip(value, 0, 255)
for idx in idxs:
if 0 <= idx < len(lut):
lut[idx] = val
return lut
def threshold(self, thresh=128, invert=False):
"""
Do a 1-D threshold operation. Values about the threshold will
be set to 255, values below the threshold will be set to 0.
If invert is true we do the opposite.
:param thresh: the cutoff value for threshold.
:param invert: if invert is False, above the threshold are set
to 255, if invert is True, set to 0.
:return: the thresholded LineScan operation.
"""
out = []
high = 255
low = 0
if invert:
high = 0
low = 255
for p in self:
if p < thresh:
out.append(low)
else:
out.append(high)
ret = LineScan(out, image=self.image, point_loc=self.point_loc,
pt1=self.pt1, ptw=self.pt2)
ret._update(self)
return ret
def invert(self, maxv=255):
"""
Do an 8bit invert of the signal. What was black is now white.
:param maxv: the maximum value of a pixel in the image, usually 255.
:return: the inverted LineScan object.
"""
out = []
for p in self:
out.append(255-p)
ret = LineScan(out, image=self.image, point_loc=self.point_loc,
pt1=self.pt1, ptw=self.pt2)
ret._update(self)
return ret
def mean(self):
"""
Computes the statistical mean of the signal.
:return: the mean of the LineScan object.
"""
return sum(self) / len(self)
def variance(self):
"""
Computes the variance of the signal.
:return: the variance of the LineScan object.
"""
mean = sum(self) / len(self)
summation = 0
for num in self:
summation += (num - mean)**2
return summation / len(self)
def deviation(self):
"""
Computes the standard deviation of the signal.
:return: the standard deviation of the LineScan object.
"""
mean = sum(self) / len(self)
summation = 0
for num in self:
summation += (num - mean)**2
return np.sqrt(summation / len(self))
def median(self, size=5):
"""
Do a sliding median filter.
Args:
size (int): window size
Returns:
the LineScan after being passed through the median filter.
the last index where the value occurs or None if none is found.
"""
if size % 2 == 0:
size += 1
skip = int(np.floor(size / 2))
out = self[0:skip]
vsz = len(self)
for i in range(skip, vsz-skip):
val = np.median(self[i - skip:i + skip])
out.append(val)
for p in self[-1*skip:]:
out.append(p)
ret = LineScan(out, image=self.image, point_loc=self.point_loc,
pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def find_first_index_equal(self, value=255):
"""
Find the index of the first element of the LineScan that has a
value equal to value. If nothing found, None is returned.
:param value: the value to look for.
:return: the first index where the value occurs or None if not found.
"""
vals = np.where(np.array(self) == value)[0]
ret = None
if len(vals) > 0:
ret = vals[0]
return ret
def find_last_index_equal(self, value=255):
"""
Find the index of the last element of the LineScan. If nothing found,
None is returned.
:param value: the value to look for.
:return: the last index where the value occurs or None if not found.
"""
vals = np.where(np.array(self) == value)[0]
ret = None
if len(vals) > 0:
ret = vals[-1]
return ret
def find_first_index_greater(self, value=255):
"""
Find the index of the first element of the LineScan that has a
value equal to value. If nothing found, None is returned.
:param value: the value to look for.
:return: the first index where the value occurs or None if not found.
"""
vals = np.where(np.array(self) >= value)[0]
ret = None
if len(vals) > 0:
ret = vals[0]
return ret
def apply_lut(self, lut):
"""
Apply a lut to the signal.
:param lut: an array of length 256, the array elements are the
values that are replaced via the lut.
:return: a LineScan object with the lut applied to the values.
"""
out = []
for p in self:
out.append(lut[p])
ret = LineScan(out, image=self.image, point_loc=self.point_loc,
pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def median_filter(self, kernel_size=5):
"""
Apply median filter on the data.
:param kernel_size: size of the filter (should be odd int) - int
:return: a LineScan object with the median filter applied
to the values.
"""
try:
from signal import medfilt
except ImportError:
warnings.warn("Scipy version >= 0.11 required.")
return None
if kernel_size % 2 == 0:
kernel_size -= 1
print("Kernel Size should be odd.")
medfilt_array = medfilt(np.asarray(self[:]), kernel_size)
ret = LineScan(medfilt_array.astype('uint8').tolist(),
image=self.image, point_loc=self.point_loc,
pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def detrend(self):
"""
Detrend the data
:return: a LineScan object with detrend data.
"""
try:
from signal import detrend as scidetrend
except ImportError:
warnings.warn("Scipy version >= 0.11 required.")
return None
detrend_arr = scidetrend(np.asarray(self[:]))
ret = LineScan(detrend_arr.astype('uint8').tolist(),
image=self.image, point_loc=self.point_loc,
pt1=self.pt1, pt2=self.pt2)
ret._update(self)
return ret
def running_average(self, diameter=3, kernel='uniform'):
"""
Finds the running average by either using a uniform kernel or
using a gaussian kernel. The gaussian kernels calculated from
the standard normal distribution formula.
:param diameter: size of the window (should be odd int) - int
:param kernel: 'uniform' (default) / 'gaussian' - used to decide
the kernel - string.
:return: a LineScan object with the kernel of the provided
algorithm applied.
"""
k = list()
if diameter % 2 == 0:
warnings.warn("Diameter mush be an odd integer.")
return None
if kernel == 'uniform':
k = list(1 / float(diameter) * np.ones(diameter))
elif kernel == 'gaussian':
r = diameter / 2
for i in range(-int(r), int(r) + 1):
k.append(np.exp(-i ** 2 / (2 * (r / 3) ** 2)) /
np.sqrt(2 * np.pi) * (r / 3))
ret = LineScan(map(int, self.convolve(k)))
ret._update(self)
return ret
def find_peaks(self, window=30, delta=3):
"""
Find the peaks in a LineScan.
:param window: the size of the window in which the peak should have
the highest value to be considered as a peak. By
default this is 15 as it gives appropriate results.
The lower this value the more the peaks are returned.
:param delta: the minimum difference between the peak and all elements
in the window
:return: a list of (peak position, peak value) tuples.
"""
maximum = -np.Inf
width = int(window / 2)
peaks = []
for i, val in enumerate(self):
if val > maximum:
maximum = val
max_pos = i
# checking whether peak satisfies window and delta conditions
if max(self[max(0, i-width):i+width]) + delta < maximum:
peaks.append((max_pos, maximum))
maximum = -np.Inf
return peaks
def find_valleys(self, window=30, delta=3):
"""
Finds the valleys in a LineScan.
Args:
window (int): the size of the window in which the valley should
have the highest value to be considered as a valley.
By default this is 15 as it gives appropriate results.
The lower this value the more the valleys are returned
delta (int): the minimum difference between the valley and all
elements in the window
Returns:
(list) valley position, peak value tuples.
"""
minimum = -np.Inf
width = int(window / 2)
valleys = []
for i, val in enumerate(self):
if val < minimum:
minimum = val
min_pos = i
# checking whether peak satisfies window and delta conditions
if min(self[max(0, i - width):i + width]) - delta < minimum:
valleys.append((min_pos, minimum))
minimum = -np.Inf
return valleys
def fit_spline(self, degree=2):
"""
Generates a spline _curve fitting over the points in LineScan with
order of precision given by the parameter degree.
:param degree: the precision of the generated spline.
:return: the spline as a LineScan fitting over the initial values of
LineScan
Notes:
Implementation taken from http://www.scipy.org/Cookbook/Interpolation
"""
if degree > 4:
degree = 4 # No significant improvement with respect to time usage
if degree < 1:
warnings.warn("LineScan.fit_spline - degree needs to be >= 1.")
return None
y = np.array(self)
x = np.arange(0, len(y), 1)
dx = 1
newx = np.arange(0, len(y) - 1, pow(0.1, degree))
cj = signal.cspline1d(y)
ret = signal.cspline1d_eval(cj, newx, dx=dx, x0=x[0])
return ret
| [
"numpy.linsapce",
"numpy.floor",
"numpy.ones",
"numpy.clip",
"numpy.exp",
"numpy.fft.fft",
"numpy.max",
"numpy.linspace",
"numpy.fft.ifft",
"numpy.median",
"numpy.asarray",
"scipy.signal.cspline1d_eval",
"scipy.optimize.curve_fit",
"numpy.min",
"scipy.signal.resample",
"numpy.zeros",
... | [((3839, 3863), 'numpy.array', 'np.array', (['([1.0] * window)'], {}), '([1.0] * window)\n', (3847, 3863), True, 'import numpy as np\n'), ((4746, 4777), 'numpy.array', 'np.array', (['self'], {'dtype': '"""float32"""'}), "(self, dtype='float32')\n", (4754, 4777), True, 'import numpy as np\n'), ((4793, 4804), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (4799, 4804), True, 'import numpy as np\n'), ((5390, 5421), 'numpy.array', 'np.array', (['self'], {'dtype': '"""float32"""'}), "(self, dtype='float32')\n", (5398, 5421), True, 'import numpy as np\n'), ((5437, 5448), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (5443, 5448), True, 'import numpy as np\n'), ((5464, 5475), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (5470, 5475), True, 'import numpy as np\n'), ((5488, 5505), 'numpy.min', 'np.min', (['val_range'], {}), '(val_range)\n', (5494, 5505), True, 'import numpy as np\n'), ((5518, 5535), 'numpy.max', 'np.max', (['val_range'], {}), '(val_range)\n', (5524, 5535), True, 'import numpy as np\n'), ((6015, 6027), 'numpy.min', 'np.min', (['self'], {}), '(self)\n', (6021, 6027), True, 'import numpy as np\n'), ((6182, 6206), 'numpy.array', 'np.array', (['self.point_loc'], {}), '(self.point_loc)\n', (6190, 6206), True, 'import numpy as np\n'), ((6553, 6565), 'numpy.max', 'np.max', (['self'], {}), '(self)\n', (6559, 6565), True, 'import numpy as np\n'), ((6720, 6744), 'numpy.array', 'np.array', (['self.point_loc'], {}), '(self.point_loc)\n', (6728, 6744), True, 'import numpy as np\n'), ((7151, 7182), 'numpy.array', 'np.array', (['self'], {'dtype': '"""float32"""'}), "(self, dtype='float32')\n", (7159, 7182), True, 'import numpy as np\n'), ((7715, 7729), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (7723, 7729), True, 'import numpy as np\n'), ((7885, 7909), 'numpy.array', 'np.array', (['self.point_loc'], {}), '(self.point_loc)\n', (7893, 7909), True, 'import numpy as np\n'), ((8327, 8341), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (8335, 
8341), True, 'import numpy as np\n'), ((8497, 8521), 'numpy.array', 'np.array', (['self.point_loc'], {}), '(self.point_loc)\n', (8505, 8521), True, 'import numpy as np\n'), ((9011, 9035), 'scipy.signal.resample', 'signal.resample', (['self', 'n'], {}), '(self, n)\n', (9026, 9035), True, 'import scipy.signal as signal\n'), ((9050, 9074), 'numpy.array', 'np.array', (['self.point_loc'], {}), '(self.point_loc)\n', (9058, 9074), True, 'import numpy as np\n'), ((9087, 9124), 'numpy.linspace', 'np.linspace', (['pts[0, 0]', 'pts[-1, 0]', 'n'], {}), '(pts[0, 0], pts[-1, 0], n)\n', (9098, 9124), True, 'import numpy as np\n'), ((9137, 9174), 'numpy.linspace', 'np.linspace', (['pts[0, 1]', 'pts[-1, 1]', 'n'], {}), '(pts[0, 1], pts[-1, 1], n)\n', (9148, 9174), True, 'import numpy as np\n'), ((9857, 9888), 'numpy.array', 'np.array', (['self'], {'dtype': '"""float32"""'}), "(self, dtype='float32')\n", (9865, 9888), True, 'import numpy as np\n'), ((9950, 9995), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['func', 'xvals', 'yvals'], {'p0': 'p0'}), '(func, xvals, yvals, p0=p0)\n', (9968, 9995), True, 'import scipy.optimize as optimize\n'), ((10599, 10630), 'numpy.array', 'np.array', (['self'], {'dtype': '"""float32"""'}), "(self, dtype='float32')\n", (10607, 10630), True, 'import numpy as np\n'), ((10692, 10737), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['func', 'xvals', 'yvals'], {'p0': 'p0'}), '(func, xvals, yvals, p0=p0)\n', (10710, 10737), True, 'import scipy.optimize as optimize\n'), ((11778, 11809), 'numpy.array', 'np.array', (['self'], {'dtype': '"""float32"""'}), "(self, dtype='float32')\n", (11786, 11809), True, 'import numpy as np\n'), ((11824, 11839), 'numpy.fft.fft', 'np.fft.fft', (['sig'], {}), '(sig)\n', (11834, 11839), True, 'import numpy as np\n'), ((12301, 12317), 'numpy.fft.ifft', 'np.fft.ifft', (['fft'], {}), '(fft)\n', (12312, 12317), True, 'import numpy as np\n'), ((14396, 14418), 'numpy.clip', 'np.clip', (['value', '(0)', '(255)'], {}), 
'(value, 0, 255)\n', (14403, 14418), True, 'import numpy as np\n'), ((24931, 24945), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (24939, 24945), True, 'import numpy as np\n'), ((25068, 25087), 'scipy.signal.cspline1d', 'signal.cspline1d', (['y'], {}), '(y)\n', (25084, 25087), True, 'import scipy.signal as signal\n'), ((25102, 25149), 'scipy.signal.cspline1d_eval', 'signal.cspline1d_eval', (['cj', 'newx'], {'dx': 'dx', 'x0': 'x[0]'}), '(cj, newx, dx=dx, x0=x[0])\n', (25123, 25149), True, 'import scipy.signal as signal\n'), ((4096, 4118), 'numpy.array', 'np.array', (['weight_gauss'], {}), '(weight_gauss)\n', (4104, 4118), True, 'import numpy as np\n'), ((7822, 7843), 'numpy.where', 'np.where', (['(idx is True)'], {}), '(idx is True)\n', (7830, 7843), True, 'import numpy as np\n'), ((8434, 8455), 'numpy.where', 'np.where', (['(idx is True)'], {}), '(idx is True)\n', (8442, 8455), True, 'import numpy as np\n'), ((11258, 11291), 'numpy.array', 'np.array', (['kernel'], {'dtype': '"""float32"""'}), "(kernel, dtype='float32')\n", (11266, 11291), True, 'import numpy as np\n'), ((13056, 13079), 'numpy.clip', 'np.clip', (['val[0]', '(0)', '(255)'], {}), '(val[0], 0, 255)\n', (13063, 13079), True, 'import numpy as np\n'), ((13099, 13122), 'numpy.clip', 'np.clip', (['val[1]', '(0)', '(255)'], {}), '(val[1], 0, 255)\n', (13106, 13122), True, 'import numpy as np\n'), ((13203, 13231), 'numpy.array', 'np.array', (['lut'], {'dtype': '"""uint8"""'}), "(lut, dtype='uint8')\n", (13211, 13231), True, 'import numpy as np\n'), ((17163, 17181), 'numpy.floor', 'np.floor', (['(size / 2)'], {}), '(size / 2)\n', (17171, 17181), True, 'import numpy as np\n'), ((17293, 17327), 'numpy.median', 'np.median', (['self[i - skip:i + skip]'], {}), '(self[i - skip:i + skip])\n', (17302, 17327), True, 'import numpy as np\n'), ((20120, 20139), 'numpy.asarray', 'np.asarray', (['self[:]'], {}), '(self[:])\n', (20130, 20139), True, 'import numpy as np\n'), ((20720, 20739), 'numpy.asarray', 
'np.asarray', (['self[:]'], {}), '(self[:])\n', (20730, 20739), True, 'import numpy as np\n'), ((21593, 21642), 'warnings.warn', 'warnings.warn', (['"""Diameter mush be an odd integer."""'], {}), "('Diameter mush be an odd integer.')\n", (21606, 21642), False, 'import warnings\n'), ((24830, 24893), 'warnings.warn', 'warnings.warn', (['"""LineScan.fit_spline - degree needs to be >= 1."""'], {}), "('LineScan.fit_spline - degree needs to be >= 1.')\n", (24843, 24893), False, 'import warnings\n'), ((4015, 4038), 'numpy.exp', 'np.exp', (['((4 * frac) ** 2)'], {}), '((4 * frac) ** 2)\n', (4021, 4038), True, 'import numpy as np\n'), ((13151, 13180), 'numpy.linsapce', 'np.linsapce', (['start', 'stop', '(256)'], {}), '(start, stop, 256)\n', (13162, 13180), True, 'import numpy as np\n'), ((19903, 19951), 'warnings.warn', 'warnings.warn', (['"""Scipy version >= 0.11 required."""'], {}), "('Scipy version >= 0.11 required.')\n", (19916, 19951), False, 'import warnings\n'), ((20614, 20662), 'warnings.warn', 'warnings.warn', (['"""Scipy version >= 0.11 required."""'], {}), "('Scipy version >= 0.11 required.')\n", (20627, 20662), False, 'import warnings\n'), ((6052, 6066), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (6060, 6066), True, 'import numpy as np\n'), ((6590, 6604), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (6598, 6604), True, 'import numpy as np\n'), ((13375, 13395), 'numpy.clip', 'np.clip', (['val', '(1)', '(255)'], {}), '(val, 1, 255)\n', (13382, 13395), True, 'import numpy as np\n'), ((13456, 13484), 'numpy.array', 'np.array', (['lut'], {'dtype': '"""uint8"""'}), "(lut, dtype='uint8')\n", (13464, 13484), True, 'import numpy as np\n'), ((17948, 17962), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (17956, 17962), True, 'import numpy as np\n'), ((18391, 18405), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (18399, 18405), True, 'import numpy as np\n'), ((18872, 18886), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', 
(18880, 18886), True, 'import numpy as np\n'), ((21742, 21759), 'numpy.ones', 'np.ones', (['diameter'], {}), '(diameter)\n', (21749, 21759), True, 'import numpy as np\n'), ((4246, 4274), 'numpy.array', 'np.array', (['self[i:i + window]'], {}), '(self[i:i + window])\n', (4254, 4274), True, 'import numpy as np\n'), ((13414, 13431), 'numpy.ones', 'np.ones', (['[1, 256]'], {}), '([1, 256])\n', (13421, 13431), True, 'import numpy as np\n'), ((13556, 13580), 'numpy.linspace', 'np.linspace', (['(0)', '(256)', '(256)'], {}), '(0, 256, 256)\n', (13567, 13580), True, 'import numpy as np\n'), ((13599, 13627), 'numpy.array', 'np.array', (['lut'], {'dtype': '"""uint8"""'}), "(lut, dtype='uint8')\n", (13607, 13627), True, 'import numpy as np\n'), ((13304, 13322), 'numpy.zeros', 'np.zeros', (['[1, 256]'], {}), '([1, 256])\n', (13312, 13322), True, 'import numpy as np\n'), ((21899, 21935), 'numpy.exp', 'np.exp', (['(-i ** 2 / (2 * (r / 3) ** 2))'], {}), '(-i ** 2 / (2 * (r / 3) ** 2))\n', (21905, 21935), True, 'import numpy as np\n'), ((21963, 21981), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (21970, 21981), True, 'import numpy as np\n')] |
import numpy as np
# 2D dataset files used by the loaders below; paired element-wise with
# DATASETS_2D_Ks (the i-th file uses the i-th K value).
DATASETS_2D = ['data/dane_2D_1.txt', 'data/dane_2D_2.txt', 'data/dane_2D_3.txt', 'data/dane_2D_4.txt',
               'data/dane_2D_5.txt', 'data/dane_2D_6.txt', 'data/dane_2D_7.txt', 'data/dane_2D_8.txt']
# Number of clusters (K) to use for each dataset above, in the same order.
DATASETS_2D_Ks = [10, 10, 5, 5, 37, 17, 5, 5]
from sklearn.preprocessing import MinMaxScaler
def get_data_w_labels(filename):
    """Load a whitespace-delimited dataset whose last column holds labels.

    Returns a tuple (features scaled per column to [0, 1] with
    MinMaxScaler, label column).
    """
    raw = np.loadtxt(filename)
    features, labels = np.hsplit(raw, [-1])
    scaler = MinMaxScaler().fit(features)
    return scaler.transform(features), labels
def get_data_wo_labels(filename):
    """Load a whitespace-delimited dataset with no label column.

    Returns the data scaled per column to [0, 1] with MinMaxScaler.
    """
    raw = np.loadtxt(filename)
    scaler = MinMaxScaler().fit(raw)
    return scaler.transform(raw)
| [
"sklearn.preprocessing.MinMaxScaler",
"numpy.loadtxt",
"numpy.hsplit"
] | [((367, 387), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (377, 387), True, 'import numpy as np\n'), ((399, 420), 'numpy.hsplit', 'np.hsplit', (['data', '[-1]'], {}), '(data, [-1])\n', (408, 420), True, 'import numpy as np\n'), ((517, 537), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (527, 537), True, 'import numpy as np\n'), ((549, 563), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (561, 563), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((432, 446), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (444, 446), False, 'from sklearn.preprocessing import MinMaxScaler\n')] |
import numpy as np
from scipy.misc import derivative
import scipy.optimize as opt
import scipy.stats as st
def check_grad(mod, p0, dx=1e-3):
    """Compare the gradient from mod.loglikelihood_derivative
    against a numerical derivative.
    Tests that the derivative codes are correct.
    Args:
        mod: the model we are testing
        p0: parameters of the model
        dx: step used for the numerical derivative
    Returns:
        logLikelihood, grad_array_analytic, grad_array_numerical
    """
    p0 = np.array(p0)
    # h (third output, presumably a Fisher/Hessian-like matrix) is unused here.
    l,g,h = mod.loglikelihood_derivative(p0)
    g0 = []
    for i in range(len(p0)):
        # 1-D slice of the likelihood along parameter i; the array passed
        # via args is a fresh copy, so mutating p0[i] inside f does not
        # corrupt the outer p0.
        def f(x, p0):
            p0[i] = x
            return mod.loglikelihood(p0)
        # NOTE(review): scipy.misc.derivative was removed in recent SciPy
        # releases - confirm the installed version still provides it.
        g0.append(derivative(f, p0[i], dx, args=(np.array(p0), )))
    g0 = np.array(g0)
    print('analytic: ', ' '.join(['%10.4g'%x for x in g]))
    print('numerical: ', ' '.join(['%10.4g'%x for x in g0]))
    return l, g, g0
def run_mcmc(mod, p0, perr=None, nwalkers=-10, nrun=100, **kwargs):
    """Run MCMC to estimate the posterior distributions of the parameters.
    This uses the emcee package.
    Args:
        mod: the model object for calculating the likelihood function
        p0: starting parameters for the chain
        perr: estimated uncertainties on the parameters. They are used to
            initialize the chains. If not given, the chain is started with
            values within 10% of p0
        nwalkers: number of walkers in the chains (see emcee for details).
            A negative value means "that many walkers per parameter".
        nrun: number of chain runs
    Keywords:
        sigma_f: the factor that multiplies perr used to initialize the
            walkers. Default: 0.5
        limits: a list of [pmin, pmax] values for the limits on the parameters.
            These are effectively used as uniform priors on the parameters
        iphi: The indices of the phase parameters within p0. Used to ensure
            that those parameters are cyclic and remain -pi < phi < pi
    Returns:
        the chain array where the walker axis is flattened; the last column
        holds the log-probability of each sample.
    """
    sigma_f = kwargs.get('sigma_f', 0.5)
    limits = kwargs.get('limits', None)
    iphi = kwargs.get('iphi', None)
    # Default prior: every parameter uniform in [-30, 30].
    if limits is None:
        limits = [[-30,30] for x in p0]
    if iphi is None:
        iphi = []
    # Log-probability for emcee: wraps phase parameters into (-pi, pi],
    # applies the uniform priors, and guards against linear-algebra
    # failures inside the likelihood. NOTE: mutates x in place for phases.
    def logProb(x, mod):
        for ix in range(len(x)):
            if ix in iphi:
                x[ix] = (x[ix]+np.pi) % (2*np.pi) - np.pi
            if x[ix]<limits[ix][0] or x[ix]>limits[ix][1]:
                return -np.inf
        #if np.any(np.logical_or(x < -30, x > 30)):
        #    return -np.inf
        try:
            l = mod.loglikelihood(x)
        except np.linalg.LinAlgError:
            l = -np.inf
        return l
    try:
        import emcee
    except ModuleNotFoundError:
        raise RuntimeError('Cannot find emcee. Please install it first')
    ndim = len(p0)
    # Negative nwalkers is a per-parameter count (e.g. -10 -> 10*ndim walkers).
    if nwalkers < 0: nwalkers = -ndim * nwalkers
    # Fall back to 10% of p0 when no uncertainties are supplied.
    pe = p0 * 0.1 if perr is None else perr
    # Scatter the walkers around p0, then clip each one into the prior box
    # so no walker starts at -inf log-probability.
    p0 = np.random.randn(nwalkers, ndim)*pe*sigma_f + p0
    p0 = np.array([[np.clip(xx, l[0], l[1]) for xx,l in zip(x,limits)] for x in p0])
    sampler = emcee.EnsembleSampler(nwalkers, ndim, logProb, args=[mod,])
    state = sampler.run_mcmc(p0, nrun)
    pchain = sampler.flatchain
    lchain = sampler.flatlnprobability
    print('acceptance fraction: ', np.mean(sampler.acceptance_fraction))
    # Append the log-probability as an extra column to the flattened chain.
    chain = np.hstack([pchain, np.expand_dims(lchain, -1)])
    return chain
def maximize(mod, p0, limits=None, ipfix=None, verbose=1, useGrad=False):
    """Maximize the likelihood of model mod.
    Use numerical optimization from scipy.optimize.minimize to estimate
    the parameters of the model at the likelihood maximum, using the
    BFGS algorithm.
    Args:
        mod: model whose likelihood is to be optimized
        p0: starting model parameters
        limits: a list of [pmin, pmax] values for the limits on the parameters.
            These are effectively used as uniform priors on the parameters.
            None means all parameters are assumed to be between [-30, 30]
        ipfix: parameter indices of p0 to keep fixed during the maximization.
            Useful when calculating uncertainties by stepping through them.
        verbose: if True, print progress
        useGrad: use analytical gradient. This may give ~10% speedup, but it
            can be unstable for complex problems.
    Returns:
        (pars_best, pars_best_error, fit_result)
        the latter is from scipy.optimize.minimize
    """
    if limits is None:
        limits = [[-30,30] for x in p0]
    if ipfix is None:
        ipfix = []
    npar = len(p0)
    # Split the parameter vector into fixed values and free indices; the
    # optimizer only sees the free subset.
    pfix = np.array([p0[i] for i in ipfix])
    ivar = [i for i in range(npar) if not i in ipfix]
    info = [npar, pfix, ipfix, ivar]
    # main negative log-likelihood function #
    def f(x, mod, info):
        npar, pfix, ipfix, ivar = info
        # Clip the free parameters into their limit boxes, then rebuild the
        # full parameter vector (fixed + free) before evaluating.
        x = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(x,limits)])
        y = np.zeros(npar, np.double)
        y[ipfix] = pfix
        y[ivar ] = x
        #y = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(y,limits)])
        try:
            l = mod.loglikelihood(y)
        except np.linalg.LinAlgError:
            # Large (but finite) penalty keeps BFGS away from bad regions.
            l = -1e6
        if verbose and not useGrad:
            print('%10.6g | %s \r'%(l, ' '.join(['%10.3g'%xx for xx in x])), end="")
        return -l
    # first derivative of the negative log-likelihood
    def fprime(x, mod, info):
        npar, pfix, ipfix, ivar = info
        x = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(x,limits)])
        y = np.zeros(npar, np.double)
        y[ipfix] = pfix
        y[ivar ] = x
        try:
            l, g = mod.loglikelihood_derivative(y, calc_fisher=False)
            # Only the gradient components of the free parameters are used.
            g = g[ivar]
        except np.linalg.LinAlgError:
            l = -1e6
            g = x*0 - 1e6
        if verbose:
            #print('%10.6g | %s | %s\r'%(l,
            #    ' '.join(['%10.3g'%xx for xx in x]), ' '.join(['%10.3g'%xx for xx in g])), end="")
            print('%10.6g | %s | %s\r'%(l,
                ' '.join(['%10.3g'%xx for xx in x]), '%10.3g'%np.max(np.abs(g))), end="")
        return -g
    # Without useGrad, let scipy estimate the gradient numerically.
    if not useGrad:
        fprime = None
    res = opt.minimize(f, p0[ivar], args=(mod, info), method='BFGS', tol=1e-4, jac=fprime,
            options={'gtol':1e-4})
    # last print
    if verbose:
        print('%10.6g | %s | %s\r'%(-res.fun,
            ' '.join(['%10.3g'%xx for xx in res.x]), '%10.3g'%np.max(np.abs(res.jac))), end="")
        print('\n** done **\n')
    # Parameter errors from the diagonal of the inverse Hessian
    # approximation built up by BFGS.
    p, pe = res.x, np.diag(res.hess_inv)**0.5
    # Reassemble full-length parameter/error vectors (fixed entries get
    # zero error), then clip the result back into the limit boxes.
    y, ye = np.zeros(npar, np.double), np.zeros(npar, np.double)
    y[ipfix] = pfix
    y[ivar ] = p
    ye[ivar] = pe
    y = np.array([np.clip(xx, l[0], l[1]) for xx,l in zip(y,limits)])
    return y, ye, res
def step_par(mod, p0, par1, par2=None, **kwargs):
    """Scan one (or two) parameters over a grid and profile the likelihood.

    For every grid value, the scanned parameter(s) are frozen and the
    remaining parameters are re-fit, recording the resulting maximum
    log-likelihood. Useful for mapping out parameter uncertainties.

    Args:
        mod: model whose likelihood is to be optimized.
        p0: starting model parameters.
        par1: [ipar, p_array]; ipar is the index of the parameter in p0
            to step through, p_array is the grid of values to use.
        par2: same structure as par1, enabling a 2d scan. Default is
            None, so only one parameter is scanned.

    Keywords:
        verbose: if True, print progress.
        limits: a list of [pmin, pmax] values for the limits on the
            parameters, passed through to @maximize.

    Returns:
        step, [pbest, pbest_e, lbest] where:
            step: (n, 2) array of parameter value and loglikelihood
                (shape (n, m, 3) for a 2d scan).
            pbest, pbest_e, lbest: best-fit parameters, their errors
                and the best loglikelihood value.
    """
    verbose = kwargs.get('verbose', True)
    limits = kwargs.get('limits', None)

    # Global best fit: the reference likelihood the scan is measured against.
    pbest, pbest_e, res = maximize(mod, p0, limits, verbose=False)
    lbest = -res.fun
    if verbose: print('best loglikelihood: %10.6g'%lbest)

    idx1 = par1[0]
    grid = []
    for val1 in par1[1]:
        pars = np.array(pbest)
        pars[idx1] = val1
        if par2 is None:
            # 1d scan: fix par1 at val1 and re-fit everything else.
            fit = maximize(mod, pars, limits, ipfix=[idx1], verbose=False)
            grid.append([val1, -fit[2].fun])
            if verbose:
                print('%10.3g %10.6g %10.3g\r'%(tuple(grid[-1])+(np.round(lbest - grid[-1][-1], 2),)), end="")
            continue
        # 2d scan: fix both parameters for every (val1, val2) pair.
        idx2 = par2[0]
        row = []
        for val2 in par2[1]:
            pars2 = np.array(pars)
            pars2[idx2] = val2
            fit = maximize(mod, pars2, limits, ipfix=[idx1, idx2], verbose=False)
            row.append([val1, val2, -fit[2].fun])
            if verbose:
                print('%10.3g %10.3g %10.6g %10.3g\r'%(tuple(row[-1])+(np.round(lbest - row[-1][-1], 2),)), end="")
        grid.append(row)
    return np.array(grid), [pbest, pbest_e, lbest]
def errors(mod, p0, ipars=None, **kwargs):
    """Calculate the uncertainties in the parameters p0 that maximize
    the likelihood function of a model mod.
    For each parameter, the value is changed in small steps until the log-likelihood
    changes by DCHI2 (default is 1, to calculate the 1-sigma uncertainties).
    Args:
        mod: model whose log-likelihood can be called as mod.loglikelihood
        p0: the model parameters that maximize the likelihood, obtained
            for example by running @misc.maximize
        ipars: a list of indices of p0 for which the errors are to be calculated
            Default: None, means calculate errors for all parameters
    Keywords:
        limits: a list of [pmin, pmax] values for the limits on the parameters.
            to be passed to @maximize
        tol: tolerance in loglikelihood value. e.g. calculation stops when
            |Delta(loglikelihood) - DCHI2| < tol. Default: 1e-2
        DCHI2: the change in loglikelihood value to probe. DCHI2=1 gives ~1-sigma
            uncertainties. For 90% confidence for instance, use DCHI2=2.71.
        skip_ipars: positions to skip in calculating the errors. Useful
            for example in combination with ipars=None above.
        sign: the direction of the parameter uncertainty search. Default: 1 means
            increase the parameter until Delta(loglikelihood)=DCHI2. -1 means search
            in the other direction. Doing one direction assumes gaussian uncertainties.
            If the assumption breaks, both +1 and -1 uncertainties should be reported.
        verbose: True to print progress
    Returns:
        pbest, pbest_e, fit_result; pbest_e is always rescaled to 1-sigma
        errors regardless of the DCHI2 used.
    """
    import warnings

    # limits on the parameters; act like uniform priors
    limits = kwargs.get('limits', None)
    # tolerance on |Delta(2*logL) - DCHI2| for the bisection below
    tol = kwargs.get('tol', 1e-2)
    # a measure of the confidence level in the uncertainty.
    DCHI2 = kwargs.get('DCHI2', 1.0)
    # NOTE(review): this is compared against iipar (the position inside the
    # ipars list), not the raw parameter index; the two coincide only when
    # ipars=None — confirm this is the intended semantics.
    skip_ipars = kwargs.get('skip_ipars', [])
    # direction of the uncertainty search (+1 or -1)
    sign = kwargs.get('sign', 1)
    # printing progress?
    verbose = kwargs.get('verbose', True)
    # do all parameters if ipars=None
    npar = len(p0)
    if ipars is None:
        ipars = list(range(npar))
    ipars = [i for i in ipars]
    # make sure we start the search from parameters that maximize the likelihood
    pbest, pbest_e, res = maximize(mod, np.array(p0), limits)
    lbest = -res.fun
    # loop through the parameters. If a new maximum is found,
    # restart the whole search
    p, pe = np.array(pbest), np.array(pbest_e)
    for iipar, ipar in enumerate(ipars):
        if iipar in skip_ipars:
            continue
        if verbose: print('\t## errors for param %d ##'%ipar)
        # Bracket the DCHI2 crossing: step outward in half-integer multiples
        # of the hessian-based error estimate pe until 2*DlogL exceeds DCHI2.
        isig, pExtrm, dchi2 = 0.5, p[ipar], 0
        not_bound = False
        while (dchi2 - DCHI2) < tol*2:
            pExtrm = p[ipar] + isig*sign*pe[ipar]
            tmpp = np.array(p)
            tmpp[ipar] = pExtrm
            tmp_res = maximize(mod, tmpp, limits, ipfix=[ipar], verbose=False)
            dchi2 = 2*(lbest - (-tmp_res[2].fun))
            if dchi2 < (-2*tol):
                # A better maximum appeared while stepping; restart from it.
                if verbose:
                    print('@@ a new best fit is found looping ... @@')
                    print('%10.6g | %s \r'%(-tmp_res[2].fun,
                        ' '.join(['%10.3g'%xx for xx in tmp_res[0]])), end="")
                return errors(mod, tmp_res[0], ipars, **kwargs)
            isig += 0.5
            if isig >= 10:
                # Bug fix: the original code constructed a Warning object and
                # discarded it without raising or emitting anything; actually
                # warn the user so the unbound parameter is not silent.
                warnings.warn(('parameter %d appears to be unbound using sign=%d\n'
                        'Try using sign=%d')%(ipar, sign, -sign))
                pbest_e[ipar] = np.abs(pExtrm - pbest[ipar])
                not_bound = True
                break
        if not_bound: continue
        # -------------------------------------------------- #
        ## bisect on tmpp[ipar] until dchi2==DCHI2 with tolerance tol ##
        pExtrm2 = p[ipar]
        icount = 0
        while np.abs(dchi2-DCHI2)>=tol:
            icount += 1
            pHalf = (pExtrm + pExtrm2)/2.
            tmpp[ipar] = pHalf
            tmp_res = maximize(mod, tmpp, limits, ipfix=[ipar], verbose=False)
            dchi2 = 2*(lbest - (-tmp_res[2].fun))
            if dchi2 < (-2*tol):
                # Again, restart if a better maximum shows up mid-search.
                if verbose:
                    print('@@ a new best fit is found looping ... @@')
                    print('%10.6g | %s \r'%(-tmp_res[2].fun,
                        ' '.join(['%10.3g'%xx for xx in tmp_res[0]])), end="")
                return errors(mod, tmp_res[0], ipars, **kwargs)
            if verbose:
                print(' %10.6g %10.6g %10.6g %10.6g %10.6g\r'%(
                    lbest, -tmp_res[2].fun, p[ipar], pHalf, dchi2), end="")
            # Tighten the bracket on the side that still encloses DCHI2.
            if dchi2 < DCHI2:
                pExtrm2 = pHalf
            else:
                pExtrm = pHalf
            if icount >= 50: break
        pbest_e[ipar] = np.abs(pHalf - pbest[ipar])
        print()
    # get fit result again, so the return is consistent with the return of maximize
    res = maximize(mod, pbest, limits, verbose=False)
    # finally, if DCHI2 != 1; scale the errors, so the return always corresponds to 1-sigma errors
    error_scale = (st.norm.ppf((1-st.chi2.cdf(1, 1))/2) / st.norm.ppf((1-st.chi2.cdf(DCHI2, 1))/2))
    pbest_e[ipars] = pbest_e[ipars] * error_scale
    print('*'*20)
    print(' '.join(['%10.3g'%xx for xx in pbest]))
    print(' '.join(['%10.3g'%xx for xx in pbest_e]))
    print('*'*20)
    return pbest, pbest_e, res[2]
| [
"scipy.optimize.minimize",
"numpy.abs",
"numpy.random.randn",
"emcee.EnsembleSampler",
"numpy.zeros",
"numpy.expand_dims",
"numpy.clip",
"numpy.mean",
"numpy.array",
"numpy.diag",
"numpy.round",
"scipy.stats.chi2.cdf"
] | [((515, 527), 'numpy.array', 'np.array', (['p0'], {}), '(p0)\n', (523, 527), True, 'import numpy as np\n'), ((780, 792), 'numpy.array', 'np.array', (['g0'], {}), '(g0)\n', (788, 792), True, 'import numpy as np\n'), ((3139, 3197), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'logProb'], {'args': '[mod]'}), '(nwalkers, ndim, logProb, args=[mod])\n', (3160, 3197), False, 'import emcee\n'), ((4686, 4718), 'numpy.array', 'np.array', (['[p0[i] for i in ipfix]'], {}), '([p0[i] for i in ipfix])\n', (4694, 4718), True, 'import numpy as np\n'), ((6248, 6361), 'scipy.optimize.minimize', 'opt.minimize', (['f', 'p0[ivar]'], {'args': '(mod, info)', 'method': '"""BFGS"""', 'tol': '(0.0001)', 'jac': 'fprime', 'options': "{'gtol': 0.0001}"}), "(f, p0[ivar], args=(mod, info), method='BFGS', tol=0.0001, jac=\n fprime, options={'gtol': 0.0001})\n", (6260, 6361), True, 'import scipy.optimize as opt\n'), ((9086, 9100), 'numpy.array', 'np.array', (['step'], {}), '(step)\n', (9094, 9100), True, 'import numpy as np\n'), ((3347, 3383), 'numpy.mean', 'np.mean', (['sampler.acceptance_fraction'], {}), '(sampler.acceptance_fraction)\n', (3354, 3383), True, 'import numpy as np\n'), ((5007, 5032), 'numpy.zeros', 'np.zeros', (['npar', 'np.double'], {}), '(npar, np.double)\n', (5015, 5032), True, 'import numpy as np\n'), ((5614, 5639), 'numpy.zeros', 'np.zeros', (['npar', 'np.double'], {}), '(npar, np.double)\n', (5622, 5639), True, 'import numpy as np\n'), ((6643, 6668), 'numpy.zeros', 'np.zeros', (['npar', 'np.double'], {}), '(npar, np.double)\n', (6651, 6668), True, 'import numpy as np\n'), ((6670, 6695), 'numpy.zeros', 'np.zeros', (['npar', 'np.double'], {}), '(npar, np.double)\n', (6678, 6695), True, 'import numpy as np\n'), ((8270, 8285), 'numpy.array', 'np.array', (['pbest'], {}), '(pbest)\n', (8278, 8285), True, 'import numpy as np\n'), ((11553, 11565), 'numpy.array', 'np.array', (['p0'], {}), '(p0)\n', (11561, 11565), True, 'import numpy as np\n'), ((11703, 
11718), 'numpy.array', 'np.array', (['pbest'], {}), '(pbest)\n', (11711, 11718), True, 'import numpy as np\n'), ((11720, 11737), 'numpy.array', 'np.array', (['pbest_e'], {}), '(pbest_e)\n', (11728, 11737), True, 'import numpy as np\n'), ((14301, 14328), 'numpy.abs', 'np.abs', (['(pHalf - pbest[ipar])'], {}), '(pHalf - pbest[ipar])\n', (14307, 14328), True, 'import numpy as np\n'), ((3418, 3444), 'numpy.expand_dims', 'np.expand_dims', (['lchain', '(-1)'], {}), '(lchain, -1)\n', (3432, 3444), True, 'import numpy as np\n'), ((6604, 6625), 'numpy.diag', 'np.diag', (['res.hess_inv'], {}), '(res.hess_inv)\n', (6611, 6625), True, 'import numpy as np\n'), ((6770, 6793), 'numpy.clip', 'np.clip', (['xx', 'l[0]', 'l[1]'], {}), '(xx, l[0], l[1])\n', (6777, 6793), True, 'import numpy as np\n'), ((12200, 12211), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (12208, 12211), True, 'import numpy as np\n'), ((13303, 13324), 'numpy.abs', 'np.abs', (['(dchi2 - DCHI2)'], {}), '(dchi2 - DCHI2)\n', (13309, 13324), True, 'import numpy as np\n'), ((2992, 3023), 'numpy.random.randn', 'np.random.randn', (['nwalkers', 'ndim'], {}), '(nwalkers, ndim)\n', (3007, 3023), True, 'import numpy as np\n'), ((3060, 3083), 'numpy.clip', 'np.clip', (['xx', 'l[0]', 'l[1]'], {}), '(xx, l[0], l[1])\n', (3067, 3083), True, 'import numpy as np\n'), ((4943, 4966), 'numpy.clip', 'np.clip', (['xx', 'l[0]', 'l[1]'], {}), '(xx, l[0], l[1])\n', (4950, 4966), True, 'import numpy as np\n'), ((5550, 5573), 'numpy.clip', 'np.clip', (['xx', 'l[0]', 'l[1]'], {}), '(xx, l[0], l[1])\n', (5557, 5573), True, 'import numpy as np\n'), ((8453, 8464), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8461, 8464), True, 'import numpy as np\n'), ((12994, 13022), 'numpy.abs', 'np.abs', (['(pExtrm - pbest[ipar])'], {}), '(pExtrm - pbest[ipar])\n', (13000, 13022), True, 'import numpy as np\n'), ((14622, 14639), 'scipy.stats.chi2.cdf', 'st.chi2.cdf', (['(1)', '(1)'], {}), '(1, 1)\n', (14633, 14639), True, 'import scipy.stats as 
st\n'), ((14661, 14682), 'scipy.stats.chi2.cdf', 'st.chi2.cdf', (['DCHI2', '(1)'], {}), '(DCHI2, 1)\n', (14672, 14682), True, 'import scipy.stats as st\n'), ((753, 765), 'numpy.array', 'np.array', (['p0'], {}), '(p0)\n', (761, 765), True, 'import numpy as np\n'), ((6525, 6540), 'numpy.abs', 'np.abs', (['res.jac'], {}), '(res.jac)\n', (6531, 6540), True, 'import numpy as np\n'), ((6156, 6165), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (6162, 6165), True, 'import numpy as np\n'), ((9029, 9062), 'numpy.round', 'np.round', (['(lbest - step[-1][-1])', '(2)'], {}), '(lbest - step[-1][-1], 2)\n', (9037, 9062), True, 'import numpy as np\n'), ((8733, 8767), 'numpy.round', 'np.round', (['(lbest - step2[-1][-1])', '(2)'], {}), '(lbest - step2[-1][-1], 2)\n', (8741, 8767), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script for benchmarking HadRGD (with no line search) against several commonly
used methods: PGD, Frank-Wolfe (no line search), Mirror Descent.
Using smaller problem sizes as several prior algorithms do not scale well.
Can toggle three solution types:
1. x_true in interior of simplex.
2. x_true on boundary, moderately sparse.
3. x_true is a corner, so 1-sparse.
"""
import numpy as np
from SimplexProjections import *
import time as time
import copt
from PGD_Variants import PGD
from utils import *
from projection_simplex import projection_simplex_pivot
from Riemannian_algs import RGD
from ExpDescentAlg_utils import EDA
import pickle as pkl
import matplotlib.pyplot as plt
# Number of problem sizes to benchmark, and random trials per size.
num_dims = 10
num_trials_per_dim = 10
# initialize params and data storing matrices
tol = 1e-8       # convergence tolerance handed to each solver
max_iters = 500  # iteration cap for the fixed-step methods
# Wall-clock time per (size, trial) cell for each solver.
time_PGD_simplex = np.zeros((num_dims, num_trials_per_dim))
time_EDA = np.zeros((num_dims, num_trials_per_dim))
time_RGD_hadamard = np.zeros((num_dims, num_trials_per_dim))
time_PFW = np.zeros((num_dims, num_trials_per_dim))
# Iteration counts per (size, trial) cell for each solver.
num_iters_PGD_simplex = np.zeros((num_dims, num_trials_per_dim))
num_iters_EDA = np.zeros((num_dims, num_trials_per_dim))
num_iters_RGD_hadamard = np.zeros((num_dims, num_trials_per_dim))
num_iters_PFW = np.zeros((num_dims, num_trials_per_dim))
# Final objective/error values per (size, trial) cell for each solver.
err_PGD_simplex = np.zeros((num_dims, num_trials_per_dim))
err_EDA = np.zeros((num_dims, num_trials_per_dim))
err_RGD_hadamard = np.zeros((num_dims, num_trials_per_dim))
err_PFW = np.zeros((num_dims, num_trials_per_dim))
# Problem sizes n actually used; filled inside the loop below.
sizes = []
for i in range(num_dims):
    # Define parameters: problem sizes grow linearly from n = 500.
    n = 500 + 100*i
    sizes.append(n)
    # Under-determined least-squares system with m = 0.1*n measurements.
    m = int(np.ceil(0.1*n))
    A = np.random.rand(m,n)
    x_true = SampleSimplex(n) # Important: True Solution is in interior of Simplex.
    # x_true = CondatProject(np.random.rand(n))
    b = np.dot(A,x_true)
    # Largest eigenvalue estimate, used to choose a safe step size.
    Lipschitz_constant = PowerMethod(A)
    step_size = 1.99/Lipschitz_constant # aggressive but convergence still guaranteed.
    print('step_size is ' + str(step_size))
    Had_step_size = np.sqrt(n)*np.sqrt(step_size) # Heuristic but we find this to work?
    # Had_step_size = 10*step_size
    # Define objective function and gradient
    def cost(x):
        '''
        Least squares loss function, 0.5*||Ax - b||^2.
        '''
        temp = np.dot(A,x) - b
        return np.dot(temp,temp)/2
    def cost_grad(x):
        '''
        Gradient of least squares loss function.
        '''
        temp = np.dot(A,x) - b
        return np.dot(A.T,temp)
    # Hadamard parametrization variants of objective function and gradient
    def cost_H(z):
        '''
        Hadamard parametrized least squares cost. Mainly for use with autodiff.
        '''
        temp = np.dot(A,z*z) - b
        # NOTE(review): no factor of 1/2 here, unlike cost(); HadRGD therefore
        # minimizes twice the objective of the other methods — confirm this is
        # intentional before comparing tolerances across solvers.
        return np.dot(temp, temp)
    def cost_H_grad(z):
        '''
        Gradient of Hadamard parametrized least squares cost. For use in line search.
        '''
        # NOTE(review): the gradient of ||A(z*z)-b||^2/2 w.r.t. z is
        # 2*(A^T temp)*z; this returns (A^T temp)*z, i.e. it matches the
        # 1/2-scaled cost rather than cost_H above — verify consistency.
        temp = np.dot(A,z*z) - b
        return np.dot(A.T,temp)*z
    # Now perform num_trials_per_dim trials each
    for j in range(num_trials_per_dim):
        x0 = SampleSimplex(n) # Important: initialize in interior of simplex.
        z0 = np.sqrt(x0) # initialization for Hadamard methods.
        #z0 = np.random.randn(n)
        #z0 = z0/np.linalg.norm(z0)
        #x0 = z0*z0
        # PGD on simplex using Duchi's Algorithm
        start_time = time.time()
        err, num_iters = PGD(cost, cost_grad, DuchiProject, step_size, x0, max_iters, tol)
        time_PGD_simplex[i,j] = time.time() - start_time
        err_PGD_simplex[i,j] = err
        num_iters_PGD_simplex[i,j] = num_iters
        print(err)
        # RGD on sphere using Hadamard parametrization
        start_time = time.time()
        err, num_iters = RGD(cost_H, cost_H_grad, Had_step_size, z0, max_iters, tol)
        time_RGD_hadamard[i,j] = time.time() - start_time
        err_RGD_hadamard[i,j] = err
        num_iters_RGD_hadamard[i,j] = num_iters
        print(err)
        # Pairwise Frank-Wolfe
        # As FW on simplex is essentially a coordinate descent alg. it gets
        # more iterations.
        init_idx = np.random.randint(n)
        # FW starts from a random vertex (1-sparse corner) of the simplex.
        x0 = np.zeros(n)
        x0[init_idx] = 1.0
        cb = copt.utils.Trace(cost)
        sol = copt.minimize_frank_wolfe(cost, x0, LinMinOracle, x0_rep=init_idx,
                                  variant='pairwise', jac=cost_grad,
                                  step="DR", lipschitz=Lipschitz_constant,
                                  callback=cb, verbose=True, max_iter=int(np.ceil(np.sqrt(n)*max_iters)))
        # First traced iteration at which the objective drops below tol.
        success_idx = FindFirstLessThan(cb.trace_fx, tol)
        print(success_idx)
        print(cb.trace_fx[success_idx])
        time_PFW[i, j] = cb.trace_time[success_idx]
        err_PFW[i, j] = cb.trace_fx[success_idx]
        num_iters_PFW[i, j] = success_idx
        # Exponential/Mirror descent
        start_time = time.time()
        step_size_EDA = 20/Lipschitz_constant
        # NOTE(review): EDA is started from z0 = sqrt(x0), which does not lie
        # on the simplex — confirm this initialization is intended.
        err, num_iter = EDA(cost, cost_grad, step_size_EDA, z0, int(np.ceil(np.sqrt(n)*max_iters)), tol)
        time_EDA[i,j] = time.time() - start_time
        err_EDA[i,j] = err
        num_iters_EDA[i, j] = num_iter
        print(num_iter)
        print(err)
#PGD_simplex = np.mean(time_PGD_simplex, axis=1)
#PGD_hadamard = np.mean(time_PGD_hadamard, axis=1)
#RGD_hadamard = np.mean(time_RGD_hadamard, axis=1)
#FW = np.mean(time_FW, axis=1)
# EDA = np.mean(time_EDA, axis=1)
#PGD_simplex_iters = np.mean(num_iters_PGD_simplex, axis=1)
#PGD_hadamard_iters = np.mean(num_iters_PGD_hadamard, axis=1)
#RGD_hadamard_iters = np.mean(num_iters_RGD_hadamard, axis=1)
#plt.plot(sizes, PGD_simplex, label="PGD on simplex")
#plt.plot(sizes, PGD_hadamard, label="PGD using Hadamard")
#plt.plot(sizes, RGD_hadamard, label="HadRGD")
#plt.plot(sizes, FW, label="Frank-Wolfe")
#plt.plot(sizes, EDA, label="Exponential Descent Algorithm")
#plt.legend()
#plt.show()
#plt.savefig('LeastSquares_Interior_Time_Aug_27.png')
#plt.clf()
#plt.plot(sizes, PGD_simplex_iters, label="PGD on simplex")
#plt.plot(sizes, PGD_hadamard_iters, label="PGD using Hadamard")
#plt.plot(sizes, RGD_hadamard_iters, label="RGD using Hadamard")
#plt.plot(sizes, FW, label="Frank-Wolfe")
#plt.plot(sizes, EDA, label="Exponential Descent Algorithm")
#plt.legend()
#plt.show()
#plt.savefig('LeastSquares_Interior_Iters_Aug_27.png')
# Persist every benchmark matrix for later analysis/plotting.
# Bug fix: the key for the pairwise Frank-Wolfe iteration counts was
# previously "num_iters_PFW:" (stray trailing colon); fixed to match the
# naming of every other key.
results = {"time_PGD_simplex": time_PGD_simplex,
           "time_RGD_hadamard": time_RGD_hadamard,
           "time_EDA": time_EDA,
           "time_PFW": time_PFW,
           "err_PGD_simplex": err_PGD_simplex,
           "err_EDA": err_EDA,
           "err_RGD_hadamard": err_RGD_hadamard,
           "err_PFW": err_PFW,
           "num_iters_PGD_simplex": num_iters_PGD_simplex,
           "num_iters_EDA": num_iters_EDA,
           "num_iters_RGD_hadamard": num_iters_RGD_hadamard,
           "num_iters_PFW": num_iters_PFW,
           "sizes": sizes
           }
# Context manager guarantees the file handle closes even if pickling fails.
with open('Results/LeastSquaresBenchmarkResultsInterior_Oct_12.p', 'wb') as myFile:
    pkl.dump(results, myFile)
| [
"pickle.dump",
"numpy.ceil",
"Riemannian_algs.RGD",
"numpy.zeros",
"copt.utils.Trace",
"time.time",
"numpy.random.randint",
"PGD_Variants.PGD",
"numpy.random.rand",
"numpy.dot",
"numpy.sqrt"
] | [((878, 918), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (886, 918), True, 'import numpy as np\n'), ((930, 970), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (938, 970), True, 'import numpy as np\n'), ((991, 1031), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (999, 1031), True, 'import numpy as np\n'), ((1043, 1083), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1051, 1083), True, 'import numpy as np\n'), ((1109, 1149), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1117, 1149), True, 'import numpy as np\n'), ((1166, 1206), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1174, 1206), True, 'import numpy as np\n'), ((1232, 1272), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1240, 1272), True, 'import numpy as np\n'), ((1289, 1329), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1297, 1329), True, 'import numpy as np\n'), ((1349, 1389), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1357, 1389), True, 'import numpy as np\n'), ((1400, 1440), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1408, 1440), True, 'import numpy as np\n'), ((1460, 1500), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1468, 1500), True, 'import numpy as np\n'), ((1511, 1551), 'numpy.zeros', 'np.zeros', (['(num_dims, num_trials_per_dim)'], {}), '((num_dims, num_trials_per_dim))\n', (1519, 1551), True, 'import numpy as np\n'), 
((7084, 7109), 'pickle.dump', 'pkl.dump', (['results', 'myFile'], {}), '(results, myFile)\n', (7092, 7109), True, 'import pickle as pkl\n'), ((1695, 1715), 'numpy.random.rand', 'np.random.rand', (['m', 'n'], {}), '(m, n)\n', (1709, 1715), True, 'import numpy as np\n'), ((1855, 1872), 'numpy.dot', 'np.dot', (['A', 'x_true'], {}), '(A, x_true)\n', (1861, 1872), True, 'import numpy as np\n'), ((1671, 1687), 'numpy.ceil', 'np.ceil', (['(0.1 * n)'], {}), '(0.1 * n)\n', (1678, 1687), True, 'import numpy as np\n'), ((2062, 2072), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2069, 2072), True, 'import numpy as np\n'), ((2073, 2091), 'numpy.sqrt', 'np.sqrt', (['step_size'], {}), '(step_size)\n', (2080, 2091), True, 'import numpy as np\n'), ((2502, 2519), 'numpy.dot', 'np.dot', (['A.T', 'temp'], {}), '(A.T, temp)\n', (2508, 2519), True, 'import numpy as np\n'), ((2775, 2793), 'numpy.dot', 'np.dot', (['temp', 'temp'], {}), '(temp, temp)\n', (2781, 2793), True, 'import numpy as np\n'), ((3181, 3192), 'numpy.sqrt', 'np.sqrt', (['x0'], {}), '(x0)\n', (3188, 3192), True, 'import numpy as np\n'), ((3401, 3412), 'time.time', 'time.time', ([], {}), '()\n', (3410, 3412), True, 'import time as time\n'), ((3438, 3503), 'PGD_Variants.PGD', 'PGD', (['cost', 'cost_grad', 'DuchiProject', 'step_size', 'x0', 'max_iters', 'tol'], {}), '(cost, cost_grad, DuchiProject, step_size, x0, max_iters, tol)\n', (3441, 3503), False, 'from PGD_Variants import PGD\n'), ((3747, 3758), 'time.time', 'time.time', ([], {}), '()\n', (3756, 3758), True, 'import time as time\n'), ((3784, 3843), 'Riemannian_algs.RGD', 'RGD', (['cost_H', 'cost_H_grad', 'Had_step_size', 'z0', 'max_iters', 'tol'], {}), '(cost_H, cost_H_grad, Had_step_size, z0, max_iters, tol)\n', (3787, 3843), False, 'from Riemannian_algs import RGD\n'), ((4169, 4189), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (4186, 4189), True, 'import numpy as np\n'), ((4203, 4214), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', 
(4211, 4214), True, 'import numpy as np\n'), ((4255, 4277), 'copt.utils.Trace', 'copt.utils.Trace', (['cost'], {}), '(cost)\n', (4271, 4277), False, 'import copt\n'), ((4972, 4983), 'time.time', 'time.time', ([], {}), '()\n', (4981, 4983), True, 'import time as time\n'), ((2309, 2321), 'numpy.dot', 'np.dot', (['A', 'x'], {}), '(A, x)\n', (2315, 2321), True, 'import numpy as np\n'), ((2340, 2358), 'numpy.dot', 'np.dot', (['temp', 'temp'], {}), '(temp, temp)\n', (2346, 2358), True, 'import numpy as np\n'), ((2471, 2483), 'numpy.dot', 'np.dot', (['A', 'x'], {}), '(A, x)\n', (2477, 2483), True, 'import numpy as np\n'), ((2742, 2758), 'numpy.dot', 'np.dot', (['A', '(z * z)'], {}), '(A, z * z)\n', (2748, 2758), True, 'import numpy as np\n'), ((2944, 2960), 'numpy.dot', 'np.dot', (['A', '(z * z)'], {}), '(A, z * z)\n', (2950, 2960), True, 'import numpy as np\n'), ((2977, 2994), 'numpy.dot', 'np.dot', (['A.T', 'temp'], {}), '(A.T, temp)\n', (2983, 2994), True, 'import numpy as np\n'), ((3536, 3547), 'time.time', 'time.time', ([], {}), '()\n', (3545, 3547), True, 'import time as time\n'), ((3877, 3888), 'time.time', 'time.time', ([], {}), '()\n', (3886, 3888), True, 'import time as time\n'), ((5159, 5170), 'time.time', 'time.time', ([], {}), '()\n', (5168, 5170), True, 'import time as time\n'), ((5106, 5116), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (5113, 5116), True, 'import numpy as np\n'), ((4604, 4614), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (4611, 4614), True, 'import numpy as np\n')] |
""" This file defines policy optimization for a tensorflow policy. """
import copy
import json
import logging
import os
import pickle
import sys
import tempfile
import time
import traceback
import numpy as np
import tensorflow as tf
from gps.algorithm.policy_opt.config import POLICY_OPT_TF
from gps.algorithm.policy_opt.policy_opt import PolicyOpt
#from gps.algorithm.policy.tf_policy import TfPolicy
from policy_hooks.utils.tf_utils import TfSolver
from policy_hooks.utils.policy_solver_utils import *
from policy_hooks.tf_policy import TfPolicy
# Cap used in __init__ to derive the number of solver iterations per update.
MAX_UPDATE_SIZE = 10000
# Auxiliary network scopes managed alongside the per-task control scopes.
SCOPE_LIST = ['primitive', 'cont', 'label']
class ControlAttentionPolicyOpt(PolicyOpt):
""" Policy optimization using tensor flow for DAG computations/nonlinear function approximation. """
    def __init__(self, hyperparams, dO, dU, dPrimObs, dContObs, dValObs, primBounds, contBounds=None, inputs=None):
        """Build the tf graphs, solvers, policies and index maps for all scopes.

        Args:
            hyperparams: config dict merged on top of POLICY_OPT_TF defaults.
            dO: observation dimension for the control policy.
            dU: action dimension.
            dPrimObs: observation dimension for the primitive network.
            dContObs: observation dimension for the continuous network.
            dValObs: observation dimension for the value network.
            primBounds: list of (start, end) index bounds for primitive outputs.
            contBounds: optional list of (start, end) bounds for continuous outputs.
            inputs: optional pre-built input layer forwarded to the networks.
        """
        global tf
        import tensorflow as tf
        self.scope = hyperparams['scope'] if 'scope' in hyperparams else None
        # tf.reset_default_graph()
        config = copy.deepcopy(POLICY_OPT_TF)
        config.update(hyperparams)
        # With split_nets each task gets its own control scope; otherwise a
        # single 'control' scope is shared by all tasks.
        self.split_nets = hyperparams.get('split_nets', False)
        self.valid_scopes = ['control'] if not self.split_nets else list(config['task_list'])
        PolicyOpt.__init__(self, config, dO, dU)
        tf.set_random_seed(self._hyperparams['random_seed'])
        self.tf_iter = 0
        self.batch_size = self._hyperparams['batch_size']
        self.load_all = self._hyperparams.get('load_all', False)
        self.input_layer = inputs
        # Shared-memory buffers for exchanging serialized weights between
        # processes (see read_shared_weights / write_shared_weights).
        self.share_buffers = self._hyperparams.get('share_buffer', True)
        if self._hyperparams.get('share_buffer', True):
            self.buffers = self._hyperparams['buffers']
            self.buf_sizes = self._hyperparams['buffer_sizes']
        auxBounds = self._hyperparams.get('aux_boundaries', [])
        # Output dimensions are the largest upper bound over all output slices.
        self._dPrim = max([b[1] for b in primBounds] + [b[1] for b in auxBounds])
        self._dCont = max([b[1] for b in contBounds]) if contBounds is not None and len(contBounds) else 0
        self._dPrimObs = dPrimObs
        self._dContObs = dContObs
        self._dValObs = dValObs
        self._primBounds = primBounds
        self._contBounds = contBounds if contBounds is not None else []
        self.load_label = self._hyperparams['load_label']
        self.task_map = {}
        self.device_string = "/cpu:0"
        if self._hyperparams['use_gpu'] == 1:
            self.gpu_device = self._hyperparams['gpu_id']
            self.device_string = "/gpu:" + str(self.gpu_device)
        self.act_op = None # mu_hat
        self.feat_op = None # features
        self.loss_scalar = None
        self.obs_tensor = None
        self.precision_tensor = None
        self.action_tensor = None # mu true
        self.solver = None
        self.feat_vals = None
        self.init_network()
        self.init_solver()
        # Per-task action noise variance; "" is the catch-all/default entry.
        self.var = {task: self._hyperparams['init_var'] * np.ones(dU) for task in self.task_map}
        self.var[""] = self._hyperparams['init_var'] * np.ones(dU)
        self.distilled_var = self._hyperparams['init_var'] * np.ones(dU)
        self.weight_dir = self._hyperparams['weight_dir']
        self.scope = self._hyperparams['scope'] if 'scope' in self._hyperparams else None
        self.last_pkl_t = time.time()
        self.cur_pkl = 0
        self.gpu_fraction = self._hyperparams['gpu_fraction']
        if not self._hyperparams['allow_growth']:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.gpu_fraction)
        else:
            gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
        init_op = tf.initialize_all_variables()
        self.sess.run(init_op)
        self.init_policies(dU)
        # Optionally warm-start individual scopes from pre-trained checkpoints:
        # low-level (control) scopes from ll_policy, high-level scopes from
        # hl_policy/cont_policy.
        llpol = hyperparams.get('ll_policy', '')
        hlpol = hyperparams.get('hl_policy', '')
        contpol = hyperparams.get('cont_policy', '')
        scopes = self.valid_scopes + SCOPE_LIST if self.scope is None else [self.scope]
        for scope in scopes:
            if len(llpol) and scope in self.valid_scopes:
                self.restore_ckpt(scope, dirname=llpol)
            if len(hlpol) and scope not in self.valid_scopes:
                self.restore_ckpt(scope, dirname=hlpol)
            if len(contpol) and scope not in self.valid_scopes:
                self.restore_ckpt(scope, dirname=contpol)
        # List of indices for state (vector) data and image (tensor) data in observation.
        self.x_idx, self.img_idx, i = [], [], 0
        if 'obs_image_data' not in self._hyperparams['network_params']:
            self._hyperparams['network_params'].update({'obs_image_data': []})
        for sensor in self._hyperparams['network_params']['obs_include']:
            dim = self._hyperparams['network_params']['sensor_dims'][sensor]
            if sensor in self._hyperparams['network_params']['obs_image_data']:
                self.img_idx = self.img_idx + list(range(i, i+dim))
            else:
                self.x_idx = self.x_idx + list(range(i, i+dim))
            i += dim
        # Same index bookkeeping for the primitive-network observation layout.
        self.prim_x_idx, self.prim_img_idx, i = [], [], 0
        for sensor in self._hyperparams['primitive_network_params']['obs_include']:
            dim = self._hyperparams['primitive_network_params']['sensor_dims'][sensor]
            if sensor in self._hyperparams['primitive_network_params']['obs_image_data']:
                self.prim_img_idx = self.prim_img_idx + list(range(i, i+dim))
            else:
                self.prim_x_idx = self.prim_x_idx + list(range(i, i+dim))
            i += dim
        # ... and for the continuous-network observation layout.
        self.cont_x_idx, self.cont_img_idx, i = [], [], 0
        for sensor in self._hyperparams['cont_network_params']['obs_include']:
            dim = self._hyperparams['cont_network_params']['sensor_dims'][sensor]
            if sensor in self._hyperparams['cont_network_params']['obs_image_data']:
                self.cont_img_idx = self.cont_img_idx + list(range(i, i+dim))
            else:
                self.cont_x_idx = self.cont_x_idx + list(range(i, i+dim))
            i += dim
        # ... and for the label-network observation layout.
        self.label_x_idx, self.label_img_idx, i = [], [], 0
        for sensor in self._hyperparams['label_network_params']['obs_include']:
            dim = self._hyperparams['label_network_params']['sensor_dims'][sensor]
            if sensor in self._hyperparams['label_network_params']['obs_image_data']:
                self.label_img_idx = self.label_img_idx + list(range(i, i+dim))
            else:
                self.label_x_idx = self.label_x_idx + list(range(i, i+dim))
            i += dim
        self.update_count = 0
        # High-level (primitive/cont) networks use their own update threshold.
        if self.scope in ['primitive', 'cont']:
            self.update_size = self._hyperparams['prim_update_size']
        else:
            self.update_size = self._hyperparams['update_size']
        self.update_size *= (1 + self._hyperparams.get('permute_hl', 0))
        self.train_iters = 0
        self.average_losses = []
        self.average_val_losses = []
        self.average_error = []
        self.N = 0
        self.n_updates = 0
        self.lr_scale = 0.9975
        self.lr_policy = 'fixed'
        self._hyperparams['iterations'] = MAX_UPDATE_SIZE // self.batch_size + 1
def restore_ckpts(self, label=None):
success = False
for scope in self.valid_scopes + SCOPE_LIST:
success = success or self.restore_ckpt(scope, label)
return success
def restore_ckpt(self, scope, label=None, dirname=''):
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope+'/')
if not len(variables): return False
self.saver = tf.train.Saver(variables)
ext = ''
if label is not None:
ext = '_{0}'.format(label)
success = True
if not len(dirname):
dirname = self.weight_dir
try:
if dirname[-1] == '/':
dirname = dirname[:-1]
self.saver.restore(self.sess, 'tf_saved/'+dirname+'/'+scope+'{0}.ckpt'.format(ext))
if scope in self.task_map:
self.task_map[scope]['policy'].scale = np.load('tf_saved/'+dirname+'/'+scope+'_scale{0}.npy'.format(ext))
self.task_map[scope]['policy'].bias = np.load('tf_saved/'+dirname+'/'+scope+'_bias{0}.npy'.format(ext))
#self.var[scope] = np.load('tf_saved/'+dirname+'/'+scope+'_variance{0}.npy'.format(ext))
#self.task_map[scope]['policy'].chol_pol_covar = np.diag(np.sqrt(self.var[scope]))
self.write_shared_weights([scope])
print(('Restored', scope, 'from', dirname))
except Exception as e:
print(('Could not restore', scope, 'from', dirname))
print(e)
success = False
return success
def write_shared_weights(self, scopes=None):
if scopes is None:
scopes = self.valid_scopes + SCOPE_LIST
for scope in scopes:
wts = self.serialize_weights([scope])
with self.buf_sizes[scope].get_lock():
self.buf_sizes[scope].value = len(wts)
self.buffers[scope][:len(wts)] = wts
def read_shared_weights(self, scopes=None):
if scopes is None:
scopes = self.valid_scopes + SCOPE_LIST
for scope in scopes:
start_t = time.time()
skip = False
with self.buf_sizes[scope].get_lock():
if self.buf_sizes[scope].value == 0: skip = True
wts = self.buffers[scope][:self.buf_sizes[scope].value]
wait_t = time.time() - start_t
if wait_t > 0.1 and scope == 'primitive': print('Time waiting on lock:', wait_t)
#if self.buf_sizes[scope].value == 0: skip = True
#wts = self.buffers[scope][:self.buf_sizes[scope].value]
if skip: continue
try:
self.deserialize_weights(wts)
except Exception as e:
#traceback.print_exception(*sys.exc_info())
if not skip:
print(e)
print('Could not load {0} weights from {1}'.format(scope, self.scope), e)
def serialize_weights(self, scopes=None, save=True):
    """Pickle the variable values and policy scale/bias for `scopes`.

    Args:
        scopes: scope names to serialize; defaults to every valid scope
            plus SCOPE_LIST.
        save: when True, also checkpoint the scopes to disk via
            store_scope_weights.

    Returns:
        A pickle byte string of [scopes, var_to_val, scales, biases,
        variances], the format deserialize_weights expects.
    """
    if scopes is None:
        scopes = self.valid_scopes + SCOPE_LIST
    var_to_val = {}
    for scope in scopes:
        variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope+'/')
        for v in variables:
            # .tolist() makes the values plain Python, so the pickle does
            # not depend on numpy dtypes.
            var_to_val[v.name] = self.sess.run(v).tolist()
    # Observation-normalization parameters for each task-mapped scope.
    scales = {task: self.task_map[task]['policy'].scale.tolist() for task in scopes if task in self.task_map}
    biases = {task: self.task_map[task]['policy'].bias.tolist() for task in scopes if task in self.task_map}
    if hasattr(self, 'prim_policy') and 'primitive' in scopes:
        scales['primitive'] = self.prim_policy.scale.tolist()
        biases['primitive'] = self.prim_policy.bias.tolist()
    if hasattr(self, 'cont_policy') and 'cont' in scopes:
        scales['cont'] = self.cont_policy.scale.tolist()
        biases['cont'] = self.cont_policy.bias.tolist()
    #variances = {task: self.var[task].tolist() for task in scopes if task in self.task_map}
    variances = {}
    # Empty-string entries — presumably sentinels for consumers that index
    # with ''; verify against deserialize callers before removing.
    scales[''] = []
    biases[''] = []
    variances[''] = []
    if save: self.store_scope_weights(scopes=scopes)
    return pickle.dumps([scopes, var_to_val, scales, biases, variances])
def deserialize_weights(self, json_wts, save=False):
    """Load pickled weights (produced by serialize_weights) into the graph.

    Args:
        json_wts: pickle byte string of [scopes, var_to_val, scales,
            biases, variances]. (Name is historical; the payload is
            pickle, not JSON.)
        save: when True, re-checkpoint the loaded scopes to disk.
    """
    scopes, var_to_val, scales, biases, variances = pickle.loads(json_wts)
    for scope in scopes:
        variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope+'/')
        for var in variables:
            # var.load assigns the value directly in the given session.
            var.load(var_to_val[var.name], session=self.sess)
        if scope == 'primitive' and hasattr(self, 'prim_policy'):
            self.prim_policy.scale = np.array(scales[scope])
            self.prim_policy.bias = np.array(biases[scope])
        if scope == 'cont' and hasattr(self, 'cont_policy'):
            self.cont_policy.scale = np.array(scales[scope])
            self.cont_policy.bias = np.array(biases[scope])
        # Only task-mapped scopes carry per-task scale/bias below.
        if scope not in self.valid_scopes: continue
        # if save:
        #     np.save('tf_saved/'+self.weight_dir+'/control'+'_scale', scales['control'])
        #     np.save('tf_saved/'+self.weight_dir+'/control'+'_bias', biases['control'])
        #     np.save('tf_saved/'+self.weight_dir+'/control'+'_variance', variances['control'])
        #self.task_map[scope]['policy'].chol_pol_covar = np.diag(np.sqrt(np.array(variances[scope])))
        self.task_map[scope]['policy'].scale = np.array(scales[scope])
        self.task_map[scope]['policy'].bias = np.array(biases[scope])
        #self.var[scope] = np.array(variances[scope])
    if save: self.store_scope_weights(scopes=scopes)
def update_weights(self, scope, weight_dir=None):
    """Restore the variables for `scope` from its checkpoint on disk.

    Args:
        scope: scope name whose checkpoint to load.
        weight_dir: directory under tf_saved/; defaults to self.weight_dir.
    """
    target_dir = self.weight_dir if weight_dir is None else weight_dir
    checkpoint_path = 'tf_saved/' + target_dir + '/' + scope + '.ckpt'
    self.saver.restore(self.sess, checkpoint_path)
def store_scope_weights(self, scopes, weight_dir=None, lab=''):
    """Checkpoint graph variables (and policy scale/bias) for each scope.

    Args:
        scopes: scope names to save.
        weight_dir: directory under tf_saved/; defaults to self.weight_dir.
        lab: optional suffix appended to the checkpoint/npy filenames.
    """
    if weight_dir is None:
        weight_dir = self.weight_dir
    for scope in scopes:
        try:
            variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope+'/')
            # Fresh Saver per scope so only that scope's variables land in
            # the checkpoint.
            saver = tf.train.Saver(variables)
            saver.save(self.sess, 'tf_saved/'+weight_dir+'/'+scope+'{0}.ckpt'.format(lab))
        except:
            # Best-effort save: log the failure but keep training alive.
            print('Saving variables encountered an issue but it will not crash:')
            traceback.print_exception(*sys.exc_info())
        if scope in self.task_map:
            # Normalization stats live outside the graph; save as .npy.
            policy = self.task_map[scope]['policy']
            np.save('tf_saved/'+weight_dir+'/'+scope+'_scale{0}'.format(lab), policy.scale)
            np.save('tf_saved/'+weight_dir+'/'+scope+'_bias{0}'.format(lab), policy.bias)
            #np.save('tf_saved/'+weight_dir+'/'+scope+'_variance{0}'.format(lab), self.var[scope])
def store_weights(self, weight_dir=None):
    """Save weights for this process's scope(s) to disk.

    Args:
        weight_dir: directory under tf_saved/; None falls back to
            self.weight_dir inside store_scope_weights.
    """
    if self.scope is not None:
        scopes = [self.scope]
    else:
        scopes = self.valid_scopes + SCOPE_LIST
    self.store_scope_weights(scopes, weight_dir)
def get_data(self):
    """Return the cached training and validation buffers as one list:
    [mu, obs, prc, wt, val_mu, val_obs, val_prc, val_wt]."""
    train_part = [self.mu, self.obs, self.prc, self.wt]
    val_part = [self.val_mu, self.val_obs, self.val_prc, self.val_wt]
    return train_part + val_part
def update_lr(self):
    """Decay both learning rates by lr_scale under the 'linear' schedule;
    any other schedule leaves them untouched."""
    if self.method != 'linear':
        return
    self.cur_lr = self.cur_lr * self.lr_scale
    self.cur_hllr = self.cur_hllr * self.lr_scale
def _create_network(self, name, info):
    """Build one named policy network and register its tensors/ops.

    Args:
        name: variable scope and registry key for the network.
        info: dict with 'network_model' (builder function),
            'network_params', 'dO', 'dOut', 'batch_size', 'input_layer'.
    """
    with tf.variable_scope(name):
        # Temperature/eta placeholder; defaults to 1 when not fed.
        self.etas[name] = tf.placeholder_with_default(1., shape=())
        tf_map_generator = info['network_model']
        info['network_params']['eta'] = self.etas[name]
        #self.class_tensors[name] = tf.placeholder(shape=[None, 1], dtype='float32')
        tf_map, fc_vars, last_conv_vars = tf_map_generator(dim_input=info['dO'], \
                dim_output=info['dOut'], \
                batch_size=info['batch_size'], \
                network_config=info['network_params'], \
                input_layer=info['input_layer'])
        # Cache handles to the network's tensors and ops under `name`.
        self.obs_tensors[name] = tf_map.get_input_tensor()
        self.precision_tensors[name] = tf_map.get_precision_tensor()
        self.action_tensors[name] = tf_map.get_target_output_tensor()
        self.act_ops[name] = tf_map.get_output_op()
        self.feat_ops[name] = tf_map.get_feature_op()
        self.loss_scalars[name] = tf_map.get_loss_op()
        self.fc_vars[name] = fc_vars
        self.last_conv_vars[name] = last_conv_vars
def init_network(self):
    """Helper method to initialize the tf networks used.

    Builds up to four groups of networks, gated by self.scope /
    self.load_all: the 'primitive' task-selection network, the 'cont'
    continuous-parameter network (only when self._contBounds is
    non-empty), one control network per scope in self.valid_scopes, and
    the optional binary 'label' network (when self.load_label).
    """
    input_tensor = None
    # --- Primitive (task-selection) network ------------------------------
    if self.load_all or self.scope is None or 'primitive' == self.scope:
        with tf.variable_scope('primitive'):
            # Use the externally supplied input layer only when this
            # process owns the scope.
            inputs = self.input_layer if 'primitive' == self.scope else None
            self.primitive_eta = tf.placeholder_with_default(1., shape=())
            tf_map_generator = self._hyperparams['primitive_network_model']
            self.primitive_class_tensor = None
            tf_map, fc_vars, last_conv_vars = tf_map_generator(dim_input=self._dPrimObs, \
                    dim_output=self._dPrim, \
                    batch_size=self.batch_size, \
                    network_config=self._hyperparams['primitive_network_params'], \
                    input_layer=inputs, \
                    eta=self.primitive_eta)
            # Cache tensor/op handles for training and inference.
            self.primitive_obs_tensor = tf_map.get_input_tensor()
            self.primitive_precision_tensor = tf_map.get_precision_tensor()
            self.primitive_action_tensor = tf_map.get_target_output_tensor()
            self.primitive_act_op = tf_map.get_output_op()
            self.primitive_feat_op = tf_map.get_feature_op()
            self.primitive_loss_scalar = tf_map.get_loss_op()
            self.primitive_fc_vars = fc_vars
            self.primitive_last_conv_vars = last_conv_vars
            self.primitive_aux_losses = tf_map.aux_loss_ops
            # Setup the gradients
            #self.primitive_grads = [tf.gradients(self.primitive_act_op[:,u], self.primitive_obs_tensor)[0] for u in range(self._dPrim)]
    # --- Continuous-parameter network (only if cont bounds exist) ---------
    if (self.load_all or self.scope is None or 'cont' == self.scope) and len(self._contBounds):
        with tf.variable_scope('cont'):
            inputs = self.input_layer if 'cont' == self.scope else None
            self.cont_eta = tf.placeholder_with_default(1., shape=())
            tf_map_generator = self._hyperparams['cont_network_model']
            tf_map, fc_vars, last_conv_vars = tf_map_generator(dim_input=self._dContObs, \
                    dim_output=self._dCont, \
                    batch_size=self.batch_size, \
                    network_config=self._hyperparams['cont_network_params'], \
                    input_layer=inputs, \
                    eta=self.cont_eta)
            self.cont_obs_tensor = tf_map.get_input_tensor()
            self.cont_precision_tensor = tf_map.get_precision_tensor()
            self.cont_action_tensor = tf_map.get_target_output_tensor()
            self.cont_act_op = tf_map.get_output_op()
            self.cont_feat_op = tf_map.get_feature_op()
            self.cont_loss_scalar = tf_map.get_loss_op()
            self.cont_fc_vars = fc_vars
            self.cont_last_conv_vars = last_conv_vars
            self.cont_aux_losses = tf_map.aux_loss_ops
    # --- One low-level control network per valid scope --------------------
    for scope in self.valid_scopes:
        if self.scope is None or scope == self.scope:
            with tf.variable_scope(scope):
                self.task_map[scope] = {}
                tf_map_generator = self._hyperparams['network_model']
                tf_map, fc_vars, last_conv_vars = tf_map_generator(dim_input=self._dO, \
                        dim_output=self._dU, \
                        batch_size=self.batch_size, \
                        network_config=self._hyperparams['network_params'], \
                        input_layer=self.input_layer)
                self.task_map[scope]['obs_tensor'] = tf_map.get_input_tensor()
                self.task_map[scope]['precision_tensor'] = tf_map.get_precision_tensor()
                self.task_map[scope]['action_tensor'] = tf_map.get_target_output_tensor()
                self.task_map[scope]['act_op'] = tf_map.get_output_op()
                self.task_map[scope]['feat_op'] = tf_map.get_feature_op()
                self.task_map[scope]['loss_scalar'] = tf_map.get_loss_op()
                self.task_map[scope]['fc_vars'] = fc_vars
                self.task_map[scope]['last_conv_vars'] = last_conv_vars
                # Setup the gradients
                #self.task_map[scope]['grads'] = [tf.gradients(self.task_map[scope]['act_op'][:,u], self.task_map[scope]['obs_tensor'])[0] for u in range(self._dU)]
    # --- Optional binary 'label' classifier network ------------------------
    # Reuses the primitive network model with dim_output fixed to 2.
    if (self.scope is None or 'label' == self.scope) and self.load_label:
        with tf.variable_scope('label'):
            inputs = self.input_layer if 'label' == self.scope else None
            self.label_eta = tf.placeholder_with_default(1., shape=())
            tf_map_generator = self._hyperparams['primitive_network_model']
            self.label_class_tensor = None
            tf_map, fc_vars, last_conv_vars = tf_map_generator(dim_input=self._dPrimObs, \
                    dim_output=2, \
                    batch_size=self.batch_size, \
                    network_config=self._hyperparams['label_network_params'], \
                    input_layer=inputs, \
                    eta=self.label_eta)
            self.label_obs_tensor = tf_map.get_input_tensor()
            self.label_precision_tensor = tf_map.get_precision_tensor()
            self.label_action_tensor = tf_map.get_target_output_tensor()
            self.label_act_op = tf_map.get_output_op()
            self.label_feat_op = tf_map.get_feature_op()
            self.label_loss_scalar = tf_map.get_loss_op()
            self.label_fc_vars = fc_vars
            self.label_last_conv_vars = last_conv_vars
            self.label_aux_losses = tf_map.aux_loss_ops
            # Setup the gradients
            #self.primitive_grads = [tf.gradients(self.primitive_act_op[:,u], self.primitive_obs_tensor)[0] for u in range(self._dPrim)]
def init_solver(self):
    """Helper method to initialize the solver.

    Creates one TfSolver per network built in init_network. The
    high-level networks (primitive/cont/label) train with the 'hllr'
    rate via self.hllr_tensor; the per-scope control networks train with
    'lr' via self.lr_tensor.
    """
    # Weight decay is fed at run time through this placeholder.
    self.dec_tensor = tf.placeholder('float', name='weight_dec')#tf.Variable(initial_value=self._hyperparams['prim_weight_decay'], name='weightdec')
    if self.scope is None or 'primitive' == self.scope:
        self.cur_hllr = self._hyperparams['hllr']
        self.hllr_tensor = tf.Variable(initial_value=self._hyperparams['hllr'], name='hllr')
        self.cur_dec = self._hyperparams['prim_weight_decay']
        # Optimize only the variables that live under this scope.
        vars_to_opt = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='primitive/')
        with tf.variable_scope('primitive'):
            self.primitive_solver = TfSolver(loss_scalar=self.primitive_loss_scalar,
                                             solver_name=self._hyperparams['solver_type'],
                                             base_lr=self.hllr_tensor,
                                             lr_policy=self._hyperparams['lr_policy'],
                                             momentum=self._hyperparams['momentum'],
                                             weight_decay=self.dec_tensor,#self._hyperparams['prim_weight_decay'],
                                             #weight_decay=self._hyperparams['prim_weight_decay'],
                                             fc_vars=self.primitive_fc_vars,
                                             last_conv_vars=self.primitive_last_conv_vars,
                                             vars_to_opt=vars_to_opt,
                                             aux_losses=self.primitive_aux_losses)
    if (self.scope is None or 'cont' == self.scope) and len(self._contBounds):
        # NOTE: re-binds self.cur_hllr / self.hllr_tensor / self.cur_dec
        # set above when both scopes are built in one process.
        self.cur_hllr = self._hyperparams['hllr']
        self.hllr_tensor = tf.Variable(initial_value=self._hyperparams['hllr'], name='hllr')
        self.cur_dec = self._hyperparams['cont_weight_decay']
        vars_to_opt = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='cont/')
        with tf.variable_scope('cont'):
            self.cont_solver = TfSolver(loss_scalar=self.cont_loss_scalar,
                                        solver_name=self._hyperparams['solver_type'],
                                        base_lr=self.hllr_tensor,
                                        lr_policy=self._hyperparams['lr_policy'],
                                        momentum=self._hyperparams['momentum'],
                                        weight_decay=self.dec_tensor,
                                        fc_vars=self.cont_fc_vars,
                                        last_conv_vars=self.cont_last_conv_vars,
                                        vars_to_opt=vars_to_opt,
                                        aux_losses=self.cont_aux_losses)
    self.lr_tensor = tf.Variable(initial_value=self._hyperparams['lr'], name='lr')
    self.cur_lr = self._hyperparams['lr']
    for scope in self.valid_scopes:
        if self.scope is None or scope == self.scope:
            self.cur_dec = self._hyperparams['weight_decay']
            vars_to_opt = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope+'/')
            with tf.variable_scope(scope):
                # Control solvers use a fixed decay value, not dec_tensor.
                self.task_map[scope]['solver'] = TfSolver(loss_scalar=self.task_map[scope]['loss_scalar'],
                                                          solver_name=self._hyperparams['solver_type'],
                                                          base_lr=self.lr_tensor,
                                                          lr_policy=self._hyperparams['lr_policy'],
                                                          momentum=self._hyperparams['momentum'],
                                                          #weight_decay=self.dec_tensor,#self._hyperparams['weight_decay'],
                                                          weight_decay=self._hyperparams['weight_decay'],
                                                          fc_vars=self.task_map[scope]['fc_vars'],
                                                          last_conv_vars=self.task_map[scope]['last_conv_vars'],
                                                          vars_to_opt=vars_to_opt)
    if self.load_label and (self.scope is None or 'label' == self.scope):
        self.cur_hllr = self._hyperparams['hllr']
        self.hllr_tensor = tf.Variable(initial_value=self._hyperparams['hllr'], name='hllr')
        self.cur_dec = self._hyperparams['prim_weight_decay']
        vars_to_opt = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='label/')
        with tf.variable_scope('label'):
            self.label_solver = TfSolver(loss_scalar=self.label_loss_scalar,
                                         solver_name=self._hyperparams['solver_type'],
                                         base_lr=self.hllr_tensor,
                                         lr_policy=self._hyperparams['lr_policy'],
                                         momentum=self._hyperparams['momentum'],
                                         weight_decay=self.dec_tensor,#self._hyperparams['prim_weight_decay'],
                                         #weight_decay=self._hyperparams['prim_weight_decay'],
                                         fc_vars=self.label_fc_vars,
                                         last_conv_vars=self.label_last_conv_vars,
                                         vars_to_opt=vars_to_opt,
                                         aux_losses=self.label_aux_losses)
def get_policy(self, task):
    """Return the policy object for `task`.

    'primitive', 'cont', and 'label' map to their dedicated policies;
    anything else is looked up in self.task_map.
    """
    special_policies = {
        'primitive': 'prim_policy',
        'cont': 'cont_policy',
        'label': 'label_policy',
    }
    if task in special_policies:
        return getattr(self, special_policies[task])
    return self.task_map[task]['policy']
def init_policies(self, dU):
    """Wrap the networks built by init_network in TfPolicy objects.

    Args:
        dU: action dimension for the per-scope control policies.
    """
    if self.load_all or self.scope is None or self.scope == 'primitive':
        # High-level policies are created with normalize=False.
        self.prim_policy = TfPolicy(self._dPrim,
                                    self.primitive_obs_tensor,
                                    self.primitive_act_op,
                                    self.primitive_feat_op,
                                    np.zeros(self._dPrim),
                                    self.sess,
                                    self.device_string,
                                    copy_param_scope=None,
                                    normalize=False)
    if (self.load_all or self.scope is None or self.scope == 'cont') and len(self._contBounds):
        self.cont_policy = TfPolicy(self._dCont,
                                    self.cont_obs_tensor,
                                    self.cont_act_op,
                                    self.cont_feat_op,
                                    np.zeros(self._dCont),
                                    self.sess,
                                    self.device_string,
                                    copy_param_scope=None,
                                    normalize=False)
    for scope in self.valid_scopes:
        # Normalize only when IM_ENUM (presumably image observations —
        # verify against the obs_include convention) is absent.
        normalize = IM_ENUM not in self._hyperparams['network_params']['obs_include']
        if self.scope is None or scope == self.scope:
            self.task_map[scope]['policy'] = TfPolicy(dU,
                                                      self.task_map[scope]['obs_tensor'],
                                                      self.task_map[scope]['act_op'],
                                                      self.task_map[scope]['feat_op'],
                                                      np.zeros(dU),
                                                      self.sess,
                                                      self.device_string,
                                                      normalize=normalize,
                                                      copy_param_scope=None)
    if self.load_label and (self.scope is None or self.scope == 'label'):
        # Label network is a binary classifier, hence output dim 2.
        self.label_policy = TfPolicy(2,
                                     self.label_obs_tensor,
                                     self.label_act_op,
                                     self.label_feat_op,
                                     np.zeros(2),
                                     self.sess,
                                     self.device_string,
                                     copy_param_scope=None,
                                     normalize=False)
def task_acc(self, obs, tgt_mu, prc, piecewise=False, scalar=True):
    """Accuracy of the primitive network's argmax predictions vs targets.

    Args:
        obs: sequence of observations, one per sample.
        tgt_mu: target one-hot array indexed as tgt_mu[n, lo:hi] per
            bound in self._primBounds.
        prc: precision weights — unused, kept for interface compatibility.
        piecewise: if True, score each output head independently; if
            False (and scalar), a sample counts only when every head is
            correct.
        scalar: if True return a single mean accuracy, else the per-head
            mean accuracies (array of len(self._primBounds)).

    Returns:
        A scalar mean accuracy, or a per-head array when scalar=False.
    """
    # Fix: removed the unused local `task` and rewrote the manual
    # append loops as comprehensions; behavior is unchanged.
    acc = []
    for n in range(len(obs)):
        distrs = self.task_distr(obs[n])
        labels = [tgt_mu[n, lo:hi] for lo, hi in self._primBounds]
        # 1 where the predicted argmax matches the target argmax.
        accs = [1 if np.argmax(d) == np.argmax(l) else 0
                for d, l in zip(distrs, labels)]
        if piecewise or not scalar:
            acc.append(accs)
        else:
            # All-or-nothing: broadcast the min over every head.
            acc.append(np.min(accs) * np.ones(len(accs)))
    if scalar:
        return np.mean(acc)
    return np.mean(acc, axis=0)
def cont_task(self, obs, eta=1.):
    """Evaluate the continuous-parameter network on one observation and
    split the flattened output into segments by self._contBounds.

    Args:
        obs: observation array; a 1-D input is reshaped to (1, -1).
        eta: temperature fed to the network's eta placeholder.

    Returns:
        List of output slices, one per bound in self._contBounds.
    """
    if obs.ndim < 2:
        obs = obs.reshape(1, -1)
    feed = {self.cont_obs_tensor: obs,
            self.cont_eta: eta,
            self.dec_tensor: self.cur_dec}
    vals = self.sess.run(self.cont_act_op, feed_dict=feed)[0].flatten()
    return [vals[lo:hi] for lo, hi in self._contBounds]
def task_distr(self, obs, eta=1.):
    """Evaluate the primitive network on one observation and split the
    flattened output into per-head segments by self._primBounds.

    Args:
        obs: observation array; a 1-D input is reshaped to (1, -1).
        eta: temperature fed to the network's eta placeholder.

    Returns:
        List of output slices, one per bound in self._primBounds.
    """
    if obs.ndim < 2:
        obs = obs.reshape(1, -1)
    feed = {self.primitive_obs_tensor: obs,
            self.primitive_eta: eta,
            self.dec_tensor: self.cur_dec}
    distr = self.sess.run(self.primitive_act_op, feed_dict=feed)[0].flatten()
    return [distr[lo:hi] for lo, hi in self._primBounds]
def label_distr(self, obs, eta=1.):
    """Evaluate the label (binary classifier) network on one observation.

    Args:
        obs: observation array; a 1-D input is reshaped to (1, -1).
        eta: temperature fed to the network's eta placeholder.

    Returns:
        The first row of the network's output (not flattened/split).
    """
    if obs.ndim < 2:
        obs = obs.reshape(1, -1)
    feed = {self.label_obs_tensor: obs,
            self.label_eta: eta,
            self.dec_tensor: self.cur_dec}
    return self.sess.run(self.label_act_op, feed_dict=feed)[0]
def check_task_error(self, obs, mu):
    """Mean fraction of primitive heads whose argmax disagrees with the
    corresponding segment of `mu`, averaged over `obs`; the result is
    also appended to self.average_error."""
    total = 0.
    for o in obs:
        distrs = self.task_distr(o)
        offset = 0
        for d in distrs:
            target_seg = mu[offset:offset + len(d)]
            if np.argmax(d) != np.argmax(target_seg):
                # Each head contributes equally to a sample's error.
                total += 1. / len(distrs)
            offset += len(d)
    mean_err = total / len(obs)
    self.average_error.append(mean_err)
    return mean_err
def check_validation(self, obs, tgt_mu, tgt_prc, task="control"):
    """Compute the validation loss for one task's network (train=False,
    so no optimizer step is taken).

    Args:
        obs: batch of observations.
        tgt_mu: batch of target outputs.
        tgt_prc: batch of precision matrices.
        task: 'primitive', 'cont', 'label', or a key of self.task_map.

    Returns:
        The solver's loss value for the batch.
    """
    # Resolve the (obs, action, precision) tensors and solver per task.
    if task == 'primitive':
        tensors = (self.primitive_obs_tensor, self.primitive_action_tensor,
                   self.primitive_precision_tensor)
        solver = self.primitive_solver
    elif task == 'cont':
        tensors = (self.cont_obs_tensor, self.cont_action_tensor,
                   self.cont_precision_tensor)
        solver = self.cont_solver
    elif task == 'label':
        tensors = (self.label_obs_tensor, self.label_action_tensor,
                   self.label_precision_tensor)
        solver = self.label_solver
    else:
        tmap = self.task_map[task]
        tensors = (tmap['obs_tensor'], tmap['action_tensor'],
                   tmap['precision_tensor'])
        solver = tmap['solver']
    feed_dict = {tensors[0]: obs,
                 tensors[1]: tgt_mu,
                 tensors[2]: tgt_prc,
                 self.dec_tensor: self.cur_dec}
    #self.average_val_losses.append(val_loss)
    return solver(feed_dict, self.sess, device_string=self.device_string, train=False)
def update(self, task="control", check_val=False, aux=None):
    """Run the configured number of optimizer iterations for `task`.

    Args:
        task: network to train — a key of self.task_map, or one of
            'primitive', 'cont', 'label'.
        check_val: unused here; kept for interface compatibility.
        aux: unused here; kept for interface compatibility. (Fix: the
            default was a shared mutable list `[]`; now None.)

    Side effects:
        Advances self.tf_iter by the iteration count and appends the
        mean training loss to self.average_losses.
    """
    # Fix: removed the unused `start_t = time.time()` local and hoisted
    # the loop-invariant solver lookup out of the iteration loop.
    iterations = self._hyperparams['iterations']
    if task in self.task_map:
        solver = self.task_map[task]['solver']
    elif task == 'primitive':
        solver = self.primitive_solver
    elif task == 'cont':
        solver = self.cont_solver
    elif task == 'label':
        solver = self.label_solver
    # High-level networks train with the hllr rate; control tasks with lr.
    uses_hllr = task in ('label', 'cont', 'primitive')
    average_loss = 0
    for _ in range(iterations):
        if uses_hllr:
            feed_dict = {self.hllr_tensor: self.cur_hllr}
        else:
            feed_dict = {self.lr_tensor: self.cur_lr}
        feed_dict[self.dec_tensor] = self.cur_dec
        train_loss = solver(feed_dict, self.sess,
                            device_string=self.device_string, train=True)[0]
        average_loss += train_loss
    self.tf_iter += iterations
    self.average_losses.append(average_loss / iterations)
    # Historical note (was a free-floating string literal here):
    # variance optimization once computed
    #   A = sum(tgt_prc_orig, 0) + 2*NT*ent_reg*ones((dU, dU)); A /= sum(tgt_wt)
    #   self.var[task] = 1/diag(A); chol_pol_covar = diag(sqrt(var))
def update_primitive_filter(self, obs, tgt_mu, tgt_prc, tgt_wt, check_val=False, aux=[]):
    """
    Update policy.
    Args:
        obs: Numpy array of observations, N x T x dO.
        tgt_mu: Numpy array of mean filter outputs, N x T x dP.
        tgt_prc: Numpy array of precision matrices, N x T x dP x dP.
        tgt_wt: Numpy array of weights, N x T.
        check_val: when True, evaluate only (train=False) and record the
            loss in average_val_losses instead of average_losses.
        aux: optional per-sample class labels; fed to
            primitive_class_tensor when that tensor exists.
    Returns:
        A tensorflow object with updated weights.
    """
    N = obs.shape[0]
    #tgt_wt *= (float(N) / np.sum(tgt_wt))
    # Allow weights to be at most twice the robust median.
    # mn = np.median(tgt_wt[(np.abs(tgt_wt) > 1e-3).nonzero()])
    # for n in range(N):
    #     tgt_wt[n] = min(tgt_wt[n], 2 * mn)
    # Robust median should be around one.
    # tgt_wt /= mn
    # Reshape inputs.
    obs = np.reshape(obs, (N, -1))
    tgt_mu = np.reshape(tgt_mu, (N, -1))
    '''
    tgt_prc = np.reshape(tgt_prc, (N, dP, dP))
    tgt_wt = np.reshape(tgt_wt, (N, 1, 1))
    # Fold weights into tgt_prc.
    tgt_prc = tgt_wt * tgt_prc
    '''
    # Fold the per-sample weights into the precisions.
    tgt_prc = tgt_prc * tgt_wt.reshape((N, 1)) #tgt_wt.flatten()
    if len(aux): aux = aux.reshape((-1,1))
    # Assuming that N*T >= self.batch_size.
    batch_size = np.minimum(self.batch_size, N)
    batches_per_epoch = np.maximum(np.floor(N / batch_size), 1)
    idx = list(range(N))
    average_loss = 0
    np.random.shuffle(idx)
    '''
    if self._hyperparams['fc_only_iterations'] > 0:
        feed_dict = {self.obs_tensor: obs}
        num_values = obs.shape[0]
        conv_values = self.primitive_solver.get_last_conv_values(self.sess, feed_dict, num_values, batch_size)
        for i in range(self._hyperparams['fc_only_iterations'] ):
            start_idx = int(i * batch_size %
                            (batches_per_epoch * batch_size))
            idx_i = idx[start_idx:start_idx+batch_size]
            feed_dict = {self.primitive_last_conv_vars: conv_values[idx_i],
                         self.primitive_action_tensor: tgt_mu[idx_i],
                         self.primitive_precision_tensor: tgt_prc[idx_i],
                         self.hllr_tensor: self.cur_hllr}
            train_loss = self.primitive_solver(feed_dict, self.sess, device_string=self.device_string, train=(not check_val), use_fc_solver=True)
            average_loss = 0
    '''
    # actual training.
    # for i in range(self._hyperparams['iterations']):
    for i in range(self._hyperparams['iterations']):
        # Load in data for this batch.
        self.train_iters += 1
        # Batch start wraps around within one shuffled epoch.
        start_idx = int(i * self.batch_size %
                        (batches_per_epoch * self.batch_size))
        idx_i = idx[start_idx:start_idx+self.batch_size]
        feed_dict = {self.primitive_obs_tensor: obs[idx_i],
                     self.primitive_action_tensor: tgt_mu[idx_i],
                     self.primitive_precision_tensor: tgt_prc[idx_i],
                     self.hllr_tensor: self.cur_hllr}
        if len(aux) and self.primitive_class_tensor is not None:
            feed_dict[self.primitive_class_tensor] = aux[idx_i]
        # train=(not check_val): validation runs skip the optimizer step.
        train_loss = self.primitive_solver(feed_dict, self.sess, device_string=self.device_string, train=(not check_val))[0]
        average_loss += train_loss
    self.tf_iter += self._hyperparams['iterations']
    if check_val:
        self.average_val_losses.append(average_loss / self._hyperparams['iterations'])
    else:
        self.average_losses.append(average_loss / self._hyperparams['iterations'])
    # NOTE(review): init_network/_create_network never set a scalar
    # self.obs_tensor attribute (only self.obs_tensors by name) — confirm
    # this attribute exists; feed_dict/num_values are unused below anyway.
    feed_dict = {self.obs_tensor: obs}
    num_values = obs.shape[0]
    #if self.primitive_feat_op is not None:
    #    self.primitive_feat_vals = self.primitive_solver.get_var_values(self.sess, self.primitive_feat_op, feed_dict, num_values, self.batch_size)
def traj_prob(self, obs, task="control"):
    """Integrate the mean output of self.prob over time.

    Multiplying by a lower-triangular ones matrix gives the cumulative
    sum of mu[0] along the time axis; covariance terms pass through
    unchanged.
    """
    assert len(obs.shape) == 2 or obs.shape[0] == 1
    mu, sigma, precision, det_sigma = self.prob(obs, task)
    horizon = mu.shape[1]
    integrated = np.tri(horizon).dot(mu[0])
    return np.array([integrated]), sigma, precision, det_sigma
def policy_initialized(self, task):
    """Whether the policy for `task` has its normalization scale set.

    Unknown tasks fall back to the 'control' policy's state.
    """
    key = task if task in self.valid_scopes else 'control'
    return self.task_map[key]['policy'].scale is not None
def prob(self, obs, task="control"):
    """
    Run policy forward.
    Args:
        obs: Numpy array of observations that is N x T x dO (a 2-D
            input is treated as 1 x T x dO).
        task: policy to evaluate; unknown tasks fall back to 'control'.
    Returns:
        (output, pol_sigma, pol_prec, pol_det_sigma): the N x T x dU
        network outputs plus covariance terms tiled from the policy's
        variance.
    """
    if len(obs.shape) < 3:
        obs = obs.reshape((1, obs.shape[0], obs.shape[1]))
    dU = self._dU
    N, T = obs.shape[:2]
    # Normalize obs.
    if task not in self.valid_scopes:
        task = "control"
    if task in self.task_map:
        policy = self.task_map[task]['policy']
    else:
        policy = getattr(self, '{0}_policy'.format(task))
    if policy.scale is not None:
        obs = obs.copy()
        for n in range(N):
            # Fancy indexing with x_idx puts the state dims on the
            # leading axis, hence the transposes around the affine map.
            obs[n, :, self.x_idx] = (obs[n, :, self.x_idx].T.dot(policy.scale)
                                     + policy.bias).T
    output = np.zeros((N, T, dU))
    for i in range(N):
        for t in range(T):
            # Feed in data, one (sample, timestep) observation at a time.
            if task in self.task_map:
                obs_tensor = self.task_map[task]['obs_tensor']
                act_op = self.task_map[task]['act_op']
            else:
                obs_tensor = getattr(self, '{0}_obs_tensor'.format(task))
                act_op = getattr(self, '{0}_act_op'.format(task))
            feed_dict = {obs_tensor: np.expand_dims(obs[i, t], axis=0)}
            # with tf.device(self.device_string):
            #     output[i, t, :] = self.sess.run(act_op, feed_dict=feed_dict)
            output[i, t, :] = self.sess.run(act_op, feed_dict=feed_dict)
    # Tile the (diagonal) policy variance into per-(N, T) covariance terms.
    if task in self.var:
        pol_sigma = np.tile(np.diag(self.var[task]), [N, T, 1, 1])
        pol_prec = np.tile(np.diag(1.0 / self.var[task]), [N, T, 1, 1])
        pol_det_sigma = np.tile(np.prod(self.var[task]), [N, T])
    else:
        var = getattr(self, '{0}_var'.format(task))
        pol_sigma = np.tile(np.diag(var), [N, T, 1, 1])
        pol_prec = np.tile(np.diag(1.0 / var), [N, T, 1, 1])
        pol_det_sigma = np.tile(np.prod(var), [N, T])
    return output, pol_sigma, pol_prec, pol_det_sigma
def set_ent_reg(self, ent_reg):
    """Set the entropy regularization hyperparameter."""
    self._hyperparams.update(ent_reg=ent_reg)
def save_model(self, fname):
    """Write a checkpoint of the current session's variables to `fname`.

    The meta graph is skipped; only variable values are stored.
    """
    self.saver.save(self.sess, fname, write_meta_graph=False)
def restore_model(self, fname):
    """Restore the session's variables from the checkpoint at `fname`."""
    self.saver.restore(self.sess, fname)
# For pickling.
def __getstate__(self):
    """Pickle support: capture hyperparams, per-task normalization
    stats, and the session weights via a temporary checkpoint file."""
    with tempfile.NamedTemporaryFile('w+b', delete=True) as f:
        self.save_model(f.name) # TODO - is this implemented.
        f.seek(0)
        with open(f.name, 'r') as f2:
            # NOTE(review): checkpoint data is binary but re-read here in
            # text mode — confirm this round-trips with __setstate__.
            wts = f2.read()
    return {
        'hyperparams': self._hyperparams,
        'dO': self._dO,
        'dU': self._dU,
        'scale': {task:self.task_map[task]['policy'].scale for task in self.task_map},
        'bias': {task:self.task_map[task]['policy'].bias for task in self.task_map},
        'tf_iter': self.tf_iter,
        'x_idx': {task:self.task_map[task]['policy'].x_idx for task in self.task_map},
        'chol_pol_covar': {task:self.task_map[task]['policy'].chol_pol_covar for task in self.task_map},
        'wts': wts,
    }
# For unpickling.
def __setstate__(self, state):
    """Unpickle support: rebuild the graph, re-init, and restore weights."""
    # NOTE(review): this imports from a top-level package named `tf` —
    # confirm it should not be `tensorflow.python.framework`.
    from tf.python.framework import ops
    ops.reset_default_graph() # we need to destroy the default graph before re_init or checkpoint won't restore.
    self.__init__(state['hyperparams'], state['dO'], state['dU'])
    for task in self.task_map:
        # NOTE(review): __getstate__ stores per-task dicts, but these
        # lines assign the whole dict (e.g. state['scale']) rather than
        # state['scale'][task], and use self.policy[...] where other
        # methods use self.task_map[task]['policy'] — confirm intended.
        self.policy[task].scale = state['scale']
        self.policy[task].bias = state['bias']
        self.policy[task].x_idx = state['x_idx']
        self.policy[task].chol_pol_covar = state['chol_pol_covar']
    self.tf_iter = state['tf_iter']
    with tempfile.NamedTemporaryFile('w+b', delete=True) as f:
        f.write(state['wts'])
        f.seek(0)
        self.restore_model(f.name)
"numpy.argmax",
"tensorflow.get_collection",
"numpy.floor",
"numpy.ones",
"tensorflow.ConfigProto",
"tensorflow.Variable",
"numpy.mean",
"sys.exc_info",
"numpy.diag",
"tensorflow.GPUOptions",
"numpy.prod",
"gps.algorithm.policy_opt.policy_opt.PolicyOpt.__init__",
"tensorflow.placeholder_with... | [((1068, 1096), 'copy.deepcopy', 'copy.deepcopy', (['POLICY_OPT_TF'], {}), '(POLICY_OPT_TF)\n', (1081, 1096), False, 'import copy\n'), ((1299, 1339), 'gps.algorithm.policy_opt.policy_opt.PolicyOpt.__init__', 'PolicyOpt.__init__', (['self', 'config', 'dO', 'dU'], {}), '(self, config, dO, dU)\n', (1317, 1339), False, 'from gps.algorithm.policy_opt.policy_opt import PolicyOpt\n'), ((1349, 1401), 'tensorflow.set_random_seed', 'tf.set_random_seed', (["self._hyperparams['random_seed']"], {}), "(self._hyperparams['random_seed'])\n", (1367, 1401), True, 'import tensorflow as tf\n'), ((3332, 3343), 'time.time', 'time.time', ([], {}), '()\n', (3341, 3343), False, 'import time\n'), ((3770, 3799), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (3797, 3799), True, 'import tensorflow as tf\n'), ((7601, 7668), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': "(scope + '/')"}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope + '/')\n", (7618, 7668), True, 'import tensorflow as tf\n'), ((7732, 7757), 'tensorflow.train.Saver', 'tf.train.Saver', (['variables'], {}), '(variables)\n', (7746, 7757), True, 'import tensorflow as tf\n'), ((11526, 11587), 'pickle.dumps', 'pickle.dumps', (['[scopes, var_to_val, scales, biases, variances]'], {}), '([scopes, var_to_val, scales, biases, variances])\n', (11538, 11587), False, 'import pickle\n'), ((11703, 11725), 'pickle.loads', 'pickle.loads', (['json_wts'], {}), '(json_wts)\n', (11715, 11725), False, 'import pickle\n'), ((23003, 23045), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {'name': '"""weight_dec"""'}), "('float', name='weight_dec')\n", (23017, 23045), True, 'import tensorflow as tf\n'), ((25804, 25865), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': "self._hyperparams['lr']", 'name': '"""lr"""'}), "(initial_value=self._hyperparams['lr'], name='lr')\n", (25815, 25865), True, 'import 
tensorflow as tf\n'), ((32659, 32679), 'numpy.mean', 'np.mean', (['acc'], {'axis': '(0)'}), '(acc, axis=0)\n', (32666, 32679), True, 'import numpy as np\n'), ((35912, 35923), 'time.time', 'time.time', ([], {}), '()\n', (35921, 35923), False, 'import time\n'), ((37980, 38004), 'numpy.reshape', 'np.reshape', (['obs', '(N, -1)'], {}), '(obs, (N, -1))\n', (37990, 38004), True, 'import numpy as np\n'), ((38022, 38049), 'numpy.reshape', 'np.reshape', (['tgt_mu', '(N, -1)'], {}), '(tgt_mu, (N, -1))\n', (38032, 38049), True, 'import numpy as np\n'), ((38432, 38462), 'numpy.minimum', 'np.minimum', (['self.batch_size', 'N'], {}), '(self.batch_size, N)\n', (38442, 38462), True, 'import numpy as np\n'), ((38593, 38615), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (38610, 38615), True, 'import numpy as np\n'), ((42437, 42457), 'numpy.zeros', 'np.zeros', (['(N, T, dU)'], {}), '((N, T, dU))\n', (42445, 42457), True, 'import numpy as np\n'), ((45118, 45143), 'tf.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (45141, 45143), False, 'from tf.python.framework import ops\n'), ((3073, 3084), 'numpy.ones', 'np.ones', (['dU'], {}), '(dU)\n', (3080, 3084), True, 'import numpy as np\n'), ((3146, 3157), 'numpy.ones', 'np.ones', (['dU'], {}), '(dU)\n', (3153, 3157), True, 'import numpy as np\n'), ((3508, 3572), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'self.gpu_fraction'}), '(per_process_gpu_memory_fraction=self.gpu_fraction)\n', (3521, 3572), True, 'import tensorflow as tf\n'), ((3613, 3645), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (3626, 3645), True, 'import tensorflow as tf\n'), ((9424, 9435), 'time.time', 'time.time', ([], {}), '()\n', (9433, 9435), False, 'import time\n'), ((12878, 12901), 'numpy.array', 'np.array', (['scales[scope]'], {}), '(scales[scope])\n', (12886, 12901), True, 'import numpy as np\n'), ((12952, 
12975), 'numpy.array', 'np.array', (['biases[scope]'], {}), '(biases[scope])\n', (12960, 12975), True, 'import numpy as np\n'), ((14843, 14866), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (14860, 14866), True, 'import tensorflow as tf\n'), ((14898, 14940), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '()'}), '(1.0, shape=())\n', (14925, 14940), True, 'import tensorflow as tf\n'), ((23275, 23340), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': "self._hyperparams['hllr']", 'name': '"""hllr"""'}), "(initial_value=self._hyperparams['hllr'], name='hllr')\n", (23286, 23340), True, 'import tensorflow as tf\n'), ((23433, 23501), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""primitive/"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='primitive/')\n", (23450, 23501), True, 'import tensorflow as tf\n'), ((24737, 24802), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': "self._hyperparams['hllr']", 'name': '"""hllr"""'}), "(initial_value=self._hyperparams['hllr'], name='hllr')\n", (24748, 24802), True, 'import tensorflow as tf\n'), ((24895, 24958), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""cont/"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='cont/')\n", (24912, 24958), True, 'import tensorflow as tf\n'), ((27411, 27476), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': "self._hyperparams['hllr']", 'name': '"""hllr"""'}), "(initial_value=self._hyperparams['hllr'], name='hllr')\n", (27422, 27476), True, 'import tensorflow as tf\n'), ((27569, 27633), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""label/"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='label/')\n", (27586, 27633), True, 'import tensorflow as tf\n'), ((32631, 32643), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (32638, 32643), True, 
'import numpy as np\n'), ((38502, 38526), 'numpy.floor', 'np.floor', (['(N / batch_size)'], {}), '(N / batch_size)\n', (38510, 38526), True, 'import numpy as np\n'), ((41353, 41369), 'numpy.array', 'np.array', (['[traj]'], {}), '([traj])\n', (41361, 41369), True, 'import numpy as np\n'), ((44222, 44269), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w+b"""'], {'delete': '(True)'}), "('w+b', delete=True)\n", (44249, 44269), False, 'import tempfile\n'), ((45615, 45662), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w+b"""'], {'delete': '(True)'}), "('w+b', delete=True)\n", (45642, 45662), False, 'import tempfile\n'), ((2979, 2990), 'numpy.ones', 'np.ones', (['dU'], {}), '(dU)\n', (2986, 2990), True, 'import numpy as np\n'), ((3684, 3750), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'allow_soft_placement': '(True)'}), '(gpu_options=gpu_options, allow_soft_placement=True)\n', (3698, 3750), True, 'import tensorflow as tf\n'), ((9671, 9682), 'time.time', 'time.time', ([], {}), '()\n', (9680, 9682), False, 'import time\n'), ((12071, 12094), 'numpy.array', 'np.array', (['scales[scope]'], {}), '(scales[scope])\n', (12079, 12094), True, 'import numpy as np\n'), ((12135, 12158), 'numpy.array', 'np.array', (['biases[scope]'], {}), '(biases[scope])\n', (12143, 12158), True, 'import numpy as np\n'), ((12266, 12289), 'numpy.array', 'np.array', (['scales[scope]'], {}), '(scales[scope])\n', (12274, 12289), True, 'import numpy as np\n'), ((12330, 12353), 'numpy.array', 'np.array', (['biases[scope]'], {}), '(biases[scope])\n', (12338, 12353), True, 'import numpy as np\n'), ((13616, 13641), 'tensorflow.train.Saver', 'tf.train.Saver', (['variables'], {}), '(variables)\n', (13630, 13641), True, 'import tensorflow as tf\n'), ((16313, 16343), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""primitive"""'], {}), "('primitive')\n", (16330, 16343), True, 'import tensorflow as tf\n'), ((16463, 16505), 
'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '()'}), '(1.0, shape=())\n', (16490, 16505), True, 'import tensorflow as tf\n'), ((18131, 18156), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cont"""'], {}), "('cont')\n", (18148, 18156), True, 'import tensorflow as tf\n'), ((18266, 18308), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '()'}), '(1.0, shape=())\n', (18293, 18308), True, 'import tensorflow as tf\n'), ((21262, 21288), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""label"""'], {}), "('label')\n", (21279, 21288), True, 'import tensorflow as tf\n'), ((21400, 21442), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '()'}), '(1.0, shape=())\n', (21427, 21442), True, 'import tensorflow as tf\n'), ((23519, 23549), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""primitive"""'], {}), "('primitive')\n", (23536, 23549), True, 'import tensorflow as tf\n'), ((23591, 23986), 'policy_hooks.utils.tf_utils.TfSolver', 'TfSolver', ([], {'loss_scalar': 'self.primitive_loss_scalar', 'solver_name': "self._hyperparams['solver_type']", 'base_lr': 'self.hllr_tensor', 'lr_policy': "self._hyperparams['lr_policy']", 'momentum': "self._hyperparams['momentum']", 'weight_decay': 'self.dec_tensor', 'fc_vars': 'self.primitive_fc_vars', 'last_conv_vars': 'self.primitive_last_conv_vars', 'vars_to_opt': 'vars_to_opt', 'aux_losses': 'self.primitive_aux_losses'}), "(loss_scalar=self.primitive_loss_scalar, solver_name=self.\n _hyperparams['solver_type'], base_lr=self.hllr_tensor, lr_policy=self.\n _hyperparams['lr_policy'], momentum=self._hyperparams['momentum'],\n weight_decay=self.dec_tensor, fc_vars=self.primitive_fc_vars,\n last_conv_vars=self.primitive_last_conv_vars, vars_to_opt=vars_to_opt,\n aux_losses=self.primitive_aux_losses)\n", (23599, 23986), False, 'from policy_hooks.utils.tf_utils import TfSolver\n'), ((24976, 25001), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""cont"""'], {}), "('cont')\n", (24993, 25001), True, 'import tensorflow as tf\n'), ((25038, 25416), 'policy_hooks.utils.tf_utils.TfSolver', 'TfSolver', ([], {'loss_scalar': 'self.cont_loss_scalar', 'solver_name': "self._hyperparams['solver_type']", 'base_lr': 'self.hllr_tensor', 'lr_policy': "self._hyperparams['lr_policy']", 'momentum': "self._hyperparams['momentum']", 'weight_decay': 'self.dec_tensor', 'fc_vars': 'self.cont_fc_vars', 'last_conv_vars': 'self.cont_last_conv_vars', 'vars_to_opt': 'vars_to_opt', 'aux_losses': 'self.cont_aux_losses'}), "(loss_scalar=self.cont_loss_scalar, solver_name=self._hyperparams[\n 'solver_type'], base_lr=self.hllr_tensor, lr_policy=self._hyperparams[\n 'lr_policy'], momentum=self._hyperparams['momentum'], weight_decay=self\n .dec_tensor, fc_vars=self.cont_fc_vars, last_conv_vars=self.\n cont_last_conv_vars, vars_to_opt=vars_to_opt, aux_losses=self.\n cont_aux_losses)\n", (25046, 25416), False, 'from policy_hooks.utils.tf_utils import TfSolver\n'), ((26105, 26172), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': "(scope + '/')"}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope + '/')\n", (26122, 26172), True, 'import tensorflow as tf\n'), ((27651, 27677), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""label"""'], {}), "('label')\n", (27668, 27677), True, 'import tensorflow as tf\n'), ((27715, 28097), 'policy_hooks.utils.tf_utils.TfSolver', 'TfSolver', ([], {'loss_scalar': 'self.label_loss_scalar', 'solver_name': "self._hyperparams['solver_type']", 'base_lr': 'self.hllr_tensor', 'lr_policy': "self._hyperparams['lr_policy']", 'momentum': "self._hyperparams['momentum']", 'weight_decay': 'self.dec_tensor', 'fc_vars': 'self.label_fc_vars', 'last_conv_vars': 'self.label_last_conv_vars', 'vars_to_opt': 'vars_to_opt', 'aux_losses': 'self.label_aux_losses'}), "(loss_scalar=self.label_loss_scalar, solver_name=self._hyperparams[\n 
'solver_type'], base_lr=self.hllr_tensor, lr_policy=self._hyperparams[\n 'lr_policy'], momentum=self._hyperparams['momentum'], weight_decay=self\n .dec_tensor, fc_vars=self.label_fc_vars, last_conv_vars=self.\n label_last_conv_vars, vars_to_opt=vars_to_opt, aux_losses=self.\n label_aux_losses)\n", (27723, 28097), False, 'from policy_hooks.utils.tf_utils import TfSolver\n'), ((29313, 29334), 'numpy.zeros', 'np.zeros', (['self._dPrim'], {}), '(self._dPrim)\n', (29321, 29334), True, 'import numpy as np\n'), ((29939, 29960), 'numpy.zeros', 'np.zeros', (['self._dCont'], {}), '(self._dCont)\n', (29947, 29960), True, 'import numpy as np\n'), ((31426, 31437), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (31434, 31437), True, 'import numpy as np\n'), ((33927, 33939), 'numpy.argmax', 'np.argmax', (['d'], {}), '(d)\n', (33936, 33939), True, 'import numpy as np\n'), ((41307, 41326), 'numpy.tri', 'np.tri', (['mu.shape[1]'], {}), '(mu.shape[1])\n', (41313, 41326), True, 'import numpy as np\n'), ((43239, 43262), 'numpy.diag', 'np.diag', (['self.var[task]'], {}), '(self.var[task])\n', (43246, 43262), True, 'import numpy as np\n'), ((43309, 43338), 'numpy.diag', 'np.diag', (['(1.0 / self.var[task])'], {}), '(1.0 / self.var[task])\n', (43316, 43338), True, 'import numpy as np\n'), ((43390, 43413), 'numpy.prod', 'np.prod', (['self.var[task]'], {}), '(self.var[task])\n', (43397, 43413), True, 'import numpy as np\n'), ((43525, 43537), 'numpy.diag', 'np.diag', (['var'], {}), '(var)\n', (43532, 43537), True, 'import numpy as np\n'), ((43584, 43602), 'numpy.diag', 'np.diag', (['(1.0 / var)'], {}), '(1.0 / var)\n', (43591, 43602), True, 'import numpy as np\n'), ((43654, 43666), 'numpy.prod', 'np.prod', (['var'], {}), '(var)\n', (43661, 43666), True, 'import numpy as np\n'), ((19645, 19669), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (19662, 19669), True, 'import tensorflow as tf\n'), ((26192, 26216), 'tensorflow.variable_scope', 
'tf.variable_scope', (['scope'], {}), '(scope)\n', (26209, 26216), True, 'import tensorflow as tf\n'), ((26271, 26673), 'policy_hooks.utils.tf_utils.TfSolver', 'TfSolver', ([], {'loss_scalar': "self.task_map[scope]['loss_scalar']", 'solver_name': "self._hyperparams['solver_type']", 'base_lr': 'self.lr_tensor', 'lr_policy': "self._hyperparams['lr_policy']", 'momentum': "self._hyperparams['momentum']", 'weight_decay': "self._hyperparams['weight_decay']", 'fc_vars': "self.task_map[scope]['fc_vars']", 'last_conv_vars': "self.task_map[scope]['last_conv_vars']", 'vars_to_opt': 'vars_to_opt'}), "(loss_scalar=self.task_map[scope]['loss_scalar'], solver_name=self.\n _hyperparams['solver_type'], base_lr=self.lr_tensor, lr_policy=self.\n _hyperparams['lr_policy'], momentum=self._hyperparams['momentum'],\n weight_decay=self._hyperparams['weight_decay'], fc_vars=self.task_map[\n scope]['fc_vars'], last_conv_vars=self.task_map[scope]['last_conv_vars'\n ], vars_to_opt=vars_to_opt)\n", (26279, 26673), False, 'from policy_hooks.utils.tf_utils import TfSolver\n'), ((30768, 30780), 'numpy.zeros', 'np.zeros', (['dU'], {}), '(dU)\n', (30776, 30780), True, 'import numpy as np\n'), ((32236, 32256), 'numpy.argmax', 'np.argmax', (['distrs[i]'], {}), '(distrs[i])\n', (32245, 32256), True, 'import numpy as np\n'), ((32260, 32280), 'numpy.argmax', 'np.argmax', (['labels[i]'], {}), '(labels[i])\n', (32269, 32280), True, 'import numpy as np\n'), ((42928, 42961), 'numpy.expand_dims', 'np.expand_dims', (['obs[i, t]'], {'axis': '(0)'}), '(obs[i, t], axis=0)\n', (42942, 42961), True, 'import numpy as np\n'), ((32493, 32505), 'numpy.min', 'np.min', (['accs'], {}), '(accs)\n', (32499, 32505), True, 'import numpy as np\n'), ((13886, 13900), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13898, 13900), False, 'import sys\n')] |
import numpy as np
import params
from CommClient import CommClient
from TFTrainer import TFTrainer as TR
def compute_norm(data):
    """Return the sum of squared L2 norms of all arrays in *data*.

    Args:
        data: iterable of array-likes (e.g. per-layer gradient tensors).

    Returns:
        Scalar sum of element-wise squares over every item; 0 for an
        empty iterable.
    """
    # Generator expression avoids materialising an intermediate list.
    return sum(np.sum(item ** 2) for item in data)
"""
@brief: 极坐标转欧氏坐标
@param [polar_coordinate]: 要转换的极坐标 | 都是用普通列表表示的坐标
@return: 转换结果(欧氏坐标)
"""
def polar2euclid(polar_coordinate):
    """Convert a polar coordinate to a Euclidean coordinate.

    Args:
        polar_coordinate: two-element sequence [rho, theta] (theta in radians).

    Returns:
        list [x, y] with x = rho*cos(theta) and y = rho*sin(theta).
    """
    # np.math was an undocumented alias for the stdlib math module and was
    # removed in NumPy 2.0; call math directly.
    import math
    rho, theta = polar_coordinate
    return [rho * math.cos(theta), rho * math.sin(theta)]
"""
@brief: 欧氏坐标转极坐标
@param [polar_coordinate]: 要转换的欧氏坐标 | 都是用普通列表表示的坐标
@return: 转换结果(极坐标)
"""
def euclid2polar(euclid_coordinate):
    """Convert a Euclidean coordinate to a polar coordinate.

    Args:
        euclid_coordinate: two-element sequence [x, y].

    Returns:
        list [rho, theta] with rho = sqrt(x^2 + y^2) and theta = atan2(y, x).
    """
    # np.math was removed in NumPy 2.0; use the stdlib math module.
    # math.hypot is numerically robust against overflow/underflow of x*x+y*y.
    import math
    x, y = euclid_coordinate
    return [math.hypot(x, y), math.atan2(y, x)]
class Client():
    """Federated-learning client.

    Repeatedly receives the global model from the server, reports its local
    gradient norm and channel state, and — when scheduled — trains and
    uploads a local model, while simulating the client's own movement.
    """
    def __init__(self) -> None:
        pass
    def run(self, data, label, p_d):
        """Run the federated training loop against the local server.

        Args:
            data: local training samples (passed to TFTrainer).
            label: labels for *data*.
            p_d: pair (polar position, polar velocity), each as [rho, theta].
        """
        self.__comm = CommClient('127.0.0.1', 12345)
        self.__trainer = TR()
        self.__polar_position = p_d[0]
        self.__polar_direction = p_d[1]
        self.__euclid_position = polar2euclid(self.__polar_position)
        self.__euclid_direction = polar2euclid(self.__polar_direction)
        # Channel gain from a distance-based path-loss model: rho^(-alpha).
        self.__hi = self.__polar_position[0]**(-params.PATHLOSS_FACTOR)
        self.__transmit_power = params.CLIENT_TRANSMIT_POWER
        for _ in range(params.ITERATION_NUM):
            # Receive the global model from the server
            global_model = self.__comm.recv()
            # Compute the local gradient
            grad = self.__trainer.compute_gradient(global_model, data, label)
            # Compute the gradient's squared L2 norm
            grad_norm = compute_norm(grad)
            # Report results (norm, received power, position) to the server
            self.__comm.send({'grad_norm': grad_norm, 'received_power': self.__hi * self.__transmit_power, 'position': self.__euclid_position})
            # Receive the scheduling decision: 1 = scheduled, 0 = not scheduled
            sche_sig = self.__comm.recv()
            if sche_sig == 1:
                # When scheduled, apply the gradient to obtain the local model
                self.__trainer.train_with_grad(grad)
                # Upload the local model to the server
                self.__comm.send(self.__trainer.get_weights())
            self.__update_user()
    def __update_user(self):
        # Advance the simulated position by one time unit and refresh the
        # path-loss channel gain for the new distance.
        self.__move(1)
        self.__hi = self.__polar_position[0]**(-params.PATHLOSS_FACTOR)
    def __move(self, time_elapsed):
        # Translate the client along its velocity vector for *time_elapsed*.
        distance = self.__polar_direction[0] * time_elapsed
        pose_d = polar2euclid([distance, self.__polar_direction[1]])
        self.__euclid_position[0] += pose_d[0]
        self.__euclid_position[1] += pose_d[1]
        self.__polar_position = euclid2polar(self.__euclid_position)
        if self.__polar_position[0] > 100:
            # Outside the radius-100 disk: reflect the velocity. The radial
            # component (euclid_rho_vec) is subtracted twice, i.e.
            # v' = v - 2*(v.n)n, a specular bounce off the boundary.
            normal_dir = polar2euclid([1, self.__polar_position[1]])
            dot_product = self.__euclid_direction[0] * normal_dir[0] + self.__euclid_direction[1] * normal_dir[1]
            polar_rho_vec = [dot_product, self.__polar_position[1]]
            euclid_rho_vec = polar2euclid(polar_rho_vec)
            euclid_side_vec = [self.__euclid_direction[0] - euclid_rho_vec[0], self.__euclid_direction[1] - euclid_rho_vec[1]]
            self.__euclid_direction[0], self.__euclid_direction[1] = euclid_side_vec[0] - euclid_rho_vec[0], euclid_side_vec[1] - euclid_rho_vec[1]
            self.__polar_direction = euclid2polar(self.__euclid_direction)
if __name__ == '__main__':
client = Client()
client.run() | [
"numpy.math.atan2",
"numpy.sum",
"CommClient.CommClient",
"numpy.math.sqrt",
"numpy.math.cos",
"TFTrainer.TFTrainer",
"numpy.math.sin"
] | [((608, 675), 'numpy.math.sqrt', 'np.math.sqrt', (['(euclid_coordinate[0] ** 2 + euclid_coordinate[1] ** 2)'], {}), '(euclid_coordinate[0] ** 2 + euclid_coordinate[1] ** 2)\n', (620, 675), True, 'import numpy as np\n'), ((673, 730), 'numpy.math.atan2', 'np.math.atan2', (['euclid_coordinate[1]', 'euclid_coordinate[0]'], {}), '(euclid_coordinate[1], euclid_coordinate[0])\n', (686, 730), True, 'import numpy as np\n'), ((854, 884), 'CommClient.CommClient', 'CommClient', (['"""127.0.0.1"""', '(12345)'], {}), "('127.0.0.1', 12345)\n", (864, 884), False, 'from CommClient import CommClient\n'), ((910, 914), 'TFTrainer.TFTrainer', 'TR', ([], {}), '()\n', (912, 914), True, 'from TFTrainer import TFTrainer as TR\n'), ((148, 165), 'numpy.sum', 'np.sum', (['(item ** 2)'], {}), '(item ** 2)\n', (154, 165), True, 'import numpy as np\n'), ((361, 393), 'numpy.math.cos', 'np.math.cos', (['polar_coordinate[1]'], {}), '(polar_coordinate[1])\n', (372, 393), True, 'import numpy as np\n'), ((417, 449), 'numpy.math.sin', 'np.math.sin', (['polar_coordinate[1]'], {}), '(polar_coordinate[1])\n', (428, 449), True, 'import numpy as np\n')] |
# Copyright 2018 Recruit Communications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cpp_pyqubo import SubH
from pyqubo.array import Array
from pyqubo.integer import Integer
import numpy as np
class LogEncInteger(Integer):
    """Log encoded integer. The value that takes :math:`[0, n]` is
    represented by :math:`\\sum_{i=1}^{\\lceil\\log_{2}n\\rceil}2^ix_{i}` without any constraint.
    Args:
        label (str): Label of the integer.
        lower (int): Lower value of the integer.
        upper (int): Upper value of the integer.
    Examples:
        This example finds the value `a`, `b` such that :math:`a+b=5` and :math:`2a-b=1`.
        >>> from pyqubo import LogEncInteger
        >>> import dimod
        >>> a = LogEncInteger("a", (0, 4))
        >>> b = LogEncInteger("b", (0, 4))
        >>> M=2.0
        >>> H = (2*a-b-1)**2 + M*(a+b-5)**2
        >>> model = H.compile()
        >>> bqm = model.to_bqm()
        >>> import dimod
        >>> sampleset = dimod.ExactSolver().sample(bqm)
        >>> decoded_samples = model.decode_sampleset(sampleset)
        >>> best_sample = min(decoded_samples, key=lambda s: s.energy)
        >>> print(best_sample.subh['a'])
        2.0
        >>> print(best_sample.subh['b'])
        3.0
    """
    def __init__(self, label: str, value_range):
        lower, upper = value_range
        assert upper > lower, "upper value should be larger than lower value"
        assert isinstance(lower, int)
        assert isinstance(upper, int)
        span = upper - lower
        # Number of binary variables needed to cover [0, span]:
        # floor(log2(span)) + 1.
        self._num_variables = int(np.log2(span)) + 1
        self.array = Array.create(label, shape=self._num_variables, vartype='BINARY')
        d = self._num_variables - 1
        # Lower bits carry weights 2^0 ... 2^(d-1).
        express = lower + sum(self.array[i] * 2 ** i for i in range(self._num_variables - 1))
        # The top bit's weight is trimmed to span - (2^d - 1) so the maximum
        # representable value is exactly `upper` (no overshoot).
        express += (span - (2**d - 1)) * self.array[-1]
        express = SubH(express, label)
        super().__init__(
            label=label,
            value_range=value_range,
            express=express)
| [
"cpp_pyqubo.SubH",
"numpy.log2",
"pyqubo.array.Array.create"
] | [((2135, 2199), 'pyqubo.array.Array.create', 'Array.create', (['label'], {'shape': 'self._num_variables', 'vartype': '"""BINARY"""'}), "(label, shape=self._num_variables, vartype='BINARY')\n", (2147, 2199), False, 'from pyqubo.array import Array\n'), ((2404, 2424), 'cpp_pyqubo.SubH', 'SubH', (['express', 'label'], {}), '(express, label)\n', (2408, 2424), False, 'from cpp_pyqubo import SubH\n'), ((2095, 2108), 'numpy.log2', 'np.log2', (['span'], {}), '(span)\n', (2102, 2108), True, 'import numpy as np\n')] |
import numpy as np
data_path = "data/problem_11.txt"
# data_path = "data/problem_11_test.txt"
data = []
# Parse the puzzle input: one row of single-digit energy levels per line.
with open(data_path, "r") as f:
    for line in f:
        data.append([int(char) for char in line.rstrip()])
data = np.array(data)
print("Initial state:")
def get_neighborhood_view(state, y, x):
    """Return a writable slice of *state* covering the 3x3 box around (y, x).

    The low side is clamped at 0; slicing past the end clamps the high side,
    so edge cells simply receive a smaller window.
    """
    top = max(0, y - 1)
    left = max(0, x - 1)
    return state[top:y + 2, left:x + 2]
def step(state):
    """Advance the octopus grid by one step in place; return the flash count.

    Every cell gains 1 energy; any cell above 9 flashes, raising its
    neighbours' energy, possibly cascading. Flashed cells end the step at 0
    and cannot gain energy again within the same step.
    """
    state += 1
    flashes = 0
    while True:
        ys, xs = np.where(state > 9)
        if len(ys) == 0:
            # No further octopi were triggered — the cascade is over.
            break
        flashes += len(ys)
        # Bump every positive cell in the 3x3 box around each flasher;
        # already-flashed cells sit at 0 and are skipped.
        for y, x in zip(ys, xs):
            window = state[max(0, y - 1):y + 2, max(0, x - 1):x + 2]
            window[window > 0] += 1
        state[ys, xs] = 0
    return flashes
# part 1: total number of flashes over the first 100 steps
data_part_1 = data.copy()
total_flashes = 0
for i in range(100):
    total_flashes += step(data_part_1)
    print(f"State at step {i + 1}:")
    print(data_part_1)
print(f"Part 1 solution: {total_flashes}")
# part 2: find the first step on which every octopus flashes simultaneously
data_part_2 = data.copy()
# np.product was a deprecated alias and is removed in NumPy 2.0;
# np.prod is the supported spelling. (Unused total_flashes dropped.)
number_of_octopi = np.prod(data_part_2.shape)
for i in range(10000):
    # A step in which every cell flashed means the whole grid synchronised.
    if step(data_part_2) == number_of_octopi:
        print(f"State at step {i + 1}:")
        print(data_part_2)
        print(f"Part 2 solution: {i + 1}")
        break
"numpy.product",
"numpy.where",
"numpy.array"
] | [((223, 237), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (231, 237), True, 'import numpy as np\n'), ((1289, 1318), 'numpy.product', 'np.product', (['data_part_2.shape'], {}), '(data_part_2.shape)\n', (1299, 1318), True, 'import numpy as np\n'), ((525, 544), 'numpy.where', 'np.where', (['(state > 9)'], {}), '(state > 9)\n', (533, 544), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.keras import layers
# Display/precision settings for the exercise output.
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
tf.keras.backend.set_floatx('float32')
print("Modules Imported")
# Download the California Housing train/test splits.
train_df = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv")
test_df = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv")
# Shuffle the training rows.
train_df = train_df.reindex(np.random.permutation(train_df.index))
# Z-score-normalise each split with its own mean/std.
train_df_mean = train_df.mean()
train_df_std = train_df.std()
train_df_norm = (train_df - train_df_mean)/train_df_std
print(train_df_norm.head())
test_df_mean = test_df.mean()
test_df_std = test_df.std()
test_df_norm = (test_df - test_df_mean)/test_df_std
# Binary label: 1.0 when the (raw, un-normalised) house value exceeds $265k.
threshold = 265000
train_df_norm["median_house_value_is_high"] = (train_df["median_house_value"] > threshold).astype(float)
test_df_norm["median_house_value_is_high"] = (test_df["median_house_value"] > threshold).astype(float)
print(train_df_norm["median_house_value_is_high"].head(8000))
#below is an alternative based on the z score values
# threshold_in_Z = 1.0
# train_df_norm["median_house_value_is_high"] = (train_df_norm["median_house_value"] > threshold_in_Z).astype(float)
# test_df_norm["median_house_value_is_high"] = (test_df_norm["median_house_value"] > threshold_in_Z).astype(float)
# Two numeric input features: median_income and total_rooms.
feature_columns = []
median_income = tf.feature_column.numeric_column("median_income")
tr = tf.feature_column.numeric_column("total_rooms")
feature_columns.append(median_income)
feature_columns.append(tr)
feature_layer = layers.DenseFeatures(feature_columns)
print(feature_layer(dict(train_df_norm)))
def create_model(my_learning_rate, feature_layer, my_metrics):
    """Build and compile a single-unit sigmoid (logistic regression) model.

    Args:
        my_learning_rate: learning rate for the RMSprop optimizer.
        feature_layer: tf.keras DenseFeatures layer supplying the inputs.
        my_metrics: list of tf.keras metrics tracked during training.

    Returns:
        A compiled tf.keras Sequential model whose output is P(label == 1).
    """
    model = tf.keras.models.Sequential()
    model.add(feature_layer)
    model.add(tf.keras.layers.Dense(units=1, input_shape=(1,), activation=tf.sigmoid))
    # 'lr' was a deprecated alias and has been removed from tf.keras
    # optimizers; 'learning_rate' is the supported keyword.
    model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=my_learning_rate),
                  loss=tf.keras.losses.BinaryCrossentropy(),
                  metrics=my_metrics)
    return model
def train_model(model, dataset, epochs, label_name, batch_size=None, shuffle=True):
    """Fit *model* on *dataset* and return (epoch list, metrics DataFrame).

    The dataset is split into a feature dict keyed by column name, with the
    *label_name* column popped out as the training target.
    """
    feature_arrays = {name: np.array(values) for name, values in dataset.items()}
    target = np.array(feature_arrays.pop(label_name))
    history = model.fit(
        x=feature_arrays,
        y=target,
        batch_size=batch_size,
        epochs=epochs,
        shuffle=shuffle,
    )
    return history.epoch, pd.DataFrame(history.history)
print("Defined the create_model and train_model functions")
def plot_curve(epochs, hist, list_of_metrics):
    """Plot each named metric in *hist* against *epochs*.

    The first epoch is skipped in every series so the (often noisy) initial
    value does not distort the y-axis.
    """
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel("Value")
    for metric_name in list_of_metrics:
        series = hist[metric_name]
        plt.plot(epochs[1:], series[1:], label=metric_name)
    plt.legend()
    plt.show()
print("Defined the plot_curve function")
# https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#define_the_model_and_metrics
# Hyperparameters for this run.
learning_rate = 0.001
epochs = 20
batch_size = 100
label_name = "median_house_value_is_high"
# Probability cutoff used by the threshold-based metrics below.
classification_threshold = 0.35
# A `classification_threshold` of 0.52 appears to produce the highest accuracy (about 83%).
# Raising the `classification_threshold` to 0.9 drops accuracy by about 5%.
# Lowering the classification_threshold` to 0.3 drops accuracy by about 3%.
METRICS = [tf.keras.metrics.BinaryAccuracy(name='accuracy', threshold=classification_threshold), tf.keras.metrics.Precision(thresholds=classification_threshold, name='precision'), tf.keras.metrics.Recall(thresholds=classification_threshold, name='recall'),tf.keras.metrics.AUC(num_thresholds=100, name='auc')]
# Build, train, and plot the metric curves.
my_model = None
my_model = create_model(learning_rate, feature_layer, METRICS)
epochs, hist = train_model(my_model, train_df_norm, epochs, label_name, batch_size)
list_of_metrics_to_plot = ['accuracy', 'precision', 'recall', 'auc']
plot_curve(epochs, hist, list_of_metrics_to_plot)
# Evaluate against the held-out test split.
features = {name:np.array(value) for name, value in test_df_norm.items()}
label = np.array(features.pop(label_name))
my_model.evaluate(x = features, y = label, batch_size=batch_size)
| [
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.keras.optimizers.RMSprop",
"pandas.DataFrame",
"tensorflow.keras.layers.DenseFeatures",
"tensorflow.keras.metrics.Precision",
... | [((228, 266), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float32"""'], {}), "('float32')\n", (255, 266), True, 'import tensorflow as tf\n'), ((310, 411), 'pandas.read_csv', 'pd.read_csv', (['"""https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv"""'], {}), "(\n 'https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv'\n )\n", (321, 411), True, 'import pandas as pd\n'), ((413, 513), 'pandas.read_csv', 'pd.read_csv', (['"""https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv"""'], {}), "(\n 'https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv'\n )\n", (424, 513), True, 'import pandas as pd\n'), ((1490, 1539), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""median_income"""'], {}), "('median_income')\n", (1522, 1539), True, 'import tensorflow as tf\n'), ((1546, 1593), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""total_rooms"""'], {}), "('total_rooms')\n", (1578, 1593), True, 'import tensorflow as tf\n'), ((1678, 1715), 'tensorflow.keras.layers.DenseFeatures', 'layers.DenseFeatures', (['feature_columns'], {}), '(feature_columns)\n', (1698, 1715), False, 'from tensorflow.keras import layers\n'), ((533, 570), 'numpy.random.permutation', 'np.random.permutation', (['train_df.index'], {}), '(train_df.index)\n', (554, 570), True, 'import numpy as np\n'), ((1838, 1866), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (1864, 1866), True, 'import tensorflow as tf\n'), ((2494, 2523), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (2506, 2523), True, 'import pandas as pd\n'), ((2667, 2679), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2677, 2679), True, 'from matplotlib import pyplot as plt\n'), ((2685, 2704), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), 
"('Epoch')\n", (2695, 2704), True, 'from matplotlib import pyplot as plt\n'), ((2710, 2729), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), "('Value')\n", (2720, 2729), True, 'from matplotlib import pyplot as plt\n'), ((2831, 2843), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2841, 2843), True, 'from matplotlib import pyplot as plt\n'), ((2849, 2859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2857, 2859), True, 'from matplotlib import pyplot as plt\n'), ((3396, 3485), 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ([], {'name': '"""accuracy"""', 'threshold': 'classification_threshold'}), "(name='accuracy', threshold=\n classification_threshold)\n", (3427, 3485), True, 'import tensorflow as tf\n'), ((3482, 3568), 'tensorflow.keras.metrics.Precision', 'tf.keras.metrics.Precision', ([], {'thresholds': 'classification_threshold', 'name': '"""precision"""'}), "(thresholds=classification_threshold, name=\n 'precision')\n", (3508, 3568), True, 'import tensorflow as tf\n'), ((3565, 3640), 'tensorflow.keras.metrics.Recall', 'tf.keras.metrics.Recall', ([], {'thresholds': 'classification_threshold', 'name': '"""recall"""'}), "(thresholds=classification_threshold, name='recall')\n", (3588, 3640), True, 'import tensorflow as tf\n'), ((3641, 3693), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {'num_thresholds': '(100)', 'name': '"""auc"""'}), "(num_thresholds=100, name='auc')\n", (3661, 3693), True, 'import tensorflow as tf\n'), ((4004, 4019), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4012, 4019), True, 'import numpy as np\n'), ((1912, 1983), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '(1,)', 'activation': 'tf.sigmoid'}), '(units=1, input_shape=(1,), activation=tf.sigmoid)\n', (1933, 1983), True, 'import tensorflow as tf\n'), ((2253, 2268), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (2261, 2268), True, 
'import numpy as np\n'), ((2791, 2827), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs[1:]', 'x[1:]'], {'label': 'm'}), '(epochs[1:], x[1:], label=m)\n', (2799, 2827), True, 'from matplotlib import pyplot as plt\n'), ((2015, 2063), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'lr': 'my_learning_rate'}), '(lr=my_learning_rate)\n', (2042, 2063), True, 'import tensorflow as tf\n'), ((2069, 2105), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), '()\n', (2103, 2105), True, 'import tensorflow as tf\n')] |
# -*- encoding: utf-8 -*-
# Module iaptrans
from numpy import *
def iaptrans(f, t):
    """Periodic (circular) translation of a 1-D, 2-D or 3-D array.

    Args:
        f: input array.
        t: shift amount(s), one per axis of f.

    Returns:
        Array of f's shape with each axis rolled by t (wrap-around indexing).
        The output dtype follows np.empty, i.e. float64.
    """
    import numpy as np
    shifted = np.empty(f.shape)
    if f.ndim == 1:
        length = f.shape[0]
        idx = np.arange(length)
        shifted[:] = f[(idx - t) % length]
    elif f.ndim == 2:
        nrows, ncols = f.shape
        dr, dc = t
        rows, cols = np.indices(f.shape)
        shifted[:] = f[(rows - dr) % nrows, (cols - dc) % ncols]
    elif f.ndim == 3:
        nplanes, nrows, ncols = f.shape
        dz, dr, dc = t
        planes, rows, cols = np.indices(f.shape)
        shifted[:] = f[(planes - dz) % nplanes, (rows - dr) % nrows, (cols - dc) % ncols]
    return shifted
# implementation using periodic convolution
def iaptrans2(f, t):
    """Periodic translation via periodic convolution with a unit impulse.

    Builds a kernel of zeros sized 2*|t|+1 along each axis, places a single
    1 at offset t from the kernel centre, and convolves periodically with
    ia636.iapconv — equivalent to a circular shift by t.
    """
    from ia636 import iapconv
    f = asarray(f)
    shift = asarray(t).astype(int32)
    # Kernel large enough to hold the shift in either direction per axis.
    kernel = zeros(2 * abs(shift) + 1)
    centre_offset = shift + abs(shift)
    kernel[tuple(centre_offset)] = 1
    return iapconv(f, kernel)
| [
"numpy.empty",
"ia636.iapconv",
"numpy.indices"
] | [((116, 133), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (124, 133), True, 'import numpy as np\n'), ((739, 752), 'ia636.iapconv', 'iapconv', (['f', 'h'], {}), '(f, h)\n', (746, 752), False, 'from ia636 import iapconv\n'), ((297, 316), 'numpy.indices', 'np.indices', (['f.shape'], {}), '(f.shape)\n', (307, 316), True, 'import numpy as np\n'), ((437, 456), 'numpy.indices', 'np.indices', (['f.shape'], {}), '(f.shape)\n', (447, 456), True, 'import numpy as np\n')] |
import solver.solutionInstance as solutionInstance
import numpy as np
import random
def removeBestGroupSpecialistMeeting(self: solutionInstance.SolutionInstance):
    """Remove one meeting from the (group, specialist) pair with the largest
    scheduling overflow, chosen uniformly at random among ties, and return a
    new SolutionInstance; return None when nothing overflows.
    """
    # Overflow per (group, specialist): scheduled meetings summed over the
    # day/period axes minus the required amount.
    groupOverflows = np.sum(self.meetByPeriodByDayBySpecialistByGroup, axis=(2, 3)) - self.classesAndResources.groupsNeeds
    maxOverflow = np.max(groupOverflows)
    if maxOverflow <= 0:
        return None
    # Pick uniformly among all pairs attaining the maximum overflow.
    overflowIndices = np.where(groupOverflows == maxOverflow)
    overflowIndex = random.randrange(0, len(overflowIndices[0]))
    removingGroup = overflowIndices[0][overflowIndex]
    removingSpecialist = overflowIndices[1][overflowIndex]
    # NOTE(review): the first two axes are indexed as (group, specialist)
    # here while the array name says "BySubjectByGroup" — presumably the two
    # 5-D arrays share the same axis order; confirm against the class.
    groupAndSpecialistMeetingIndices = np.where(self.meetByPeriodByDayByLocalBySubjectByGroup[removingGroup, removingSpecialist])
    removingMeetingIndex = random.randrange(0, len(groupAndSpecialistMeetingIndices[0]))
    removingLocal = groupAndSpecialistMeetingIndices[0][removingMeetingIndex]
    removingDay = groupAndSpecialistMeetingIndices[1][removingMeetingIndex]
    removingPeriod = groupAndSpecialistMeetingIndices[2][removingMeetingIndex]
    # Copy so the original solution instance stays untouched.
    meetByPeriodByDayByLocalBySubjectByGroup = np.copy(self.meetByPeriodByDayByLocalBySubjectByGroup)
    meetByPeriodByDayByLocalBySubjectByGroup[removingGroup,
                                             removingSpecialist,
                                             removingLocal,
                                             removingDay,
                                             removingPeriod] = False
return solutionInstance.SolutionInstance(self.classesAndResources, meetByPeriodByDayByLocalBySubjectByGroup) | [
"solver.solutionInstance.SolutionInstance",
"numpy.sum",
"numpy.copy",
"numpy.max",
"numpy.where"
] | [((306, 328), 'numpy.max', 'np.max', (['groupOverflows'], {}), '(groupOverflows)\n', (312, 328), True, 'import numpy as np\n'), ((398, 437), 'numpy.where', 'np.where', (['(groupOverflows == maxOverflow)'], {}), '(groupOverflows == maxOverflow)\n', (406, 437), True, 'import numpy as np\n'), ((658, 752), 'numpy.where', 'np.where', (['self.meetByPeriodByDayByLocalBySubjectByGroup[removingGroup, removingSpecialist\n ]'], {}), '(self.meetByPeriodByDayByLocalBySubjectByGroup[removingGroup,\n removingSpecialist])\n', (666, 752), True, 'import numpy as np\n'), ((1121, 1175), 'numpy.copy', 'np.copy', (['self.meetByPeriodByDayByLocalBySubjectByGroup'], {}), '(self.meetByPeriodByDayByLocalBySubjectByGroup)\n', (1128, 1175), True, 'import numpy as np\n'), ((1512, 1617), 'solver.solutionInstance.SolutionInstance', 'solutionInstance.SolutionInstance', (['self.classesAndResources', 'meetByPeriodByDayByLocalBySubjectByGroup'], {}), '(self.classesAndResources,\n meetByPeriodByDayByLocalBySubjectByGroup)\n', (1545, 1617), True, 'import solver.solutionInstance as solutionInstance\n'), ((186, 248), 'numpy.sum', 'np.sum', (['self.meetByPeriodByDayBySpecialistByGroup'], {'axis': '(2, 3)'}), '(self.meetByPeriodByDayBySpecialistByGroup, axis=(2, 3))\n', (192, 248), True, 'import numpy as np\n')] |
import os
import time
import s3fs
import boto3
import json
import argparse
import pandas as pd
import numpy as np
import pathlib
import sagemaker
from sagemaker.feature_store.feature_group import FeatureGroup
# Parse argument variables passed via the CreateDataset processing step
# Parse argument variables passed via the CreateDataset processing step
parser = argparse.ArgumentParser()
parser.add_argument('--signups-feature-group-name', type=str)
parser.add_argument('--outcomes-feature-group-name', type=str)
parser.add_argument('--region', type=str)
parser.add_argument('--bucket-name', type=str)
parser.add_argument('--bucket-prefix', type=str)
args = parser.parse_args()
region = args.region
signups_fg_name = args.signups_feature_group_name
outcomes_fg_name = args.outcomes_feature_group_name
#Initialize Boto3 session
boto3.setup_default_session(region_name=region)
boto_session = boto3.Session(region_name=region)
#initialize S3 client
s3_client = boto3.client('s3')
#initialize Sagemaker client and roles
sagemaker_boto_client = boto_session.client('sagemaker')
sagemaker_session = sagemaker.session.Session(
    boto_session=boto_session,
    sagemaker_client=sagemaker_boto_client)
sagemaker_role = sagemaker.get_execution_role()
#initialize Athena client
athena = boto3.client('athena', region_name=region)
#-----Declare global variables (populated later from feature-group metadata)
sg_features=[]
oc_features=[]
sg_db = ''
sg_table = ''
oc_db = ''
oc_table = ''
# Amazon Fraud Detector metadata column labels; EventTime is excluded from
# generated queries.
afd_meta_labels = ['EVENT_TIMESTAMP', 'EVENT_LABEL']
ignore_col = 'EventTime'
#------Lookup feature store details-----------
# Initialize feature store runtime and session
def get_feature_store():
    """Wait until both Feature Group offline stores are Active.

    Returns:
        (signups_fg_metadata, outcomes_fg_metadata): the describe() payloads
        of the two feature groups once both offline stores report Active.

    Exits the process via os._exit(1) on ingestion failure or any exception.
    """
    featurestore_runtime = boto_session.client(
        service_name='sagemaker-featurestore-runtime',
        region_name=region
    )
    feature_store_session = sagemaker.Session(
        boto_session=boto_session,
        sagemaker_client=sagemaker_boto_client,
        sagemaker_featurestore_runtime_client=featurestore_runtime
    )
    signups_feature_group = FeatureGroup(
        name=signups_fg_name,
        sagemaker_session=feature_store_session)
    outcomes_feature_group = FeatureGroup(
        name=outcomes_fg_name,
        sagemaker_session=feature_store_session)
    try:
        signups_fg_metadata = signups_feature_group.describe()
        outecomes_fg_metadata = outcomes_feature_group.describe()
        s_fg_status = signups_fg_metadata['OfflineStoreStatus']['Status']
        o_fg_status = outecomes_fg_metadata['OfflineStoreStatus']['Status']
        # Poll until the feature stores become Active
        stime = time.time()
        while True:
            if s_fg_status == 'Active' and o_fg_status == 'Active':
                print(f"Feature Store Offline Stores are Active")
                break
            elif s_fg_status in ['CreateFailed', 'Deleting', 'DeleteFailed'] or o_fg_status in ['CreateFailed', 'Deleting', 'DeleteFailed']:
                print(f'Feature Group data ingestion problem: {signups_fg_name}:{s_fg_status}, {outcomes_fg_name}:{o_fg_status}')
                os._exit(1)
            else:
                print(f"Current progress: {(time.time() - stime)/60:{3}.{3}} minutes")
                print(f"Waiting for Feature Store Offline Stores to become Active")
                # BUG FIX: 'sleep' was called unqualified, but only the
                # 'time' module is imported (no 'from time import sleep'),
                # which raised NameError on this path.
                time.sleep(30)
                signups_fg_metadata = signups_feature_group.describe()
                outecomes_fg_metadata = outcomes_feature_group.describe()
                s_fg_status = signups_fg_metadata['OfflineStoreStatus']['Status']
                o_fg_status = outecomes_fg_metadata['OfflineStoreStatus']['Status']
    except Exception as e:
        print(e)
        os._exit(1)
    return signups_fg_metadata, outecomes_fg_metadata
#------Generate Athena Query based on features in the Feature store----
# this function by default finds the common columns in the two feature groups
# and sets the where clause using the common columns on the two tables. Modify as appropriate
def gen_query():
    """
    Build the Athena SELECT statement joining the signups and outcomes
    offline-store tables, plus the model-variable schema.

    Reads module-level globals: ``sg_features``, ``oc_features``,
    ``sg_table``, ``oc_table``, ``ignore_col`` and ``afd_meta_labels``.

    Returns
    -------
    select_stmt : str
        ``SELECT DISTINCT ... LEFT JOIN ...`` query over the two offline
        tables, joined on their common columns.
    schema : dict
        ``{'modelVariables': [...]}`` -- every selected column except the
        Amazon Fraud Detector metadata labels.
    """
    # Feature names per group, excluding the FeatureStore bookkeeping column.
    signup_features = [f['FeatureName'] for f in sg_features
                       if f['FeatureName'] != ignore_col]
    outcomes_features = [f['FeatureName'] for f in oc_features
                         if f['FeatureName'] != ignore_col]
    signup_features = np.array(signup_features)
    outcomes_features = np.array(outcomes_features)
    # Columns present in both tables drive the JOIN condition.
    common = list(np.intersect1d(signup_features, outcomes_features))
    common_cols = [f'"{sg_table}".{i} as {i}' for i in common]
    diff_cols_signups = list(set(common).symmetric_difference(signup_features))
    diff_cols_outcomes = list(set(common).symmetric_difference(outcomes_features))
    join_clause = " AND ".join(
        f'"{sg_table}".{i} = "{oc_table}".{i}' for i in common)
    # BUG FIX: the original joined the three column groups with hard-coded
    # commas, which produced invalid SQL (",," or a trailing comma) whenever
    # one of the groups was empty. Joining the concatenated list once avoids
    # the empty-group separators.
    select_cols = ", ".join(common_cols + diff_cols_signups + diff_cols_outcomes)
    select_stmt = f"""
    SELECT DISTINCT {select_cols}
    FROM "{sg_table}" LEFT JOIN "{oc_table}" ON
    {join_clause}
    """
    # Data-schema metadata: every column minus the AFD label/timestamp fields.
    all_cols = np.unique(np.concatenate([signup_features, outcomes_features]))
    schema = {
        'modelVariables': list(set(afd_meta_labels).symmetric_difference(all_cols)),
    }
    print(f'Variables: {schema}')
    return select_stmt, schema
#----Run Query on offline Feature Store datastore and generate training dataset
def gen_training_data(query, schema):
    """Run `query` on the offline Feature Store via Athena and write the
    training dataset plus its schema to the SageMaker Processing output dirs.

    Parameters
    ----------
    query : str
        Athena SELECT statement (built by ``gen_query``).
    schema : dict
        ``{'modelVariables': [...]}`` -- copied into the training schema.

    Side effects
    ------------
    Writes ``/opt/ml/processing/output/train/afd_training_data.csv`` and
    ``/opt/ml/processing/output/schema/schema.json``; exits the process via
    ``os._exit(1)`` on any failure.
    """
    try:
        # Launch the Athena query; results land under the configured S3 prefix.
        query_execution = athena.start_query_execution(
            QueryString=query,
            QueryExecutionContext={
                'Database': sg_db
            },
            ResultConfiguration={
                'OutputLocation': f's3://{args.bucket_name}/{args.bucket_prefix}/afd-pipeline/query_results/'
            }
        )
        query_execution_id = query_execution.get('QueryExecutionId')
        query_details = athena.get_query_execution(QueryExecutionId=query_execution_id)
        query_status = query_details['QueryExecution']['Status']['State']
        #--Wait for query to finish executing (poll every 30 s)
        print(f'Query ID: {query_execution_id}')
        while query_status in ['QUEUED', 'RUNNING']:
            print(f'Query status: {query_status}')
            time.sleep(30)
            query_details = athena.get_query_execution(QueryExecutionId=query_execution_id)
            query_status = query_details['QueryExecution']['Status']['State']
        print(f'Query status: {query_status}')
        # Athena writes its result as a CSV at this S3 URI; read it directly.
        query_result_s3_uri = query_details['QueryExecution']['ResultConfiguration']['OutputLocation']
        df_train = pd.read_csv(query_result_s3_uri)
        train_output_path = pathlib.Path('/opt/ml/processing/output/train')
        #--Write the final training dataset CSV file--
        df_train.to_csv(train_output_path / 'afd_training_data.csv', index=False)
        #--Generate Training data schema for Amazon Fraud Detector.
        # The minority class of EVENT_LABEL is mapped to FRAUD, the majority
        # class to LEGIT (via value_counts idxmin/idxmax).
        train_schema_path = pathlib.Path('/opt/ml/processing/output/schema')
        trainingDataSchema = {
            'modelVariables': schema['modelVariables'],
            'labelSchema':{
                'labelMapper': {
                    'FRAUD': [df_train["EVENT_LABEL"].value_counts().idxmin()],
                    'LEGIT': [df_train["EVENT_LABEL"].value_counts().idxmax()]
                }
            }
        }
        with open(train_schema_path / 'schema.json', 'w') as outfile:
            json.dump(trainingDataSchema, outfile)
        print(f'Training Dataset and Training Data Schema Generated: {trainingDataSchema}')
    except Exception as e:
        # Any failure is fatal for the processing job.
        print(e)
        os._exit(1)
def gen_train_data():
    """Build the Athena query, echo it, and materialize the training dataset."""
    query_str, schema = gen_query()
    print(f'Athena Query: {query_str}')
    gen_training_data(query_str, schema)
# ---- Main flow: wait for both offline stores, then build the training set ----
signups_fg_metadata, outecomes_fg_metadata = get_feature_store()
if signups_fg_metadata['OfflineStoreStatus']['Status'] == 'Active' and outecomes_fg_metadata['OfflineStoreStatus']['Status'] == 'Active':
    # FIX: log message previously read "active active" (duplicated word).
    print('Offline Data Store is Active')
    # Publish feature/table metadata through the module-level globals that
    # gen_query() and gen_training_data() read.
    sg_features = signups_fg_metadata['FeatureDefinitions']
    oc_features = outecomes_fg_metadata['FeatureDefinitions']
    sg_db = signups_fg_metadata['OfflineStoreConfig']['DataCatalogConfig']['Database']
    sg_table = signups_fg_metadata['OfflineStoreConfig']['DataCatalogConfig']['TableName']
    oc_db = outecomes_fg_metadata['OfflineStoreConfig']['DataCatalogConfig']['Database']
    oc_table = outecomes_fg_metadata['OfflineStoreConfig']['DataCatalogConfig']['TableName']
    gen_train_data()
else:
    print('Offline Data Store is Inactive')
    os._exit(1)
"sagemaker.feature_store.feature_group.FeatureGroup",
"json.dump",
"argparse.ArgumentParser",
"boto3.Session",
"boto3.client",
"boto3.setup_default_session",
"pandas.read_csv",
"sagemaker.get_execution_role",
"time.time",
"time.sleep",
"pathlib.Path",
"os._exit",
"numpy.array",
"sagemaker.... | [((291, 316), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (314, 316), False, 'import argparse\n'), ((758, 805), 'boto3.setup_default_session', 'boto3.setup_default_session', ([], {'region_name': 'region'}), '(region_name=region)\n', (785, 805), False, 'import boto3\n'), ((821, 854), 'boto3.Session', 'boto3.Session', ([], {'region_name': 'region'}), '(region_name=region)\n', (834, 854), False, 'import boto3\n'), ((890, 908), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (902, 908), False, 'import boto3\n'), ((1026, 1123), 'sagemaker.session.Session', 'sagemaker.session.Session', ([], {'boto_session': 'boto_session', 'sagemaker_client': 'sagemaker_boto_client'}), '(boto_session=boto_session, sagemaker_client=\n sagemaker_boto_client)\n', (1051, 1123), False, 'import sagemaker\n'), ((1145, 1175), 'sagemaker.get_execution_role', 'sagemaker.get_execution_role', ([], {}), '()\n', (1173, 1175), False, 'import sagemaker\n'), ((1211, 1253), 'boto3.client', 'boto3.client', (['"""athena"""'], {'region_name': 'region'}), "('athena', region_name=region)\n", (1223, 1253), False, 'import boto3\n'), ((1731, 1885), 'sagemaker.Session', 'sagemaker.Session', ([], {'boto_session': 'boto_session', 'sagemaker_client': 'sagemaker_boto_client', 'sagemaker_featurestore_runtime_client': 'featurestore_runtime'}), '(boto_session=boto_session, sagemaker_client=\n sagemaker_boto_client, sagemaker_featurestore_runtime_client=\n featurestore_runtime)\n', (1748, 1885), False, 'import sagemaker\n'), ((1935, 2010), 'sagemaker.feature_store.feature_group.FeatureGroup', 'FeatureGroup', ([], {'name': 'signups_fg_name', 'sagemaker_session': 'feature_store_session'}), '(name=signups_fg_name, sagemaker_session=feature_store_session)\n', (1947, 2010), False, 'from sagemaker.feature_store.feature_group import FeatureGroup\n'), ((2059, 2135), 'sagemaker.feature_store.feature_group.FeatureGroup', 'FeatureGroup', ([], {'name': 'outcomes_fg_name', 
'sagemaker_session': 'feature_store_session'}), '(name=outcomes_fg_name, sagemaker_session=feature_store_session)\n', (2071, 2135), False, 'from sagemaker.feature_store.feature_group import FeatureGroup\n'), ((4323, 4348), 'numpy.array', 'np.array', (['signup_features'], {}), '(signup_features)\n', (4331, 4348), True, 'import numpy as np\n'), ((4373, 4400), 'numpy.array', 'np.array', (['outcomes_features'], {}), '(outcomes_features)\n', (4381, 4400), True, 'import numpy as np\n'), ((8723, 8734), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (8731, 8734), False, 'import os\n'), ((2531, 2542), 'time.time', 'time.time', ([], {}), '()\n', (2540, 2542), False, 'import time\n'), ((4445, 4495), 'numpy.intersect1d', 'np.intersect1d', (['signup_features', 'outcomes_features'], {}), '(signup_features, outcomes_features)\n', (4459, 4495), True, 'import numpy as np\n'), ((5151, 5203), 'numpy.concatenate', 'np.concatenate', (['[signup_features, outcomes_features]'], {}), '([signup_features, outcomes_features])\n', (5165, 5203), True, 'import numpy as np\n'), ((6712, 6744), 'pandas.read_csv', 'pd.read_csv', (['query_result_s3_uri'], {}), '(query_result_s3_uri)\n', (6723, 6744), True, 'import pandas as pd\n'), ((6773, 6820), 'pathlib.Path', 'pathlib.Path', (['"""/opt/ml/processing/output/train"""'], {}), "('/opt/ml/processing/output/train')\n", (6785, 6820), False, 'import pathlib\n'), ((7045, 7093), 'pathlib.Path', 'pathlib.Path', (['"""/opt/ml/processing/output/schema"""'], {}), "('/opt/ml/processing/output/schema')\n", (7057, 7093), False, 'import pathlib\n'), ((3625, 3636), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (3633, 3636), False, 'import os\n'), ((6332, 6346), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (6342, 6346), False, 'import time\n'), ((7534, 7572), 'json.dump', 'json.dump', (['trainingDataSchema', 'outfile'], {}), '(trainingDataSchema, outfile)\n', (7543, 7572), False, 'import json\n'), ((7726, 7737), 'os._exit', 'os._exit', (['(1)'], {}), 
'(1)\n', (7734, 7737), False, 'import os\n'), ((3018, 3029), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (3026, 3029), False, 'import os\n'), ((3092, 3103), 'time.time', 'time.time', ([], {}), '()\n', (3101, 3103), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 10:49:09 2021
@author: user
"""
import numpy as np
from copy import deepcopy
class EKF_JansenRit:
    """
    Extended Kalman Filter (EKF) for joint state/parameter estimation of a
    Jansen-Rit-type neural mass model.

    The augmented state vector ``X`` has 11 entries: the first 6 are model
    states (3 potentials and their time derivatives, judging by the block
    structure of the Jacobian), and the last 5 are model parameters
    estimated online -- ordered (A, a, B, b, u) as unpacked in `Jacobian`.
    Parameter estimates are kept inside fixed bounds via interval
    constraints in `update`, and the measurement noise R is adapted with a
    forgetting factor UT.
    """
    def __init__(self, X, P, Q, R, UT, dt):
        # X  : augmented state vector (6 model states + 5 parameters)
        # P  : state covariance (11 x 11)
        # Q  : process-noise covariance (11 x 11)
        # R  : measurement-noise (co)variance; adapted online in `update`
        # UT : forgetting factor for the adaptive R update
        # dt : integration time step
        self.X = X
        self.P = P
        self.Q = Q
        self.R = R
        self.UT = UT
        self.dt = dt
    def Sigm(self, v):
        """Sigmoid (potential-to-rate) function with fixed v0=6, vmax=5, r=0.56."""
        v0 = 6
        vmax = 5
        r = 0.56
        sigm = vmax / (1 + np.exp(r * ( v0 - v )))
        return sigm
    def Sigm_diff(self, v):
        """First derivative of `Sigm` with respect to v."""
        r = 0.56
        vmax = 5
        # d/dv [vmax / (1 + exp(r*(v0 - v)))] = (r/vmax) * S(v) * (vmax - S(v))
        sigm_diff = r/vmax * self.Sigm(v) * (vmax - self.Sigm(v))
        return sigm_diff
    def Jacobian(self, x, par, dt):
        """
        Jacobian of the model dynamics, J = [df/dx | df/dpar] (6 x 11).

        x   : model states (first 6 entries of the augmented state)
        par : model parameters (A, a, B, b, u)
        dt  : time step (unused here; kept for interface consistency)
        """
        # Connectivity constants of the Jansen-Rit model
        c1 = 135
        c2 = 0.8 * c1
        c3 = 0.25 * c1
        c4 = 0.25 * c1
        A = par[0]
        a = par[1]
        B = par[2]
        b = par[3]
        u = par[4]
        #### Calculate Jacobian Jx = df/dx
        O = np.zeros((3,3))
        I = np.eye(3)
        diag = np.diag([-2*a, -2*a, -2*b])
        dGdx = np.array([
            [ -a*a, A*a*self.Sigm_diff(x[1]-x[2]), -A*a*self.Sigm_diff(x[1]-x[2])],
            [A*a*c1*c2*self.Sigm_diff(c1*x[0]), -a*a, 0],
            [B*b*c3*c4*self.Sigm_diff(c3*x[0]), 0, -b*b]
            ])
        # Second-order system: d/dt [pos; vel] = [[0, I], [dGdx, diag]] [pos; vel]
        term1 = np.hstack((O, I))
        term2 = np.hstack((dGdx, diag))
        J_x = np.vstack((term1,term2))
        #### Calculate Jacobian Jpar = df/d par
        # The first 3 state equations (pure derivatives) do not depend on
        # the parameters, hence the zero block.
        term3 = np.zeros((3, len(par)))
        term4 = np.array([
            [ a*self.Sigm(x[1]-x[2]), A*self.Sigm(x[1]-x[2])-2*x[3]-2*a*x[0], 0, 0, 0],
            [a*(u+c2*self.Sigm(c1*x[0])), A*(u+c2*self.Sigm(c1*x[0]))-2*x[4]-2*a*x[1], 0, 0, A*a],
            [ 0, 0, b*c4*self.Sigm(c3*x[0]), B*c4*self.Sigm(c3*x[0])-2*x[5]-2*b*x[2], 0]
            ])
        J_par = np.vstack((term3, term4))
        ##### combine two Jacobian matrix
        J = np.hstack((J_x, J_par))
        return J
    ###################
    def predict(self):
        """EKF prediction: propagate the augmented state and covariance one dt step."""
        X = self.X
        P = self.P
        Q = self.Q
        dt = self.dt
        x = deepcopy(X[:6])
        par = deepcopy(X[6:])
        Npar = len(par)
        Nx = len(x)
        ### Calculate Jacobian matrix A
        J = self.Jacobian(x, par, dt)
        # Parameters follow a random walk: their rows of A are zero.
        tmp = np.zeros((Npar, Nx + Npar))
        A = np.vstack((J, tmp))
        ### Convert from Jacobian matrix A to State transition matrix F
        ## First-order Taylor approximation of the matrix exponential
        F = np.eye(len(X)) + A*dt # F = exp (A * dt) = I + A * dt
        ## Update State model
        X_new = F @ X # F X = exp (A * dt) X
        x_new = X_new[:6]
        par_new = X_new[6:]
        XPred = np.hstack((x_new, par_new))
        PPred = F @ P @ F.T + Q
        self.X = XPred
        self.P = PPred
    def update(self):
        """
        EKF measurement update with interval constraints on the parameter
        entries and an adaptive measurement-noise estimate.
        """
        z = self.z
        X = self.X
        P = self.P
        R = self.R
        UT = self.UT
        # D selects the 5 parameter entries (indices 6..10) of the state.
        D = np.array([
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
            ])
        # Upper/lower bounds for the 5 parameters (A, a, B, b, u).
        ub = np.array([5.00, 100, 60, 100, 300])
        lb = np.array([0.00, 10, 0.00, 10, 180])
        b = np.zeros(ub.shape)
        # Observation model: z ~ x[1] - x[2] (difference of two model states).
        H = np.array([[0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0]])
        zPred = H @ X
        y = z - zPred # innovation (prediction error) of observation model
        S = H @ P @ H.T + R
        S_inv = np.linalg.inv(S)
        K = P @ H.T @ S_inv
        X_new = X + K @ y
        P_new = P - K @ H @ P # P - K @ S @ K.T
        ##### inequality constraints ##########################################
        ### Constraint would be applied only when the inequality condition is not satisfied.
        I = np.eye(len(X_new))  # NOTE(review): `I` is computed but never used
        W_inv = np.linalg.inv(P_new)
        # Projection matrix for the covariance-weighted constraint correction.
        L = W_inv @ D.T @ np.linalg.pinv(D @ W_inv @ D.T)
        value = D @ X_new
        # Build the target vector b: the violated bound for out-of-range
        # parameters, 0 for satisfied constraints.
        for i in range(len(value)):
            if (value[i] > ub[i]) | (value[i] < lb[i]):
                if (value[i] > ub[i]):
                    b[i] = ub[i]
                elif (value[i] < lb[i]):
                    b[i] = lb[i]
        ## Calculate state variables with interval contraints
        X_c = X_new - L @ (D @ X_new - b)
        # Overwrite only the violating parameter entries (state index i+6
        # corresponds to constraint i) with their constrained values.
        for i in range(len(value)):
            if (value[i] > ub[i]) | (value[i] < lb[i]):
                X_new[i+6] = X_c[i+6]
        ##### inequality constraints ##########################################
        # Adaptive measurement noise: exponential forgetting with factor UT.
        R = (1-UT) * R + UT * y**2
        ### log-likelihood of the innovation under N(0, S)
        _, logdet = np.linalg.slogdet(S)
        loglike = -0.5 * (np.log(2*np.pi) + logdet + y @ S_inv@ y)
        self.X = X_new
        self.P = P_new
        self.zPred = zPred
        self.S = S
        self.R = R
        self.loglike = loglike
    def ekf_estimation(self, z):
        """Run one predict/update cycle for a new observation ``z``."""
        self.z = z
        # Prediction step (estimate state variable)
        self.predict()
        # Update state (Update parameters)
        self.update()
| [
"numpy.diag",
"copy.deepcopy",
"numpy.log",
"numpy.zeros",
"numpy.hstack",
"numpy.array",
"numpy.linalg.inv",
"numpy.linalg.slogdet",
"numpy.exp",
"numpy.eye",
"numpy.linalg.pinv",
"numpy.vstack"
] | [((1022, 1038), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1030, 1038), True, 'import numpy as np\n'), ((1054, 1063), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1060, 1063), True, 'import numpy as np\n'), ((1080, 1113), 'numpy.diag', 'np.diag', (['[-2 * a, -2 * a, -2 * b]'], {}), '([-2 * a, -2 * a, -2 * b])\n', (1087, 1113), True, 'import numpy as np\n'), ((1569, 1586), 'numpy.hstack', 'np.hstack', (['(O, I)'], {}), '((O, I))\n', (1578, 1586), True, 'import numpy as np\n'), ((1603, 1626), 'numpy.hstack', 'np.hstack', (['(dGdx, diag)'], {}), '((dGdx, diag))\n', (1612, 1626), True, 'import numpy as np\n'), ((1650, 1675), 'numpy.vstack', 'np.vstack', (['(term1, term2)'], {}), '((term1, term2))\n', (1659, 1675), True, 'import numpy as np\n'), ((2359, 2384), 'numpy.vstack', 'np.vstack', (['(term3, term4)'], {}), '((term3, term4))\n', (2368, 2384), True, 'import numpy as np\n'), ((2443, 2466), 'numpy.hstack', 'np.hstack', (['(J_x, J_par)'], {}), '((J_x, J_par))\n', (2452, 2466), True, 'import numpy as np\n'), ((2664, 2679), 'copy.deepcopy', 'deepcopy', (['X[:6]'], {}), '(X[:6])\n', (2672, 2679), False, 'from copy import deepcopy\n'), ((2698, 2713), 'copy.deepcopy', 'deepcopy', (['X[6:]'], {}), '(X[6:])\n', (2706, 2713), False, 'from copy import deepcopy\n'), ((2886, 2913), 'numpy.zeros', 'np.zeros', (['(Npar, Nx + Npar)'], {}), '((Npar, Nx + Npar))\n', (2894, 2913), True, 'import numpy as np\n'), ((2979, 2998), 'numpy.vstack', 'np.vstack', (['(J, tmp)'], {}), '((J, tmp))\n', (2988, 2998), True, 'import numpy as np\n'), ((3467, 3494), 'numpy.hstack', 'np.hstack', (['(x_new, par_new)'], {}), '((x_new, par_new))\n', (3476, 3494), True, 'import numpy as np\n'), ((3751, 3945), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, \n 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 
1, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (3759, 3945), True, 'import numpy as np\n'), ((4110, 4144), 'numpy.array', 'np.array', (['[5.0, 100, 60, 100, 300]'], {}), '([5.0, 100, 60, 100, 300])\n', (4118, 4144), True, 'import numpy as np\n'), ((4167, 4200), 'numpy.array', 'np.array', (['[0.0, 10, 0.0, 10, 180]'], {}), '([0.0, 10, 0.0, 10, 180])\n', (4175, 4200), True, 'import numpy as np\n'), ((4224, 4242), 'numpy.zeros', 'np.zeros', (['ub.shape'], {}), '(ub.shape)\n', (4232, 4242), True, 'import numpy as np\n'), ((4268, 4314), 'numpy.array', 'np.array', (['[[0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0]])\n', (4276, 4314), True, 'import numpy as np\n'), ((4460, 4476), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (4473, 4476), True, 'import numpy as np\n'), ((4831, 4851), 'numpy.linalg.inv', 'np.linalg.inv', (['P_new'], {}), '(P_new)\n', (4844, 4851), True, 'import numpy as np\n'), ((5608, 5628), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['S'], {}), '(S)\n', (5625, 5628), True, 'import numpy as np\n'), ((4882, 4913), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(D @ W_inv @ D.T)'], {}), '(D @ W_inv @ D.T)\n', (4896, 4913), True, 'import numpy as np\n'), ((458, 478), 'numpy.exp', 'np.exp', (['(r * (v0 - v))'], {}), '(r * (v0 - v))\n', (464, 478), True, 'import numpy as np\n'), ((5666, 5683), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (5672, 5683), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME>
# Created : 2018-12-08
# Last Modified: 2018-12-08
# Vanderbilt University
from __future__ import absolute_import, division, print_function
__author__ = ['<NAME>']
__copyright__ = ["Copyright 2018 <NAME>, "]
__email__ = ['<EMAIL>']
__maintainer__ = ['<NAME>']
"""
"""
# Importing Modules
from cosmo_utils import mock_catalogues as cm
from cosmo_utils import utils as cu
from cosmo_utils.utils import file_utils as cfutils
from cosmo_utils.utils import file_readers as cfreaders
from cosmo_utils.utils import work_paths as cwpaths
from cosmo_utils.utils import web_utils as cweb
from cosmo_utils.utils import stats_funcs as cstats
from cosmo_utils.utils import geometry as cgeom
from cosmo_utils.mock_catalogues import catls_utils as cmcu
from cosmo_utils.mock_catalogues import mags_calculations as cmags
import numpy as num
import math
import os
import sys
import pandas as pd
import pickle
import matplotlib
matplotlib.use( 'Agg' )
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.rc('text', usetex=True)
import seaborn as sns
#sns.set()
from progressbar import (Bar, ETA, FileTransferSpeed, Percentage, ProgressBar,
ReverseBar, RotatingMarker)
from tqdm import tqdm
from datetime import datetime
# Project packages
from src.survey_utils import ReadSurvey
import hmf
import astropy.cosmology as astrocosmo
import astropy.constants as ac
import astropy.units as u
import astropy.table as astro_table
import requests
from collections import Counter
import subprocess
from tqdm import tqdm
from scipy.io.idl import readsav
from astropy.table import Table
from astropy.io import fits
import copy
from multiprocessing import Pool, Process, cpu_count
from scipy.interpolate import interp1d
import tarfile
from glob import glob
# Extra-modules
import argparse
from argparse import ArgumentParser
from argparse import HelpFormatter
from operator import attrgetter
from tqdm import tqdm
## --------- General functions ------------##
class SortingHelpFormatter(HelpFormatter):
def add_arguments(self, actions):
"""
Modifier for `argparse` help parameters, that sorts them alphabetically
"""
actions = sorted(actions, key=attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
def _str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def _check_pos_val(val, val_min=0):
"""
Checks if value is larger than `val_min`
Parameters
----------
val : `int` or `float`
Value to be evaluated by `val_min`
val_min: `float` or `int`, optional
minimum value that `val` can be. This value is set to `0` by default.
Returns
-------
ival : `float`
Value if `val` is larger than `val_min`
Raises
-------
ArgumentTypeError : Raised if `val` is NOT larger than `val_min`
"""
ival = float(val)
if ival <= val_min:
msg = '`{0}` is an invalid input!'.format(ival)
msg += '`val` must be larger than `{0}`!!'.format(val_min)
raise argparse.ArgumentTypeError(msg)
return ival
def get_parser():
    """
    Get parser object for `eco_mocks_create.py` script.

    Builds the command-line interface for the mock-catalogue creation
    pipeline and parses ``sys.argv``.

    Returns
    -------
    args : `argparse.Namespace`
        input arguments to the script
    """
    ## Define parser object (options listed alphabetically in --help)
    description_msg = 'Description of Script'
    parser = ArgumentParser(description=description_msg,
                            formatter_class=SortingHelpFormatter,)
    ##
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    ## Variables
    # Size of the cube
    parser.add_argument('-sizecube',
                        dest='size_cube',
                        help='Length of simulation cube in Mpc/h',
                        type=float,
                        default=130.)
    ## Type of Abundance matching
    parser.add_argument('-abopt',
                        dest='catl_type',
                        help='Type of Abund. Matching used in catalogue',
                        type=str,
                        choices=['mr'],
                        default='mr')
    # Median Redshift
    parser.add_argument('-zmed',
                        dest='zmedian',
                        help='Median Redshift of the survey',
                        type=float,
                        default=0.)
    # Type of survey
    parser.add_argument('-survey',
                        dest='survey',
                        help='Type of survey to produce. Choices: A, B, ECO',
                        type=str,
                        choices=['A','B','ECO'],
                        default='ECO')
    # Halo definition
    parser.add_argument('-halotype',
                        dest='halotype',
                        help='Type of halo definition.',
                        type=str,
                        choices=['mvir','m200b'],
                        default='mvir')
    # Cosmology used for the project
    parser.add_argument('-cosmo',
                        dest='cosmo_choice',
                        help='Cosmology to use. Options: 1) Planck, 2) LasDamas',
                        type=str,
                        default='Planck',
                        choices=['Planck','LasDamas'])
    # Halomass function
    parser.add_argument('-hmf',
                        dest='hmf_model',
                        help='Halo Mass Function choice',
                        type=str,
                        default='warren',
                        choices=['warren','tinker08'])
    ## Redshift-space distortions
    parser.add_argument('-zspace',
                        dest='zspace',
                        help="""
                        Option for adding redshift-space distortions (RSD).
                        Options: (1) = No RSD, (2) With RSD""",
                        type=int,
                        choices=[1,2],
                        default=2)
    ## Minimum of galaxies in a group
    parser.add_argument('-nmin',
                        dest='nmin',
                        help='Minimum number of galaxies in a galaxy group',
                        type=int,
                        choices=range(1,1000),
                        metavar='[1-1000]',
                        default=1)
    ## Perpendicular Linking Length (validated by _check_pos_val)
    parser.add_argument('-l_perp',
                        dest='l_perp',
                        help='Perpendicular linking length',
                        type=_check_pos_val,
                        default=0.07)
    ## Parallel Linking Length (validated by _check_pos_val)
    parser.add_argument('-l_para',
                        dest='l_para',
                        help='Parallel linking length',
                        type=_check_pos_val,
                        default=1.1)
    ## Random Seed
    parser.add_argument('-seed',
                        dest='seed',
                        help='Random seed to be used for the analysis',
                        type=int,
                        metavar='[0-4294967295]',
                        default=1)
    ## Option for removing file
    parser.add_argument('-remove',
                        dest='remove_files',
                        help="""
                        Delete files created by the script, in case the exist
                        already""",
                        type=_str2bool,
                        default=False)
    ## Program message (prefix used in all console output)
    parser.add_argument('-progmsg',
                        dest='Prog_msg',
                        help='Program message to use throught the script',
                        type=str,
                        default=cfutils.Program_Msg(__file__))
    ## CPU Counts
    parser.add_argument('-cpu',
                        dest='cpu_frac',
                        help='Fraction of total number of CPUs to use',
                        type=float,
                        default=0.75)
    ## Verbose
    parser.add_argument('-v','--verbose',
                        dest='verbose',
                        help='Option to print out project parameters',
                        type=_str2bool,
                        default=False)
    ## Parsing Objects
    args = parser.parse_args()

    return args
def param_vals_test(param_dict):
    """
    Checks if values are consistent with each other.

    Parameters
    -----------
    param_dict : `dict`
        Dictionary with `project` variables

    Raises
    -----------
    ValueError : Error
        This function raises a `ValueError` error if one or more of the
        required criteria are not met
    """
    ## Size of the cube
    # BUG FIX: this was a bare `assert`, which (a) raises AssertionError
    # rather than the ValueError documented above, and (b) is silently
    # stripped when Python runs with `-O`.
    if param_dict['size_cube'] != 130.:
        msg = '`size_cube` must be `130.`, but received `{0}`!'.format(
            param_dict['size_cube'])
        raise ValueError(msg)
def is_tool(name):
    """Check whether `name` is on PATH and marked as executable."""
    import shutil
    found = shutil.which(name)
    return found is not None
def add_to_dict(param_dict):
    """
    Aggregates extra variables to dictionary

    Parameters
    ----------
    param_dict : `dict`
        dictionary with input parameters and values; must contain a
        `survey_args` entry (a `ReadSurvey`-like object -- see
        `src.survey_utils`) and a `survey` entry ('A', 'B', or 'ECO').

    Returns
    ----------
    param_dict : `dict`
        dictionary with old and new values added
    """
    ## Central/Satellite designations (flags used for the `cs_flag` column)
    cens = int(1)
    sats = int(0)
    ##
    ## ECO-related files (checked for availability before use)
    url_catl = 'http://lss.phy.vanderbilt.edu/groups/data_eco_vc/'
    cweb.url_checker(url_catl)
    ##
    ## Mock cubes - Path
    url_mock_cubes = 'http://lss.phy.vanderbilt.edu/groups/data_eco_vc/ECO_CAM/ECO/'
    cweb.url_checker(url_mock_cubes)
    ## Survey name (RESOLVE surveys carry an A/B suffix)
    if param_dict['survey'] == 'ECO':
        survey_name = 'ECO'
    else:
        survey_name = 'RESOLVE_{0}'.format(param_dict['survey'])
    ##
    ## Plotting constants
    plot_dict = plot_const()
    ##
    ## Variable constants
    const_dict = val_consts()
    # Dictionary of Halobias files
    hb_files_dict = param_dict['survey_args'].halobias_files_dict()
    n_hb_files = len(hb_files_dict.keys())
    # Cosmological model and Halo Mass function
    cosmo_model = param_dict['survey_args'].cosmo_create()
    # Redshift and Comoving Distances
    z_dc_pd = param_dict['survey_args'].comoving_z_distance()
    # Mass Function
    mf_pd = param_dict['survey_args'].hmf_calc()
    # Survey Coordinate dictionary
    # NOTE(review): `survey_specs` is defined elsewhere in this module --
    # presumably it attaches the survey's RA/Dec/z boundaries; verify there.
    param_dict = survey_specs(param_dict)
    ##
    ## Saving to dictionary
    param_dict['cens'          ] = cens
    param_dict['sats'          ] = sats
    param_dict['url_catl'      ] = url_catl
    param_dict['url_mock_cubes'] = url_mock_cubes
    param_dict['plot_dict'     ] = plot_dict
    param_dict['const_dict'    ] = const_dict
    param_dict['survey_name'   ] = survey_name
    param_dict['hb_files_dict' ] = hb_files_dict
    param_dict['n_hb_files'    ] = n_hb_files
    param_dict['cosmo_model'   ] = cosmo_model
    param_dict['z_dc_pd'       ] = z_dc_pd
    param_dict['mf_pd'         ] = mf_pd

    return param_dict
def plot_const():
    """
    Returns constants for plotting

    Returns
    -------
    plot_dict: python dictionary
        dictionary with text labels, fontsizes, etc.
    """
    return {
        'size_label': 20,   # fontsize for axis labels
        'title'     : 25,   # fontsize for plot titles
        'markersize': 3.,   # marker size for scatter/line plots
    }
def val_consts():
    """
    Dictionary with variable constants

    Returns
    --------
    val_dict: python dictionary
        python dictionary with values of variables used throughout the script
    """
    # Speed of light, converted to km/s via astropy units
    speed_of_light = ac.c.to(u.km/u.s).value

    return {'c': speed_of_light}
def directory_skeleton(param_dict, proj_dict):
    """
    Creates the directory skeleton for the current project

    Parameters
    ----------
    param_dict : `dict`
        Dictionary with `project` variables

    proj_dict : `dict`
        Dictionary with info of the project that uses the
        `Data Science` Cookiecutter template.

    Returns
    ---------
    proj_dict : `dict`
        Dictionary with current and new paths to project directories
    """
    survey_args = param_dict['survey_args']
    # Directory of Cosmological files (created on disk if missing)
    proj_dict['cosmo_dir'] = survey_args.cosmo_outdir(create_dir=True)
    # Halo Mass Function output directory (created on disk if missing)
    proj_dict['mf_dir'] = survey_args.mass_func_output_dir(create_dir=True)

    return proj_dict
def tarball_create(hb_ii_name, param_dict, proj_dict, catl_ext='hdf5'):
    """
    Creates TAR object with mock catalogues, figures and README file

    Parameters
    -----------
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.

    param_dict: python dictionary
        dictionary with `project` variables

    proj_dict: python dictionary
        dictionary with info of the project that uses the
        `Data Science` Cookiecutter template.

    catl_ext: string, optional (default = 'hdf5')
        file extension of the `mock` catalogues created.
    """
    Prog_msg = param_dict['Prog_msg' ]
    ## List of Mock catalogues (member catalogues only, not "perfect" ones)
    catl_path_arr = param_dict['survey_args'].hb_gal_catl_files_list(
        hb_ii_name, catl_kind='memb', perf=False, file_ext=catl_ext)
    ## Figure to bundle: the xyz simulation-box plot produced earlier
    fig_outdir = param_dict['survey_args'].fig_outdir(hb_ii_name,
        create_dir=True)
    fig_file = glob('{0}/*xyz*.pdf'.format(fig_outdir))[0]
    # README file shipped from the repository's `references` directory
    readme_dir = os.path.join( param_dict['survey_args'].proj_dict['base_dir'],
                    'references')
    readme_file = glob('{0}/ECO_Mocks_VC.md'.format(readme_dir))[0]
    ## Saving to TAR file (gzip-compressed)
    tar_file_path = param_dict['survey_args'].tar_output_file(hb_ii_name)
    # Opening file
    with tarfile.open(tar_file_path, mode='w:gz') as tf:
        tf.add(readme_file, arcname=os.path.basename(readme_file))
        tf.add(fig_file, arcname=os.path.basename(fig_file))
        for file_kk in catl_path_arr:
            ## Reading in DataFrame
            gal_pd_kk = cfreaders.read_hdf5_file_to_pandas_DF(file_kk)
            ## DataFrame `without` certain columns (internal-only fields)
            gal_pd_mod = catl_drop_cols(gal_pd_kk)
            ## Saving modified DataFrame to a temporary `.mod` sibling file
            file_mod_kk = file_kk+'.mod'
            cfreaders.pandas_df_to_hdf5_file(gal_pd_mod, file_mod_kk,
                key='gal_catl')
            cfutils.File_Exists(file_mod_kk)
            # Saving to Tar-file under the ORIGINAL filename
            tf.add(file_mod_kk, arcname=os.path.basename(file_kk))
            # Deleting extra file
            os.remove(file_mod_kk)
    tf.close()  # redundant: the `with` block already closed `tf`
    cfutils.File_Exists(tar_file_path)
    if param_dict['verbose']:
        print('{0} TAR file saved as: {1}'.format(Prog_msg, tar_file_path))
def catl_drop_cols(mockgal_pd):
    """
    Drops certain columns from the galaxy DataFrame

    Parameters
    -----------
    mockgal_pd: pandas DataFrame
        DataFrame containing information for each mock galaxy.
        Includes galaxy properties + group ID

    Returns
    -----------
    gal_pd_mod: pandas DataFrame
        Updated version of the DataFrame containing information for each
        mock galaxy.
    """
    # Internal/bookkeeping columns that should not ship in the tarball.
    drop_cols = ['x','y','z','vx','vy','vz','galid','x_orig','y_orig',
                'z_orig','vel_pec','ra_orig']
    # `drop` returns a new DataFrame; `errors='ignore'` matches the original
    # mask behavior when some of the columns are absent.
    gal_pd_mod = mockgal_pd.drop(columns=drop_cols, errors='ignore')

    return gal_pd_mod
## --------- Halobias File - Analysis ------------##
## Main analysis of the Halobias File
def hb_analysis(ii, hb_ii_name, param_dict, proj_dict):
    """
    Main function that analyzes the Halobias file and constructs a set of
    mock catalogues.

    Parameters
    ------------
    ii : `int`
        Integer of the halobias file being analyzed, after having ordered
        the list of files alphabetically.

    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.

    param_dict : `dict`
        Dictionary with the `project` variables.

    proj_dict : `dict`
        Dictionary with current and new paths to project directories.
    """
    ## Extract data from Halobias File
    hb_ii_pd = hb_file_extract_data(hb_ii_name, param_dict)
    ## Carving out geometry of Survey and carrying out the analysis
    # NOTE(review): only the ECO geometry is handled here; non-ECO surveys
    # fall through without carving -- confirm that is intentional.
    if (param_dict['survey'] == 'ECO'):
        eco_geometry_mocks(hb_ii_pd, hb_ii_name, param_dict, proj_dict)
    ## Plotting different catalogues in simulation box
    mockcatls_simbox_plot(hb_ii_name, param_dict, proj_dict)
    ## Luminosity function for each catalogue
    mocks_lum_function(hb_ii_name, param_dict, proj_dict)
    ##
    ## Saving everything to TARBALL
    tarball_create(hb_ii_name, param_dict, proj_dict)
## Reading and extracting data From Halobias file
def hb_file_extract_data(hb_ii_name, param_dict):
    """
    Extracts the data from the Halobias file being analyzed.

    Parameters
    ------------
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.

    param_dict : `dict`
        Dictionary with the `project` variables.

    Returns
    ------------
    hb_ii_pd : `pandas.DataFrame`
        DataFrame containing main info from Halobias being analyzed.
    """
    # Resolve the Halobias file path and load it into a DataFrame.
    hb_file = param_dict['hb_files_dict'][hb_ii_name]
    halobias_df = cfreaders.read_hdf5_file_to_pandas_DF(hb_file)
    # Append derived halo properties (cen/sat flag, richness, log-mass).
    halobias_df = hb_extras(halobias_df, param_dict)
    # Append central-satellite distance information.
    halobias_df = cen_sat_dist_calc(halobias_df, param_dict)

    return halobias_df
## Extra Halo properties
def hb_extras(hb_ii_pd, param_dict):
    """
    Adds extra halo-derived columns to the halobias DataFrame:
    Central/Satellite designation (`cs_flag`), number of galaxies per
    host halo (`haloid_host_ngal`), and log10 of the host halo mass
    (`log_host_mvir`).
    Parameters
    ------------
    hb_ii_pd : `pandas.DataFrame`
        DataFrame containing main info from Halobias being analyzed.
        Must contain `halo_upid`, `halo_hostid`, `halo_mvir_host_halo`.
    param_dict : `dict`
        Dictionary with the `project` variables; must provide the `cens`
        and `sats` designation values.
    Returns
    ------------
    hb_ii_pd : `pandas.DataFrame`
        DataFrame containing main info from Halobias being analyzed +
        new galaxy Central/Satellite designations.
    """
    ## Constants
    cens = param_dict['cens']
    sats = param_dict['sats']
    # Initializing new column of galaxy type
    hb_ii_pd.loc[:, 'cs_flag'] = 0
    ##
    ## Central/Satellite - Indices (centrals have `halo_upid == -1`)
    cen_idx = hb_ii_pd.loc[hb_ii_pd['halo_upid'] == -1].index.values
    sat_idx = hb_ii_pd.loc[hb_ii_pd['halo_upid'] != -1].index.values
    ## Cen/Sat Designations
    hb_ii_pd.loc[cen_idx, 'cs_flag'] = cens
    hb_ii_pd.loc[sat_idx, 'cs_flag'] = sats
    ##
    ## Total number of galaxies per host halo.
    # Vectorized lookup via `Series.map` replaces the previous per-galaxy
    # Python loop (O(n) dict lookups inside an interpreted loop).
    haloid_ngal_counter = Counter(hb_ii_pd['halo_hostid'])
    hb_ii_pd.loc[:, 'haloid_host_ngal'] = (
        hb_ii_pd['halo_hostid'].map(haloid_ngal_counter).astype(int))
    ##
    ## Log of the host halo mass.
    hb_ii_pd.loc[:, 'log_host_mvir'] = num.log10(hb_ii_pd['halo_mvir_host_halo'])
    return hb_ii_pd
## Distance between centrals and satellites
def cen_sat_dist_calc(hb_ii_pd, param_dict):
    """
    Computes the distance between the central galaxy and its corresponding
    satellite galaxies in a given DM halo.
    Parameters
    -----------
    hb_ii_pd : `pandas.DataFrame`
        DataFrame containing main info from Halobias being analyzed.
        Must contain `x`, `y`, `z`, `cs_flag`, `halo_hostid`, and
        `haloid_host_ngal` columns.
    param_dict : `dict`
        Dictionary with the `project` variables.
    Returns
    -----------
    hb_ii_pd : `pandas.DataFrame`
        DataFrame containing main info from Halobias being analyzed + a
        `dist_c` column with the cen-sat distances (zero for centrals and
        for galaxies in single-galaxy haloes).
    """
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Distance-Central Assignment ...'.format(Prog_msg))
    ## Central and Satellite designation values
    cens = param_dict['cens']
    sats = param_dict['sats']
    dist_c_label    = 'dist_c'
    dist_sq_c_label = 'dist_c_sq'
    ## Galaxy coordinates
    coords = ['x', 'y', 'z']
    ## Unique IDs of haloes hosting more than one galaxy.
    # Single-galaxy haloes have no satellites, so their distance stays 0.
    haloid_unq = num.unique(hb_ii_pd.loc[hb_ii_pd['haloid_host_ngal'] > 1,
                            'halo_hostid'])
    # Working copy with only the columns needed for the distance calc.
    # (Previously this also built unused `x_sq`/`y_sq`/`z_sq` columns;
    # that dead code has been removed.)
    hb_cols_select = ['x', 'y', 'z', 'cs_flag', 'halo_hostid']
    hb_ii_pd_mod   = hb_ii_pd[hb_cols_select].copy()
    # Squared distances, filled in per-halo below
    hb_ii_pd_mod.loc[:, dist_sq_c_label] = num.zeros(hb_ii_pd_mod.shape[0])
    # Looping over haloes with more than one galaxy
    tqdm_desc = 'Cen-Sat Distances'
    for halo_ii in tqdm(haloid_unq, desc=tqdm_desc):
        # Subsample of galaxies belonging to this halo
        halo_ii_pd = hb_ii_pd_mod.loc[hb_ii_pd['halo_hostid'] == halo_ii]
        # Cens and Sats coordinates
        cens_coords = halo_ii_pd.loc[halo_ii_pd['cs_flag'] == cens, coords]
        sats_coords = halo_ii_pd.loc[halo_ii_pd['cs_flag'] == sats, coords]
        sats_idx    = sats_coords.index.values
        # Central position. NOTE(review): assumes one central per halo, in
        # which case the mean is a no-op -- confirm upstream guarantees this.
        cens_coords_mean = cens_coords.mean(axis=0).values
        # Squared distance of each satellite from the central position
        dist_sq_arr = num.sum(
            sats_coords.subtract(cens_coords_mean, axis=1).values**2, axis=1)
        # Assigning distances to each satellite
        hb_ii_pd_mod.loc[sats_idx, dist_sq_c_label] = dist_sq_arr
    ##
    ## Taking the square root of distances
    hb_ii_pd_mod.loc[:, dist_c_label] = (hb_ii_pd_mod[dist_sq_c_label].values)**.5
    # Assigning it to 'hb_ii_pd'
    hb_ii_pd.loc[:, dist_c_label] = hb_ii_pd_mod[dist_c_label].values
    if param_dict['verbose']:
        print('{0} Distance-Central Assignment ... Done'.format(Prog_msg))
    return hb_ii_pd
## --------- Makemock-related - Analysis ------------##
## Geometry of ECO catalogues
def eco_geometry_mocks(hb_ii_pd, hb_ii_name, param_dict, proj_dict):
    """
    Carves out the geometry of the `ECO` survey and produces set
    of mock catalogues.
    Parameters
    -------------
    hb_ii_pd : `pandas.DataFrame`
        DataFrame containing information from Halobias + other Halo-related
        information.
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.
    param_dict : `dict`
        Dictionary with the `project` variables.
    proj_dict : `dict`
        Dictionary with info of the paths and directories used throughout
        this project.
    """
    ## Constants
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Creating Mock Catalogues ....'.format(Prog_msg))
    ## Coordinates dictionary
    coord_dict = param_dict['coord_dict'].copy()
    ## List of `[x, y, z, DataFrame, coord_dict]` entries - one per mock
    pos_coords_mocks = []
    ##############################################
    ###### ----- X-Y Upper Left Mocks -----######
    ##############################################
    hb_ul_pd      = copy.deepcopy(hb_ii_pd)
    coord_dict_ul = coord_dict.copy()
    # RA window of the upper-left footprint
    coord_dict_ul['ra_min'] = 0.
    coord_dict_ul['ra_max'] = coord_dict_ul['ra_range']
    coord_dict_ul['ra_diff'] = coord_dict_ul['ra_max_real'] - coord_dict_ul['ra_max']
    # Observer placement and vertical spacing between stacked mocks
    gap_ul     = 1.
    x_init_ul  = 20.
    y_init_ul  = 0.
    z_init_ul  = 5.
    z_delta_ul = gap_ul + coord_dict_ul['d_th']
    if coord_dict_ul['dec_min'] < 0.:
        z_init_ul += num.abs(coord_dict_ul['dec_min'])
    # Number of mocks that fit vertically inside the simulation box
    z_mocks_n_ul = int(num.floor(param_dict['size_cube']/z_delta_ul))
    ## Determining positions
    for kk in range(z_mocks_n_ul):
        pos_coords_mocks.append([ x_init_ul, y_init_ul, z_init_ul,
                                hb_ul_pd.copy(), coord_dict_ul])
        z_init_ul += z_delta_ul
    ##############################################
    ###### ----- X-Y Upper Right Mocks -----######
    ##############################################
    hb_ur_pd      = copy.deepcopy(hb_ii_pd)
    coord_dict_ur = copy.deepcopy(coord_dict_ul)
    # RA window of the upper-right footprint
    coord_dict_ur['ra_min' ] = 180.
    coord_dict_ur['ra_max' ] = 180. + coord_dict_ur['ra_range']
    coord_dict_ur['ra_diff'] = coord_dict_ur['ra_max_real'] - coord_dict_ur['ra_max']
    gap_ur     = 1.
    x_init_ur  = param_dict['size_cube'] - 20.
    y_init_ur  = param_dict['size_cube'] - 3.
    z_init_ur  = 5.
    z_delta_ur = gap_ur + coord_dict_ur['d_th']
    if coord_dict_ur['dec_min'] < 0.:
        z_init_ur += num.abs(coord_dict_ur['dec_min'])
    z_mocks_n_ur = int(num.floor(param_dict['size_cube']/z_delta_ur))
    ## Determining positions
    for kk in range(z_mocks_n_ur):
        pos_coords_mocks.append([ x_init_ur, y_init_ur, z_init_ur,
                                hb_ur_pd.copy(), coord_dict_ur])
        z_init_ur += z_delta_ur
    ##############################################
    ## Creating mock catalogues
    ##############################################
    # NOTE: catalogues are currently created serially; `zz_mock` numbers
    # each catalogue. (Removed the previously unused `n_catls` local.)
    for zz_mock, pos_coords_mocks_zz in enumerate(pos_coords_mocks):
        # Making z'th catalogue
        catl_create_main(zz_mock, hb_ii_name, pos_coords_mocks_zz,
            param_dict, proj_dict)
    ##
    if param_dict['verbose']:
        print('{0} Creating Mock Catalogues .... Done'.format(Prog_msg))
## Main function for creating the mock catalogues
def catl_create_main(zz_mock, hb_ii_name, pos_coords_mocks_zz, param_dict,
    proj_dict):
    """
    Creates a single mock catalogue: places the observer at the given
    position, applies periodic boundary conditions, computes sky
    coordinates and redshift-space distortions, runs group-finding,
    assigns group masses and halo virial radii, and writes the resulting
    galaxy and group catalogues to disk.
    Parameters
    -----------
    zz_mock : `int`
        number of the mock catalogue being analyzed
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.
    pos_coords_mocks_zz: tuple/list, shape (5,)
        the (x, y, z) observer position, the galaxy DataFrame, and the
        coordinate dictionary to be used for this mock
    param_dict: python dictionary
        dictionary with `project` variables
    proj_dict: python dictionary
        dictionary with info of the project that uses the
        `Data Science` Cookiecutter template.
    Returns
    -----------
    """
    ## Constants
    Prog_msg = param_dict['Prog_msg']
    ## Deciding which catalogues to read
    ## Reading in input parameters
    # Deep copy so the in-place coordinate shifts below do not mutate the
    # caller's DataFrame (it is reused for the other stacked mocks).
    pos_coords_mocks_zz_copy = copy.deepcopy(pos_coords_mocks_zz)
    # Parameters
    ( x_ii ,
    y_ii ,
    z_ii ,
    hb_ii ,
    coord_dict_ii) = pos_coords_mocks_zz_copy
    ## Size of cube
    size_cube = float(param_dict['size_cube'])
    ## Cartesian coordinates of the observer
    pos_zz = num.asarray([x_ii, y_ii, z_ii])
    ## Formatting new positions
    ## Placing the observer at `pos_zz` and centering coordinates to center
    ## of box
    for kk, coord_kk in enumerate(['x','y','z']):
        # Keeping original Coordinates
        hb_ii.loc[:, coord_kk + '_orig'] = hb_ii[coord_kk].values
        ## Moving observer to the origin of this coordinate
        hb_ii.loc[:,coord_kk] = hb_ii[coord_kk] - pos_zz[kk]
        ## Periodic boundaries: galaxies shifted outside +/- half the box
        clf_ii_neg = hb_ii.loc[hb_ii[coord_kk] <= -(size_cube/2.)].index
        clf_ii_pos = hb_ii.loc[hb_ii[coord_kk] >= (size_cube/2.)].index
        ## Wrapping them back into the box
        if len(clf_ii_neg) != 0:
            hb_ii.loc[clf_ii_neg, coord_kk] += size_cube
        if len(clf_ii_pos) != 0:
            hb_ii.loc[clf_ii_pos, coord_kk] -= size_cube
    ##
    ## Interpolating values for redshift and comoving distance
    ## and adding redshift-space distortions
    ( mock_pd ,
    mock_zz_file) = makemock_catl( hb_ii, hb_ii_name, coord_dict_ii,
        zz_mock, param_dict, proj_dict)
    ##
    ## Group-finding (FoF) on the surviving galaxies
    ( mockgal_pd ,
    mockgroup_pd) = group_finding( mock_pd, mock_zz_file,
        param_dict, proj_dict)
    ##
    ## Group mass, group galaxy type, and total Mr/Mstar for groups
    ( mockgal_pd ,
    mockgroup_pd) = group_mass_assignment(mockgal_pd, mockgroup_pd,
        param_dict, proj_dict)
    ##
    ## Halo Rvir
    mockgal_pd = halos_rvir_calc(mockgal_pd, param_dict)
    ##
    ## Dropping columns from `mockgal_pd` and `mockgroup_pd`
    ##
    ## Writing output files - `Normal Catalogues`
    writing_to_output_file(mockgal_pd, mockgroup_pd, zz_mock, hb_ii_name,
        param_dict, proj_dict, perf_catl=False)
def makemock_catl(hb_ii, hb_ii_name, coord_dict_ii, zz_mock, param_dict,
    proj_dict):
    """
    Function that calculates distances and redshift-space distortions
    for the galaxies that make it into the catalogues, then carves the
    survey footprint (RA/DEC/cz and magnitude cuts) out of the box.
    Parameters
    -----------
    hb_ii: pandas DataFrame
        DataFrame with the information on galaxies, along with position coords,
        velocities, etc.
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.
    coord_dict_ii: python dictionary
        dictionary with RA, DEC, and other geometrical variables used
        throughout this script.
    zz_mock: int
        number of the mock catalogue being analyzed
    param_dict: python dictionary
        dictionary with `project` variables
    proj_dict: python dictionary
        dictionary with info of the project that uses the
        `Data Science` Cookiecutter template.
    Returns
    -----------
    mock_pd: pandas DataFrame
        Updated DataFrame with new positions, coordinates, etc.
    mock_catl_pd_file: str
        path to the HDF5 file where `mock_pd` was saved
    """
    ## Constants
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Creating Mock Catalogue [{1}] ....'.format(Prog_msg,
            zz_mock))
    ## Galaxy Directory
    mock_catl_ii_dir = param_dict['survey_args'].catl_output_dir(hb_ii_name,
        catl_kind='gal', perf=False, create_dir=True)
    ## Galaxy File
    mock_catl_pd_file = os.path.join( mock_catl_ii_dir,
        '{0}_{1}_galcatl_cat_{2}.hdf5'.format(
        param_dict['survey'],
        hb_ii_name,
        zz_mock))
    ## Number of galaxies
    hb_ngal = len(hb_ii)
    speed_c = param_dict['const_dict']['c']
    ## Distances from observer to galaxies
    # `z_dc_pd` tabulates redshift vs. comoving distance for the chosen
    # cosmology; galaxies beyond its maximum distance cannot be converted.
    z_dc_pd = param_dict['z_dc_pd']
    dc_max = z_dc_pd['dc'].max()
    dc_z_interp = interp1d(z_dc_pd['dc'], z_dc_pd['z'])
    ## Redshift-space distortions
    # Cartesian Coordinates
    cart_gals = hb_ii[['x' ,'y' ,'z' ]].values
    vel_gals = hb_ii[['vx','vy','vz']].values
    ## Initializing arrays
    r_dist_arr = num.zeros(hb_ngal)
    ra_arr = num.zeros(hb_ngal)
    dec_arr = num.zeros(hb_ngal)
    cz_arr = num.zeros(hb_ngal)
    cz_nodist_arr = num.zeros(hb_ngal)
    vel_tan_arr = num.zeros(hb_ngal)
    vel_tot_arr = num.zeros(hb_ngal)
    vel_pec_arr = num.zeros(hb_ngal)
    # Looping over all galaxies
    for kk in tqdm(range(hb_ngal)):
        cz_local = -1.
        ## Distance From observer
        r_dist = (num.sum(cart_gals[kk]**2))**.5
        assert(r_dist <= dc_max)
        ## Recessional velocity in km/s (no peculiar motion yet)
        cz_local = speed_c * dc_z_interp(r_dist)
        cz_val = cz_local
        ## Right Ascension and declination
        ( ra_kk,
        dec_kk) = mock_cart_to_spherical_coords(cart_gals[kk], r_dist)
        ## Whether or not to add redshift-space distortions
        # zspace == 1: real-space only; zspace == 2: add line-of-sight
        # peculiar velocity to cz.
        if param_dict['zspace'] == 1:
            vel_tot = 0.
            vel_tan = 0.
            vel_pec = 0.
        elif param_dict['zspace'] == 2:
            # Radial component of the velocity along the line of sight
            vr = num.dot(cart_gals[kk], vel_gals[kk])/r_dist
            cz_val += vr * (1. + param_dict['zmedian'])
            vel_tot = (num.sum(vel_gals[kk]**2))**.5
            vel_tan = (vel_tot**2 - vr**2)**.5
            vel_pec = (cz_val - cz_local)/(1. + param_dict['zmedian'])
        ##
        ## Saving to arrays
        r_dist_arr [kk] = r_dist
        ra_arr [kk] = ra_kk
        dec_arr [kk] = dec_kk
        cz_arr [kk] = cz_val
        cz_nodist_arr[kk] = cz_local
        vel_tot_arr [kk] = vel_tot
        vel_tan_arr [kk] = vel_tan
        vel_pec_arr [kk] = vel_pec
    ##
    ## Assigning to DataFrame
    hb_ii.loc[:,'r_dist' ] = r_dist_arr
    hb_ii.loc[:,'ra' ] = ra_arr
    hb_ii.loc[:,'dec' ] = dec_arr
    hb_ii.loc[:,'cz' ] = cz_arr
    hb_ii.loc[:,'cz_nodist'] = cz_nodist_arr
    hb_ii.loc[:,'vel_tot' ] = vel_tot_arr
    hb_ii.loc[:,'vel_tan' ] = vel_tan_arr
    hb_ii.loc[:,'vel_pec' ] = vel_pec_arr
    ##
    ## Selecting galaxies with `czmin` and `czmax` criteria
    # Right Ascension
    # A negative ra_min means the footprint straddles RA = 0/360; cut DEC
    # and magnitude first, then exclude the RA band outside the footprint.
    if coord_dict_ii['ra_min'] < 0.:
        ra_min_mod = coord_dict_ii['ra_min'] + 360.
        mock_pd = hb_ii.loc[(hb_ii['dec'] >= coord_dict_ii['dec_min']) &
            (hb_ii['dec'] <= coord_dict_ii['dec_max']) &
            (hb_ii['abs_rmag'] != 0.) &
            (hb_ii['abs_rmag'] <= param_dict['mr_limit'])].copy()
        mock_pd = mock_pd.loc[~( (mock_pd['ra'] < ra_min_mod) &
            (mock_pd['ra'] > coord_dict_ii['ra_max']))]
    elif coord_dict_ii['ra_min'] >= 0.:
        mock_pd = hb_ii.loc[(hb_ii['ra'] >= coord_dict_ii['ra_min']) &
            (hb_ii['ra'] <= coord_dict_ii['ra_max']) &
            (hb_ii['dec'] >= coord_dict_ii['dec_min']) &
            (hb_ii['dec'] <= coord_dict_ii['dec_max']) &
            (hb_ii['abs_rmag'] != 0.) &
            (hb_ii['abs_rmag'] <= param_dict['mr_limit'])].copy()
    ##
    ## Velocity limits
    mock_pd = mock_pd.loc[ (mock_pd['cz'] >= param_dict['czmin']) &
        (mock_pd['cz'] <= param_dict['czmax'])]
    ##
    ## New Catalogue
    if len(mock_pd) != 0:
        ## Changing RA values
        # Shift RAs by `ra_diff` so the mock footprint lands on the real
        # survey's RA range, wrapping back into [0, 360) where needed.
        if coord_dict_ii['ra_min'] < 0.:
            ra_min_limit = coord_dict_ii['ra_min'] + 360.
            ra_new_arr = mock_pd['ra'].values
            ra_except_idx = num.where( (ra_new_arr >= ra_min_limit) &
                (ra_new_arr <= 360.))[0]
            ra_new_arr[ra_except_idx] += (-360.) + coord_dict_ii['ra_diff']
            ra_normal_idx = num.where( (ra_new_arr >= 0.) &
                (ra_new_arr <= coord_dict_ii['ra_max']))[0]
            ra_new_arr[ra_normal_idx] += coord_dict_ii['ra_diff']
            ra_neg_idx = num.where(ra_new_arr < 0.)[0]
            if len(ra_neg_idx) != 0.:
                ra_new_arr[ra_neg_idx] += 360.
        elif coord_dict_ii['ra_min'] >= 0.:
            ra_new_arr = mock_pd['ra'].values
            ra_new_arr += coord_dict_ii['ra_diff']
            ra_neg_idx = num.where(ra_new_arr < 0.)[0]
            if len(ra_neg_idx) != 0:
                ra_new_arr[ra_neg_idx] += 360.
        ##
        ## Saving new array to DataFrame
        ra_orig_arr = mock_pd['ra'].values
        # Assigning new values for RA
        mock_pd.loc[:,'ra' ] = ra_new_arr
        mock_pd.loc[:,'ra_orig'] = ra_orig_arr
        ##
        ## Resetting indices
        mock_pd.reset_index(inplace=True, drop=True)
        ##
        ## Assert that coordinates fall within Survey limits
        assert( (mock_pd['ra' ].min() >= coord_dict_ii['ra_min_real']) &
            (mock_pd['ra' ].max() <= coord_dict_ii['ra_max_real']) &
            (mock_pd['dec'].min() >= coord_dict_ii['dec_min' ]) &
            (mock_pd['dec'].max() <= coord_dict_ii['dec_max' ]))
    ##
    ## Saving file to Pandas DataFrame
    cfreaders.pandas_df_to_hdf5_file(mock_pd, mock_catl_pd_file, key='galcatl')
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Creating Mock Catalogues [{1}]....Done'.format(Prog_msg,
            zz_mock))
    return mock_pd, mock_catl_pd_file
def group_finding(mock_pd, mock_zz_file, param_dict, proj_dict,
    file_ext='csv'):
    """
    Runs the group finder `FoF` on the file, and assigns galaxies to
    galaxy groups
    Parameters
    -----------
    mock_pd: pandas DataFrame
        DataFrame with positions, velocities, and more for the
        galaxies that made it into the catalogue
    mock_zz_file: string
        path to the galaxy catalogue
    param_dict: python dictionary
        dictionary with `project` variables
    proj_dict: python dictionary
        Dictionary with current and new paths to project directories
    file_ext: string, optional (default = 'csv')
        file extension for the FoF file products
    Returns
    -----------
    mockgal_pd_merged: pandas DataFrame
        DataFrame with the info on each mock galaxy + their group properties
    mockgroup_pd: pandas DataFrame
        DataFrame with the info on each mock galaxy group
    """
    ## Constants
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Group Finding ....'.format(Prog_msg))
    # Speed of light - in km/s
    speed_c = param_dict['const_dict']['c']
    ##
    ## Running FoF
    # File prefix
    # Defining files for FoF output and Mock coordinates
    fof_file = '{0}.galcatl_fof.{1}'.format(mock_zz_file, file_ext)
    grep_file = '{0}.galcatl_grep.{1}'.format(mock_zz_file, file_ext)
    grep_g_file = '{0}.galcatl_grep_g.{1}'.format(mock_zz_file, file_ext)
    mock_coord_path = '{0}.galcatl_radeccz.{1}'.format(mock_zz_file, file_ext)
    ## RA-DEC-CZ file (space-separated, no header -- the format `fof9_ascii`
    ## expects)
    mock_coord_pd = mock_pd[['ra','dec','cz']].to_csv(mock_coord_path,
        sep=' ', header=None, index=False)
    cfutils.File_Exists(mock_coord_path)
    ## Creating `FoF` command and executing it
    fof_exe = os.path.join( cwpaths.get_code_c(), 'bin', 'fof9_ascii')
    cfutils.File_Exists(fof_exe)
    # FoF command
    fof_str = '{0} {1} {2} {3} {4} {5} {6} {7} > {8}'
    fof_arr = [ fof_exe,
        param_dict['survey_vol'],
        param_dict['zmin'],
        param_dict['zmax'],
        param_dict['l_perp'],
        param_dict['l_para'],
        param_dict['nmin'],
        mock_coord_path,
        fof_file]
    fof_cmd = fof_str.format(*fof_arr)
    # Executing command
    # NOTE(review): `shell=True` with interpolated paths is shell-injection
    # prone; acceptable only because all inputs are internally generated.
    if param_dict['verbose']:
        print(fof_cmd)
    subprocess.call(fof_cmd, shell=True)
    ##
    ## Parsing `fof_file` - Galaxy and Group files
    # Group rows start with 'G'; galaxy rows do not.
    gal_cmd = 'grep G -v {0} > {1}'.format(fof_file, grep_file)
    group_cmd = 'grep G {0} > {1}'.format(fof_file, grep_g_file)
    # Running commands
    if param_dict['verbose']:
        print(gal_cmd )
        print(group_cmd)
    subprocess.call(gal_cmd , shell=True)
    subprocess.call(group_cmd, shell=True)
    ##
    ## Extracting galaxy and group information
    # Column names
    gal_names = ['groupid', 'galid', 'ra', 'dec', 'z']
    group_names = [ 'G', 'groupid', 'cen_ra', 'cen_dec', 'cen_z', 'ngals',\
        'sigma_v', 'rproj']
    # Pandas DataFrames
    # Galaxies, sorted by `galid` so rows align with `mock_pd`'s row order
    grep_pd = pd.read_csv(grep_file, sep='\s+', header=None, names=gal_names,
        index_col='galid').sort_index()
    grep_pd.index.name = None
    # Converting redshift to velocity
    grep_pd.loc[:,'cz'] = grep_pd['z'] * speed_c
    grep_pd = grep_pd.drop('z', axis=1)
    # Galaxy groups
    mockgroup_pd = pd.read_csv(grep_g_file, sep='\s+', header=None,
        names=group_names)
    # Group centroid velocity
    mockgroup_pd.loc[:,'cen_cz'] = mockgroup_pd['cen_z'] * speed_c
    mockgroup_pd = mockgroup_pd.drop('cen_z', axis=1)
    mockgroup_pd = mockgroup_pd.drop('G', axis=1)
    ## Joining the 2 datasets for galaxies
    mockgal_pd_merged = pd.concat([mock_pd, grep_pd['groupid']], axis=1)
    # Removing `1` from `groupid` (FoF output is 1-based; make it 0-based)
    mockgroup_pd.loc [:,'groupid'] -= 1
    mockgal_pd_merged.loc[:,'groupid'] -= 1
    ## Removing FoF files
    if param_dict['verbose']:
        print('{0} Removing group-finding related files'.format(
            param_dict['Prog_msg']))
    os.remove(fof_file)
    os.remove(grep_file)
    os.remove(grep_g_file)
    os.remove(mock_coord_path)
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Group Finding ....Done'.format(Prog_msg))
    return mockgal_pd_merged, mockgroup_pd
def group_mass_assignment(mockgal_pd, mockgroup_pd, param_dict, proj_dict):
    """
    Assigns a theoretical halo mass to the group based on a group property
    (total r-band luminosity or total stellar mass) via halo abundance
    matching, and flags the brightest/most massive galaxy of each group
    as the group's `central`.
    Parameters
    -----------
    mockgal_pd: pandas DataFrame
        DataFrame containing information for each mock galaxy.
        Includes galaxy properties + group ID
    mockgroup_pd: pandas DataFrame
        DataFame containing information for each galaxy group
    param_dict: python dictionary
        dictionary with `project` variables
    proj_dict: python dictionary
        Dictionary with current and new paths to project directories
    Returns
    -----------
    mockgal_pd_new: pandas DataFrame
        Original info + abundance matched mass of the group, M_group
    mockgroup_pd_new: pandas DataFrame
        Original info of `mockgroup_pd' + abundance matched mass, M_group
    """
    ## Constants
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Group Mass Assign. ....'.format(Prog_msg))
    ## Copies of DataFrames
    gal_pd = mockgal_pd.copy()
    group_pd = mockgroup_pd.copy()
    ## Constants
    Cens = int(1)
    Sats = int(0)
    n_gals = len(gal_pd )
    n_groups = len(group_pd)
    ## Type of abundance matching
    # NOTE(review): if `catl_type` is neither 'mr' nor 'mstar', `prop_gal`
    # and `reverse_opt` are never bound and a NameError follows -- confirm
    # upstream validation restricts `catl_type` to these two values.
    if param_dict['catl_type'] == 'mr':
        prop_gal = 'abs_rmag'
        reverse_opt = True
    elif param_dict['catl_type'] == 'mstar':
        prop_gal = 'logmstar'
        reverse_opt = False
    # Absolute value of `prop_gal`
    prop_gal_abs = prop_gal + '_abs'
    ##
    ## Selecting only a `few` columns
    # Galaxies
    gal_pd = gal_pd.loc[:,[prop_gal, 'groupid']]
    # Groups
    group_pd = group_pd[['ngals']]
    ##
    ## Total `prop_gal` for groups
    group_prop_arr = [[] for x in range(n_groups)]
    ## Looping over galaxy groups
    # NOTE(review): the 'mstar' total below is currently disabled, so
    # `group_prop_arr` is only populated for `catl_type == 'mr'`.
    # Mstar-based
    # if param_dict['catl_type'] == 'mstar':
    #     for group_zz in tqdm(range(n_groups)):
    #         ## Stellar mass
    #         group_prop = gal_pd.loc[gal_pd['groupid']==group, prop_gal].values
    #         group_log_prop_tot = num.log10(num.sum(10**group_prop))
    #         ## Saving to array
    #         group_prop_arr[group_zz] = group_log_prop_tot
    # Luminosity-based
    if (param_dict['catl_type'] == 'mr'):
        for group_zz in tqdm(range(n_groups)):
            ## Total abs. magnitude of the group
            group_prop = gal_pd.loc[gal_pd['groupid']==group_zz, prop_gal].values
            group_prop_tot = Mr_group_calc(group_prop)
            ## Saving to array
            group_prop_arr[group_zz] = group_prop_tot
    ##
    ## Saving to DataFrame
    group_prop_arr = num.asarray(group_prop_arr)
    group_pd.loc[:, prop_gal] = group_prop_arr
    if param_dict['verbose']:
        print('{0} Calculating group masses...Done'.format(
            param_dict['Prog_msg']))
    ##
    ## --- Halo Abundance Matching --- ##
    ## Mass function for given cosmology
    mf_pd = param_dict['mf_pd']
    mf_dict = dict({ 'var' : mf_pd['logM'].values,
        'dens': mf_pd['ngtm'].values})
    ## Halo mass from rank-matching the group property to the mass function
    Mh_ab = cm.abundance_matching.abundance_matching_f(
        group_prop_arr,
        mf_dict,
        volume1=param_dict['survey_vol'],
        dens1_opt=False,
        reverse=reverse_opt)
    # Assigning to DataFrame
    group_pd.loc[:, 'M_group'] = Mh_ab
    ###
    ### ---- Galaxies ---- ###
    # Adding `M_group` to galaxy catalogue
    gal_pd = pd.merge(gal_pd, group_pd[['M_group', 'ngals']],
        how='left', left_on='groupid', right_index=True)
    # Renaming `ngals` column
    gal_pd = gal_pd.rename(columns={'ngals':'g_ngal'})
    #
    # Selecting `central` and `satellite` galaxies
    # All galaxies start as satellites; one central per group is chosen below.
    gal_pd.loc[:, prop_gal_abs] = num.abs(gal_pd[prop_gal])
    gal_pd.loc[:, 'g_galtype'] = num.ones(n_gals).astype(int)*Sats
    g_galtype_groups = num.ones(n_groups)*Sats
    ##
    ## Looping over galaxy groups
    for zz in tqdm(range(n_groups)):
        gals_g = gal_pd.loc[gal_pd['groupid']==zz]
        ## Galaxy with the largest |prop_gal| is the central; ties are
        ## broken randomly via `num.random.choice`.
        gals_g_max = gals_g.loc[gals_g[prop_gal_abs]==gals_g[prop_gal_abs].max()]
        g_galtype_groups[zz] = int(num.random.choice(gals_g_max.index.values))
    # `g_galtype_groups` now holds the DataFrame index of each group's central
    g_galtype_groups = num.asarray(g_galtype_groups).astype(int)
    ## Assigning group galaxy type
    gal_pd.loc[g_galtype_groups, 'g_galtype'] = Cens
    ##
    ## Dropping columns
    # Galaxies
    gal_col_arr = [prop_gal, prop_gal_abs, 'groupid']
    gal_pd = gal_pd.drop(gal_col_arr, axis=1)
    # Groups
    group_col_arr = ['ngals']
    group_pd = group_pd.drop(group_col_arr, axis=1)
    ##
    ## Merging to original DataFrames
    # Galaxies
    mockgal_pd_new = pd.merge(mockgal_pd, gal_pd, how='left', left_index=True,
        right_index=True)
    # Groups
    mockgroup_pd_new = pd.merge(mockgroup_pd, group_pd, how='left',
        left_index=True, right_index=True)
    if param_dict['verbose']:
        print('{0} Group Mass Assign. ....Done'.format(Prog_msg))
    return mockgal_pd_new, mockgroup_pd_new
def mock_cart_to_spherical_coords(cart_arr, dist):
    """
    Computes the right ascension and declination for the given
    point in (x,y,z) position
    Parameters
    -----------
    cart_arr: numpy.ndarray, shape (3,)
        array with (x,y,z) positions
    dist: float
        dist to the point from observer's position
    Returns
    -----------
    ra_val: float
        right ascension of the point on the sky, in degrees ([0, 360))
    dec_val: float
        declination of the point on the sky, in degrees
    """
    ## Reformatting coordinates
    # Unit vector towards the point
    ( x_val,
    y_val,
    z_val) = cart_arr/float(dist)
    # Distance to object
    dist = float(dist)
    ## Declination: angle above the x-y plane
    dec_val = 90. - num.degrees(num.arccos(z_val))
    ## Right ascension in the x-y plane
    if x_val == 0:
        if y_val > 0.:
            ra_val = 90.
        elif y_val < 0.:
            ra_val = -90.
        else:
            # Point lies exactly on the z-axis, where RA is degenerate.
            # (Bug fix: previously `ra_val` was left unassigned here,
            # raising an UnboundLocalError.)
            ra_val = 0.
    else:
        ra_val = num.degrees(num.arctan(y_val/x_val))
    ##
    ## Quadrant corrections so that RA falls within [0, 360)
    if x_val < 0.:
        ra_val += 180.
    elif (x_val >= 0.) and (y_val < 0.):
        ra_val += 360.
    return ra_val, dec_val
def Mr_group_calc(gal_mr_arr):
    """
    Computes the total r-band absolute magnitude of a galaxy group.
    Parameters
    ----------
    gal_mr_arr: array_like
        array of r-band absolute magnitudes of member galaxies of the group
    Returns
    -------
    group_mr: float
        total r-band absolute magnitude of the group
    """
    # Convert each member magnitude to a luminosity and add them up.
    member_lums = 10.**cmags.absolute_magnitude_to_luminosity(gal_mr_arr, 'r')
    total_lum = num.sum(member_lums)
    # Convert the summed luminosity back into an absolute magnitude.
    return cmags.luminosity_to_absolute_mag(total_lum, 'r')
## ---------| Halo Rvir calculation |------------##
def halos_rvir_calc(mockgal_pd, param_dict, catl_sim_eq=False):
    """
    Calculates the virial radius of dark matter halos for each Halo in the
    catalogue
    Taken from:
    http://home.strw.leidenuniv.nl/~franx/college/galaxies10/handout4.pdf
    Parameters:
    ------------
    mockgal_pd: pandas DataFrame
        DataFrame containing information for each mock galaxy.
        Includes galaxy properties + group ID + Ab. Match. Mass
    param_dict: python dictionary
        dictionary with `project` variables
    catl_sim_eq: boolean, optional (default = False)
        option to replace the `rvir` of all halos with zeros
        when the number of galaxies from a distinct halo DO NOT MATCH the
        total number of galaxies from a distinct halo,
        i.e. n_catl(halo) == n_sim(halo)
    Returns
    ------------
    mockgal_pd_new: pandas DataFrame
        Original info + Halo rvir
    """
    ## Constants
    Prog_msg = param_dict['Prog_msg']
    if param_dict['verbose']:
        print('{0} Halo Rvir Calc. ....'.format(Prog_msg))
    ## Copies of DataFrames
    gal_pd = mockgal_pd.copy()
    ## Cosmological model parameters
    cosmo_model = param_dict['cosmo_model']
    H0 = cosmo_model.H0.to(u.km/(u.s * u.Mpc))
    Om0 = cosmo_model.Om0
    Ode0 = cosmo_model.Ode0
    ## Other constants
    G = ac.G
    speed_c = ac.c.to(u.km/u.s)
    ##
    ## Halo IDs
    haloid_counts = Counter(gal_pd['halo_hostid'])
    haloid_arr = num.unique(gal_pd['halo_hostid'])
    ## Mean redshift of each halo's member galaxies (from mean cz)
    haloid_z = num.array([gal_pd.loc[gal_pd['halo_hostid']==xx,'cz'].mean() for \
        xx in haloid_arr])/speed_c.value
    ## Halo masses (log10, identical for all members of a halo)
    haloid_mass = num.array([gal_pd.loc[gal_pd['halo_hostid']==xx,'log_host_mvir'].mean() for \
        xx in haloid_arr])
    ## Halo rvir - in Mpc/h
    # r_vir^3 = G M / (100 H0^2 [Om0 (1+z)^3 + Ode0])
    rvir_num = (10**(haloid_mass)*u.Msun) * G
    rvir_den = 100 * H0**2 * (Om0 * (1.+haloid_z)**3 + Ode0)
    rvir_q = ((rvir_num / rvir_den)**(1./3)).to(u.Mpc)
    rvir = rvir_q.value
    ## Replacing with zero if necessary
    if catl_sim_eq:
        ## Replacing value
        repl_val = 0.
        ## Halo ngals - in catalogue
        haloid_ngal_cat = num.array([haloid_counts[xx] for xx in haloid_arr])
        ## Halo ngals - in simulation
        haloid_ngal_sim = num.array([gal_pd.loc[gal_pd['halo_hostid']==xx, 'halo_ngal'].values[0]\
            for xx in haloid_arr])
        ## Zeroing `rvir` for incomplete haloes (catalogue count != sim count)
        # (Bug fix: the previous 0/1 Python list was interpreted by numpy as
        # *fancy integer indices* -- only elements 0 and 1 were ever touched,
        # and complete haloes were the ones flagged. A boolean mask of the
        # incomplete haloes is what the surrounding comments intend.)
        rvir_incomplete = haloid_ngal_cat != haloid_ngal_sim
        rvir[rvir_incomplete] = repl_val
    ## Saving to DataFrame
    rvir_pd = pd.DataFrame({'halo_hostid':haloid_arr, 'halo_rvir':rvir})
    # Dropping 'rvir' column for subhalo
    gal_pd.drop('halo_rvir', axis=1, inplace=True)
    ## Merging DataFrames
    # Galaxies
    mockgal_pd_new = pd.merge( left=gal_pd ,
        right=rvir_pd ,
        how='left' ,
        left_on='halo_hostid' ,
        right_on='halo_hostid')
    if param_dict['verbose']:
        print('{0} Halo Rvir Calc. ....Done'.format(Prog_msg))
    return mockgal_pd_new
## ---------| Writing to Files |------------##
def writing_to_output_file(mockgal_pd, mockgroup_pd, zz_mock, hb_ii_name,
    param_dict, proj_dict, output_fmt = 'hdf5', perf_catl=False):
    """
    Writes the galaxy and group catalogues of a single mock to disk as
    HDF5 files and verifies that both files were created.
    Parameters
    -----------
    mockgal_pd: pandas DataFrame
        DataFrame containing information for each mock galaxy.
        Includes galaxy properties + group ID + Ab. Match. Mass
    mockgroup_pd: pandas DataFrame
        DataFame containing information for each galaxy group
    zz_mock: float
        number of group/galaxy catalogue being analyzed
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.
    param_dict: python dictionary
        dictionary with `project` variables
    proj_dict: python dictionary
        Dictionary with current and new paths to project directories
    output_fmt: string, optional (default = 'hdf5')
        file extension used for the output catalogue filenames
    perf_catl: boolean, optional (default = False)
        if 'true', it saves the `perfect` version of the galaxy / group
        catalogue.
    """
    ## HDF5 keys under which each DataFrame is stored
    gal_key = 'gal_catl'
    group_key = 'group_catl'
    ## Filenames
    # Output directories are created on demand; `perf` catalogues carry a
    # `_perf` suffix and live in their own directories.
    if perf_catl:
        ## Perfect Galaxy catalogue
        gal_outdir = param_dict['survey_args'].catl_output_dir(hb_ii_name,
            catl_kind='memb', perf=True, create_dir=True)
        gal_file = os.path.join(gal_outdir,
            '{0}_{1}_cat_{2}_{3}_memb_cat_perf.{4}'.format(
            hb_ii_name,
            param_dict['survey'],
            zz_mock,
            param_dict['cosmo_choice'],
            output_fmt))
        ## Perfect Group catalogue
        group_outdir = param_dict['survey_args'].catl_output_dir(hb_ii_name,
            catl_kind='group', perf=True, create_dir=True)
        group_file = os.path.join(group_outdir,
            '{0}_{1}_cat_{2}_{3}_group_cat_perf.{4}'.format(
            hb_ii_name,
            param_dict['survey'],
            zz_mock,
            param_dict['cosmo_choice'],
            output_fmt))
    else:
        ## Normal galaxy catalogue
        gal_outdir = param_dict['survey_args'].catl_output_dir(hb_ii_name,
            catl_kind='memb', perf=False, create_dir=True)
        gal_file = os.path.join(gal_outdir,
            '{0}_{1}_cat_{2}_{3}_memb_cat.{4}'.format(
            hb_ii_name,
            param_dict['survey'],
            zz_mock,
            param_dict['cosmo_choice'],
            output_fmt))
        ## Normal group catalogue
        group_outdir = param_dict['survey_args'].catl_output_dir(hb_ii_name,
            catl_kind='group', perf=False, create_dir=True)
        group_file = os.path.join(group_outdir,
            '{0}_{1}_cat_{2}_{3}_group_cat.{4}'.format(
            hb_ii_name,
            param_dict['survey'],
            zz_mock,
            param_dict['cosmo_choice'],
            output_fmt))
    ##
    ## Saving DataFrames to files
    # Member catalogue
    cfreaders.pandas_df_to_hdf5_file(mockgal_pd, gal_file, key=gal_key)
    # Group catalogue
    cfreaders.pandas_df_to_hdf5_file(mockgroup_pd, group_file, key=group_key)
    ##
    ## Checking for file's existence
    cfutils.File_Exists(gal_file)
    cfutils.File_Exists(group_file)
    print('{0} gal_file : {1}'.format(param_dict['Prog_msg'], gal_file))
    print('{0} group_file: {1}'.format(param_dict['Prog_msg'], group_file))
## -----------| Plotting-related functions |----------- ##
def mockcatls_simbox_plot(hb_ii_name, param_dict, proj_dict, catl_ext='.hdf5',
    fig_fmt='pdf', figsize=(9,9)):
    """
    Plots the spatial distribution of the mock catalogues within the
    simulation box, projected onto the XY, XZ, and YZ planes (one panel
    each), with one colour per mock catalogue.
    Parameters
    ------------
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.
    param_dict: python dictionary
        dictionary with `project` variables
    proj_dict: python dictionary
        dictionary with info of the project that uses the
        `Data Science` Cookiecutter template.
    catl_ext: string, optional (default = '.hdf5')
        file extension of the mock catalogues
    fig_fmt: string, optional (default = 'pdf')
        file format of the output figure
        Options: `pdf`, or `png`
    figsize: tuple, optional (default = (9,9))
        figure size of the output figure, in units of inches
    """
    ## Constants and variables
    Prog_msg   = param_dict['Prog_msg' ]
    plot_dict  = param_dict['plot_dict']
    markersize = plot_dict['markersize']
    ## Paths to the member catalogues of this Halobias file
    catl_path_arr = param_dict['survey_args'].hb_gal_catl_files_list(
        hb_ii_name, catl_kind='memb', perf=False, file_ext=catl_ext)
    n_catls = len(catl_path_arr)
    ## Output filename of the figure
    fig_outdir = param_dict['survey_args'].fig_outdir(hb_ii_name,
        create_dir=True)
    fname = os.path.join(fig_outdir,
        '{0}_{1}_{2}_{3}_xyz_mocks.{4}'.format(
            param_dict['survey'],
            hb_ii_name,
            param_dict['halotype'],
            param_dict['cosmo_choice'],
            fig_fmt))
    ## Axis labels and limits
    x_label = r'\boldmath X [Mpc $\mathrm{h^{-1}}$]'
    y_label = r'\boldmath Y [Mpc $\mathrm{h^{-1}}$]'
    z_label = r'\boldmath Z [Mpc $\mathrm{h^{-1}}$]'
    xlim = (0, param_dict['size_cube'])
    ylim = (0, param_dict['size_cube'])
    # Figure title
    if param_dict['survey'] == 'ECO':
        fig_title = 'ECO Survey'
    else:
        fig_title = 'RESOLVE {0}'.format(param_dict['survey'])
    # Figure and axes (three panels on a 2x2 grid)
    plt.close()
    plt.clf()
    fig = plt.figure(figsize=figsize)
    ax1 = fig.add_subplot(221, facecolor='white', aspect='equal')
    ax2 = fig.add_subplot(222, facecolor='white', aspect='equal')
    ax3 = fig.add_subplot(223, facecolor='white', aspect='equal')
    # Major ticks every 20 Mpc/h across the box
    major_ticks = num.arange(0, param_dict['size_cube'] + 1, 20)
    # Common set-up for each panel: limits, labels, grid, ticks
    panels = [(ax1, x_label, y_label),
              (ax2, x_label, z_label),
              (ax3, y_label, z_label)]
    for ax_ii, xlab_ii, ylab_ii in panels:
        ax_ii.set_xlim(xlim)
        ax_ii.set_ylim(ylim)
        ax_ii.set_xlabel(xlab_ii, fontsize=plot_dict['size_label'])
        ax_ii.set_ylabel(ylab_ii, fontsize=plot_dict['size_label'])
        ax_ii.grid(True, color='gray', which='major', linestyle='--')
        ax_ii.set_xticks(major_ticks)
        ax_ii.set_yticks(major_ticks)
    # One distinct colour per catalogue
    cm      = plt.get_cmap('gist_rainbow')
    col_arr = [cm(kk / float(n_catls)) for kk in range(n_catls)]
    # Title
    title_obj = fig.suptitle(fig_title, fontsize=plot_dict['title'])
    title_obj.set_y(1.04)
    ##
    ## Plotting the galaxies of each catalogue in its own colour
    for kk, catl_kk in enumerate(tqdm(catl_path_arr)):
        # Reading in catalogue
        catl_kk_pd = cfreaders.read_hdf5_file_to_pandas_DF(catl_kk)
        color_kk   = col_arr[kk]
        # Original (pre-placement) galaxy coordinates
        (x_kk_arr,
         y_kk_arr,
         z_kk_arr) = catl_kk_pd[['x_orig','y_orig','z_orig']].values.T
        # Same marker styling for all three projections
        pt_kwargs = dict(marker='o', color=color_kk, markersize=markersize,
            linestyle='None', rasterized=True)
        ax1.plot(x_kk_arr, y_kk_arr, **pt_kwargs)
        ax2.plot(x_kk_arr, z_kk_arr, **pt_kwargs)
        ax3.plot(y_kk_arr, z_kk_arr, **pt_kwargs)
    # Adjusting space
    plt.subplots_adjust(top=0.86)
    plt.tight_layout()
    # Saving figure
    if fig_fmt == 'pdf':
        plt.savefig(fname, bbox_inches='tight')
    else:
        plt.savefig(fname, bbox_inches='tight', dpi=400)
    print('{0} Figure saved as: {1}'.format(Prog_msg, fname))
    plt.clf()
    plt.close()
def mocks_lum_function(hb_ii_name, param_dict, proj_dict, catl_ext='.hdf5',
    fig_fmt='pdf', figsize=(9,9)):
    """
    Computes and plots the cumulative luminosity function of each of the
    mock catalogues produced from a given Halobias file, all on one axis.
    Parameters
    ------------
    hb_ii_name : `str`
        Name of key corresponding to the Halobias file being analyzed.
    param_dict: python dictionary
        dictionary with `project` variables
    proj_dict: python dictionary
        dictionary with info of the project that uses the
        `Data Science` Cookiecutter template.
    catl_ext: string, optional (default = '.hdf5')
        file extension of the mock catalogues
    fig_fmt: string, optional (default = 'pdf')
        file format of the output figure
        Options: `pdf`, or `png`
    figsize: tuple, optional (default = (9,9))
        figure size of the output figure, in units of inches
    """
    # Thicker axis frame for this figure
    matplotlib.rcParams['axes.linewidth'] = 2.5
    ## Constants and variables
    Prog_msg   = param_dict['Prog_msg' ]
    plot_dict  = param_dict['plot_dict']
    markersize = plot_dict['markersize']
    # Width of the `M_r` magnitude bins, in units of magnitudes
    mr_bins_sep = 0.2
    ## Member catalogues for this Halobias file
    catl_path_arr = param_dict['survey_args'].hb_gal_catl_files_list(
        hb_ii_name, catl_kind='memb', perf=False, file_ext=catl_ext)
    n_catls = len(catl_path_arr)
    ## Output filename of the figure
    fig_outdir = param_dict['survey_args'].fig_outdir(hb_ii_name,
        create_dir=True)
    fname = os.path.join(fig_outdir,
        '{0}_{1}_{2}_{3}_lum_function_mocks.{4}'.format(
            param_dict['survey'],
            hb_ii_name,
            param_dict['halotype'],
            param_dict['cosmo_choice'],
            fig_fmt))
    # One distinct colour per catalogue
    cm      = plt.get_cmap('gist_rainbow')
    col_arr = [kk_c for kk_c in (cm(kk / float(n_catls))
        for kk in range(n_catls))]
    ## Axis labels
    x_label = r'\boldmath $M_{r}$'
    y_label = r'\boldmath $n(< M_{r}) \left[h^{3}\ \textrm{Mpc}^{-3}\right]$'
    # Figure set-up
    plt.clf()
    plt.close()
    fig = plt.figure(figsize=figsize)
    ax1 = fig.add_subplot(111, facecolor='white')
    ax1.set_xlabel(x_label, fontsize=plot_dict['size_label'])
    ax1.set_ylabel(y_label, fontsize=plot_dict['size_label'])
    ##
    ## Luminosity function of each catalogue
    for kk, catl_kk in enumerate(tqdm(catl_path_arr)):
        # Reading in catalogue
        catl_kk_pd = cfreaders.read_hdf5_file_to_pandas_DF(catl_kk)
        color_kk   = col_arr[kk]
        # Magnitude bins, and cumulative galaxy counts brighter than
        # each bin edge (the +1 avoids empty bins on the log axis)
        mr_bins = cstats.Bins_array_create(catl_kk_pd['abs_rmag'],
            base=mr_bins_sep)
        N_lum = [num.where(catl_kk_pd['abs_rmag'] < xx)[0].size + 1
            for xx in mr_bins]
        # Number density, normalized by the survey volume
        n_lum = num.asarray(N_lum) / param_dict['survey_vol']
        ax1.plot(mr_bins, n_lum, color=color_kk, marker='o', linestyle='-',
            markersize=markersize)
    # Logarithmic number-density axis; brighter magnitudes to the right
    ax1.set_yscale('log')
    ax1.invert_xaxis()
    # Adjusting space
    plt.subplots_adjust(top=0.86)
    plt.tight_layout()
    # Saving figure
    if fig_fmt == 'pdf':
        plt.savefig(fname, bbox_inches='tight')
    else:
        plt.savefig(fname, bbox_inches='tight', dpi=400)
    print('{0} Figure saved as: {1}'.format(Prog_msg, fname))
    plt.clf()
    plt.close()
## -----------| Survey-related functions |----------- ##
def survey_specs(param_dict):
    """
    Provides the specifications of the survey being created.
    Parameters
    ----------
    param_dict: python dictionary
        dictionary with `project` variables
    Returns
    ----------
    param_dict: python dictionary
        dictionary with the 'updated' project variables, i.e. the cz /
        redshift limits, survey volume, r-band magnitude limits, and the
        geometrical quantities of the survey (under `coord_dict`).
    Raises
    ----------
    ValueError
        If `param_dict['survey']` is not one of ('A', 'B', 'ECO').
    """
    ## Cosmological model
    cosmo_model = param_dict['survey_args'].cosmo_create()
    ## Survey being analyzed - fail early on an invalid choice instead of
    ## dying later with a `NameError` on `czmin`.
    survey = param_dict['survey']
    if survey not in ('A', 'B', 'ECO'):
        msg = '`survey` ({0}) is not a valid option!'.format(survey)
        raise ValueError(msg)
    ## Redshift (cz) limits and r-band magnitude limit for each survey
    if survey == 'A':
        czmin    = 2532.
        czmax    = 7470.
        # survey_vol = 20957.7789388
        mr_limit = -17.33
    elif survey == 'B':
        czmin    = 4250.
        czmax    = 7250.
        # survey_vol = 15908.063125
        mr_limit = -17.00
    elif survey == 'ECO':
        czmin    = 2532.
        czmax    = 7470.
        # survey_vol = 192294.221932
        mr_limit = -17.33
    ##
    ## Right Ascension and Declination coordinates for each survey
    if survey == 'A':
        ra_min_real = 131.25
        ra_max_real = 236.25
        dec_min     = 0.
        dec_max     = 5.
        # Extras - footprint re-centered about RA = 180 deg
        dec_range = dec_max - dec_min
        ra_range  = ra_max_real - ra_min_real
        ra_min    = (180. - ra_range)/2.
        ra_max    = ra_min + ra_range
        ra_diff   = ra_max_real - ra_max
        # Sanity checks on the footprint
        assert(dec_min < dec_max)
        assert(ra_range >= 0)
        assert(ra_min < ra_max)
        assert(ra_min_real < ra_max_real)
    elif survey == 'B':
        # RESOLVE-B crosses RA = 0, hence the 360-degree wrap below.
        ra_min_real = 330.
        ra_max_real = 45.
        dec_min     = -1.25
        dec_max     = 1.25
        # Extras - footprint re-centered about RA = 180 deg
        dec_range = dec_max - dec_min
        ra_range  = ra_max_real - (ra_min_real - 360.)
        ra_min    = (180. - ra_range)/2.
        ra_max    = ra_min + ra_range
        ra_diff   = ra_max_real - ra_max
        # Sanity checks on the footprint
        assert(dec_min < dec_max)
        assert(ra_range >= 0)
        assert(ra_min < ra_max)
    elif survey == 'ECO':
        ra_min_real = 130.05
        ra_max_real = 237.45
        dec_min     = -1
        dec_max     = 49.85
        # Extras - footprint re-centered about RA = 180 deg
        dec_range = dec_max - dec_min
        ra_range  = ra_max_real - ra_min_real
        ra_min    = (180. - ra_range)/2.
        ra_max    = ra_min + ra_range
        ra_diff   = ra_max_real - ra_max
        # Sanity checks on the footprint
        assert(dec_min < dec_max)
        assert(ra_range >= 0)
        assert(ra_min < ra_max)
        assert(ra_min_real < ra_max_real)
    ## Survey volume
    km_s = u.km/u.s
    # Redshift limits from the cz limits, using the c ~ 3e5 km/s
    # approximation. NOTE(review): an exact `ac.c` conversion used to be
    # computed here but was immediately overwritten by this approximate
    # one; the dead assignment has been removed.
    z_arr = (num.array([czmin, czmax])*km_s/(3e5*km_s)).value
    # Comoving distances at the redshift limits, in Mpc
    r_arr = cosmo_model.comoving_distance(z_arr).to(u.Mpc).value
    survey_vol = param_dict['survey_args'].survey_vol_calc([0, ra_range],
                    [dec_min, dec_max],
                    r_arr)
    ##
    ## Survey height, and other geometrical factors
    (h_total,
     h1     ,
     s1_top ,
     s2     ) = param_dict['survey_args'].geometry_calc(r_arr[0],
                    r_arr[1],
                    ra_range)
    (h_side ,
     h2     ,
     s1_side,
     d_th   ) = param_dict['survey_args'].geometry_calc(r_arr[0],
                    r_arr[1],
                    dec_range)
    ##
    # RA/Dec and geometry dictionary
    coord_dict = {
        'ra_min_real': ra_min_real,
        'ra_max_real': ra_max_real,
        'dec_min'    : dec_min,
        'dec_max'    : dec_max,
        'dec_range'  : dec_range,
        'ra_range'   : ra_range,
        'ra_min'     : ra_min,
        'ra_max'     : ra_max,
        'ra_diff'    : ra_diff,
        # Height and other geometrical objects
        'h_total'    : h_total,
        's1_top'     : s1_top,
        's2'         : s2,
        'h1'         : h1,
        'h_side'     : h_side,
        's1_side'    : s1_side,
        'd_th'       : d_th,
        'h2'         : h2,
        'r_arr'      : r_arr}
    ##
    ## Resolve-B Mr limit
    mr_eco   = -17.33
    mr_res_b = -17.00
    ## Saving to `param_dict`
    param_dict['czmin'     ] = czmin
    param_dict['czmax'     ] = czmax
    param_dict['zmin'      ] = z_arr[0]
    param_dict['zmax'      ] = z_arr[1]
    param_dict['survey_vol'] = survey_vol
    param_dict['mr_limit'  ] = mr_limit
    param_dict['mr_eco'    ] = mr_eco
    param_dict['mr_res_b'  ] = mr_res_b
    param_dict['coord_dict'] = coord_dict
    return param_dict
## --------- Multiprocessing ------------##
def multiprocessing_catls(hb_keys, param_dict, proj_dict, memb_tuples_ii):
    """
    Distributes the analysis of the catalogues into more than 1 processor
    Parameters:
    -----------
    hb_keys : `numpy.ndarray`
        List of Halobias filenames keys.
    param_dict : `dict`
        Dictionary with the `project` variables.
    proj_dict : `dict`
        Dictionary with current and new paths to project directories
    memb_tuples_ii : `tuple`
        Tuple of halobias file indices to be analyzed, i.e. the
        `(start, end)` half-open index range assigned to this process.
    """
    ## Program Message
    Prog_msg = param_dict['Prog_msg']
    ## Reading in Catalogue IDs
    start_ii, end_ii = memb_tuples_ii
    ## Index values for this process
    # `num.arange` replaces the less idiomatic `num.array(range(...))`
    idx_arr = num.arange(start_ii, end_ii, dtype=int)
    ## Catalogue array (Halobias keys handled by this process)
    hb_keys_ii = hb_keys[start_ii : end_ii]
    ##
    ## Looping over the desired catalogues
    for (ii, hb_key_ii) in zip(idx_arr, hb_keys_ii):
        ## Converting index to native `int` (from numpy integer)
        ii = int(ii)
        ## Progress message for the catalogue being analyzed
        if param_dict['verbose']:
            print('{0} Analyzing `{1}`\n'.format(Prog_msg, hb_key_ii))
        ## Extracting `name` of the catalogue (basename without extension)
        hb_ii_name = os.path.splitext(os.path.split(hb_key_ii)[1])[0]
        ## Analaysis for the Halobias file
        hb_analysis(ii, hb_ii_name, param_dict, proj_dict)
## --------- Main Function ------------##
def main(args):
    """
    Main function to create CAM mock group galaxy catalogues.

    Steps:
      1) Validate the parsed inputs and assemble `param_dict`/`proj_dict`.
      2) Echo the run configuration to stdout.
      3) Partition the Halobias file list across CPUs and analyze each
         chunk in a separate process via `multiprocessing_catls`.

    Parameters
    -----------
    args : `argparse.Namespace`
        Command-line arguments, as returned by `get_parser`.
    """
    ## Starting time (used to report the total runtime at the end)
    start_time = datetime.now()
    ## Reading all elements and converting to python dictionary
    param_dict = vars(args)
    ## Checking for correct input
    param_vals_test(param_dict)
    #
    # Creating instance of `ReadSurvey` with the input parameters
    param_dict['survey_args'] = ReadSurvey(**param_dict)
    ## Program message prefix used for all stdout output below
    Prog_msg = param_dict['Prog_msg']
    # Adding additional derived parameters (coordinates, volumes, etc.)
    param_dict = add_to_dict(param_dict)
    ##
    ## Creating Folder Structure
    # proj_dict = directory_skeleton(param_dict, cwpaths.cookiecutter_paths(__file__))
    proj_dict = param_dict['survey_args'].proj_dict
    proj_dict = directory_skeleton(param_dict, proj_dict)
    ##
    ## Printing out project variables (skipping bulky/non-scalar entries)
    print('\n'+50*'='+'\n')
    for key, key_val in sorted(param_dict.items()):
        key_arr = ['Prog_msg', 'hb_files_dict', 'z_dc_pd', 'mf_pd']
        if key not in key_arr:
            print('{0} `{1}`: {2}'.format(Prog_msg, key, key_val))
    print('\n'+50*'='+'\n')
    ### ---- Analyzing Catalogues ---- ###
    ##
    # Number of Halobias Files
    n_hb_files = param_dict['n_hb_files']
    # Halobias Keys, sorted so chunk boundaries are deterministic
    hb_keys = num.sort(list(param_dict['hb_files_dict'].keys()))
    ## Using `multiprocessing` to analyze merged catalogues files
    ## Number of CPU's to use
    cpu_number = int(cpu_count() * param_dict['cpu_frac'])
    ## Defining step-size for each CPU
    if cpu_number <= n_hb_files:
        # More files than CPUs: each CPU gets a contiguous chunk of files.
        catl_step = int(n_hb_files / cpu_number)
        memb_arr = num.arange(0, n_hb_files+1, catl_step)
    else:
        # More CPUs than files: one file per CPU.
        # NOTE(review): `catl_step` is computed but unused in this branch;
        # `memb_arr` simply enumerates every file index.
        catl_step = int((n_hb_files / cpu_number)**-1)
        memb_arr = num.arange(0, n_hb_files+1)
    ## Force the last boundary to cover all files (the stepped `arange`
    ## above may stop short of `n_hb_files`).
    memb_arr[-1] = n_hb_files
    ## Tuples of (start, end) catalogue indices, one per worker process
    memb_tuples = num.asarray([(memb_arr[xx], memb_arr[xx+1])
                            for xx in range(memb_arr.size-1)])
    ## Assigning `memb_tuples` to function `multiprocessing_catls`
    procs = []
    for ii in range(len(memb_tuples)):
        # Defining `proc` element
        proc = Process(target=multiprocessing_catls,
                        args=(hb_keys, param_dict,
                            proj_dict, memb_tuples[ii]))
        # Appending to main `procs` list
        procs.append(proc)
        proc.start()
    ##
    ## Joining `procs` - wait for every worker to finish
    for proc in procs:
        proc.join()
    ## End time for running the catalogues
    end_time = datetime.now()
    total_time = end_time - start_time
    print('{0} Total Time taken (Create): {1}'.format(Prog_msg, total_time))
# Script entry point
if __name__ == '__main__':
    # Parse the command-line arguments and run the full pipeline.
    main(get_parser())
| [
"cosmo_utils.utils.file_utils.File_Exists",
"os.remove",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.sum",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"numpy.floor",
"cosmo_utils.utils.web_utils.url_checker",
"numpy.ones",
"matplotlib.pyplot.figure",
"cosmo_utils.utils.file_utils.Program_Ms... | [((1037, 1058), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1051, 1058), False, 'import matplotlib\n'), ((1128, 1155), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1134, 1155), True, 'import matplotlib.pyplot as plt\n'), ((3661, 3747), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': 'description_msg', 'formatter_class': 'SortingHelpFormatter'}), '(description=description_msg, formatter_class=\n SortingHelpFormatter)\n', (3675, 3747), False, 'from argparse import ArgumentParser\n'), ((9611, 9637), 'cosmo_utils.utils.web_utils.url_checker', 'cweb.url_checker', (['url_catl'], {}), '(url_catl)\n', (9627, 9637), True, 'from cosmo_utils.utils import web_utils as cweb\n'), ((9759, 9791), 'cosmo_utils.utils.web_utils.url_checker', 'cweb.url_checker', (['url_mock_cubes'], {}), '(url_mock_cubes)\n', (9775, 9791), True, 'from cosmo_utils.utils import web_utils as cweb\n'), ((13867, 13942), 'os.path.join', 'os.path.join', (["param_dict['survey_args'].proj_dict['base_dir']", '"""references"""'], {}), "(param_dict['survey_args'].proj_dict['base_dir'], 'references')\n", (13879, 13942), False, 'import os\n'), ((15320, 15354), 'cosmo_utils.utils.file_utils.File_Exists', 'cfutils.File_Exists', (['tar_file_path'], {}), '(tar_file_path)\n', (15339, 15354), True, 'from cosmo_utils.utils import file_utils as cfutils\n'), ((18207, 18256), 'cosmo_utils.utils.file_readers.read_hdf5_file_to_pandas_DF', 'cfreaders.read_hdf5_file_to_pandas_DF', (['hb_ii_file'], {}), '(hb_ii_file)\n', (18244, 18256), True, 'from cosmo_utils.utils import file_readers as cfreaders\n'), ((19633, 19665), 'collections.Counter', 'Counter', (["hb_ii_pd['halo_hostid']"], {}), "(hb_ii_pd['halo_hostid'])\n", (19640, 19665), False, 'from collections import Counter\n'), ((20031, 20073), 'numpy.log10', 'num.log10', (["hb_ii_pd['halo_mvir_host_halo']"], {}), 
"(hb_ii_pd['halo_mvir_host_halo'])\n", (20040, 20073), True, 'import numpy as num\n'), ((21137, 21210), 'numpy.unique', 'num.unique', (["hb_ii_pd.loc[hb_ii_pd['haloid_host_ngal'] > 1, 'halo_hostid']"], {}), "(hb_ii_pd.loc[hb_ii_pd['haloid_host_ngal'] > 1, 'halo_hostid'])\n", (21147, 21210), True, 'import numpy as num\n'), ((21522, 21554), 'numpy.zeros', 'num.zeros', (['hb_ii_pd_mod.shape[0]'], {}), '(hb_ii_pd_mod.shape[0])\n', (21531, 21554), True, 'import numpy as num\n'), ((24135, 24158), 'copy.deepcopy', 'copy.deepcopy', (['hb_ii_pd'], {}), '(hb_ii_pd)\n', (24148, 24158), False, 'import copy\n'), ((25091, 25114), 'copy.deepcopy', 'copy.deepcopy', (['hb_ii_pd'], {}), '(hb_ii_pd)\n', (25104, 25114), False, 'import copy\n'), ((25135, 25163), 'copy.deepcopy', 'copy.deepcopy', (['coord_dict_ul'], {}), '(coord_dict_ul)\n', (25148, 25163), False, 'import copy\n'), ((27630, 27664), 'copy.deepcopy', 'copy.deepcopy', (['pos_coords_mocks_zz'], {}), '(pos_coords_mocks_zz)\n', (27643, 27664), False, 'import copy\n'), ((27931, 27962), 'numpy.asarray', 'num.asarray', (['[x_ii, y_ii, z_ii]'], {}), '([x_ii, y_ii, z_ii])\n', (27942, 27962), True, 'import numpy as num\n'), ((31673, 31710), 'scipy.interpolate.interp1d', 'interp1d', (["z_dc_pd['dc']", "z_dc_pd['z']"], {}), "(z_dc_pd['dc'], z_dc_pd['z'])\n", (31681, 31710), False, 'from scipy.interpolate import interp1d\n'), ((31918, 31936), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (31927, 31936), True, 'import numpy as num\n'), ((31957, 31975), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (31966, 31975), True, 'import numpy as num\n'), ((31996, 32014), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (32005, 32014), True, 'import numpy as num\n'), ((32035, 32053), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (32044, 32053), True, 'import numpy as num\n'), ((32074, 32092), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (32083, 32092), True, 'import 
numpy as num\n'), ((32113, 32131), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (32122, 32131), True, 'import numpy as num\n'), ((32152, 32170), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (32161, 32170), True, 'import numpy as num\n'), ((32191, 32209), 'numpy.zeros', 'num.zeros', (['hb_ngal'], {}), '(hb_ngal)\n', (32200, 32209), True, 'import numpy as num\n'), ((37677, 37752), 'cosmo_utils.utils.file_readers.pandas_df_to_hdf5_file', 'cfreaders.pandas_df_to_hdf5_file', (['mock_pd', 'mock_catl_pd_file'], {'key': '"""galcatl"""'}), "(mock_pd, mock_catl_pd_file, key='galcatl')\n", (37709, 37752), True, 'from cosmo_utils.utils import file_readers as cfreaders\n'), ((39690, 39726), 'cosmo_utils.utils.file_utils.File_Exists', 'cfutils.File_Exists', (['mock_coord_path'], {}), '(mock_coord_path)\n', (39709, 39726), True, 'from cosmo_utils.utils import file_utils as cfutils\n'), ((39849, 39877), 'cosmo_utils.utils.file_utils.File_Exists', 'cfutils.File_Exists', (['fof_exe'], {}), '(fof_exe)\n', (39868, 39877), True, 'from cosmo_utils.utils import file_utils as cfutils\n'), ((40380, 40416), 'subprocess.call', 'subprocess.call', (['fof_cmd'], {'shell': '(True)'}), '(fof_cmd, shell=True)\n', (40395, 40416), False, 'import subprocess\n'), ((40716, 40752), 'subprocess.call', 'subprocess.call', (['gal_cmd'], {'shell': '(True)'}), '(gal_cmd, shell=True)\n', (40731, 40752), False, 'import subprocess\n'), ((40759, 40797), 'subprocess.call', 'subprocess.call', (['group_cmd'], {'shell': '(True)'}), '(group_cmd, shell=True)\n', (40774, 40797), False, 'import subprocess\n'), ((41405, 41473), 'pandas.read_csv', 'pd.read_csv', (['grep_g_file'], {'sep': '"""\\\\s+"""', 'header': 'None', 'names': 'group_names'}), "(grep_g_file, sep='\\\\s+', header=None, names=group_names)\n", (41416, 41473), True, 'import pandas as pd\n'), ((41758, 41806), 'pandas.concat', 'pd.concat', (["[mock_pd, grep_pd['groupid']]"], {'axis': '(1)'}), "([mock_pd, grep_pd['groupid']], 
axis=1)\n", (41767, 41806), True, 'import pandas as pd\n'), ((42091, 42110), 'os.remove', 'os.remove', (['fof_file'], {}), '(fof_file)\n', (42100, 42110), False, 'import os\n'), ((42115, 42135), 'os.remove', 'os.remove', (['grep_file'], {}), '(grep_file)\n', (42124, 42135), False, 'import os\n'), ((42140, 42162), 'os.remove', 'os.remove', (['grep_g_file'], {}), '(grep_g_file)\n', (42149, 42162), False, 'import os\n'), ((42167, 42193), 'os.remove', 'os.remove', (['mock_coord_path'], {}), '(mock_coord_path)\n', (42176, 42193), False, 'import os\n'), ((45031, 45058), 'numpy.asarray', 'num.asarray', (['group_prop_arr'], {}), '(group_prop_arr)\n', (45042, 45058), True, 'import numpy as num\n'), ((45495, 45639), 'cosmo_utils.mock_catalogues.abundance_matching.abundance_matching_f', 'cm.abundance_matching.abundance_matching_f', (['group_prop_arr', 'mf_dict'], {'volume1': "param_dict['survey_vol']", 'dens1_opt': '(False)', 'reverse': 'reverse_opt'}), "(group_prop_arr, mf_dict, volume1\n =param_dict['survey_vol'], dens1_opt=False, reverse=reverse_opt)\n", (45537, 45639), True, 'from cosmo_utils import mock_catalogues as cm\n'), ((45879, 45981), 'pandas.merge', 'pd.merge', (['gal_pd', "group_pd[['M_group', 'ngals']]"], {'how': '"""left"""', 'left_on': '"""groupid"""', 'right_index': '(True)'}), "(gal_pd, group_pd[['M_group', 'ngals']], how='left', left_on=\n 'groupid', right_index=True)\n", (45887, 45981), True, 'import pandas as pd\n'), ((46178, 46203), 'numpy.abs', 'num.abs', (['gal_pd[prop_gal]'], {}), '(gal_pd[prop_gal])\n', (46185, 46203), True, 'import numpy as num\n'), ((47146, 47221), 'pandas.merge', 'pd.merge', (['mockgal_pd', 'gal_pd'], {'how': '"""left"""', 'left_index': '(True)', 'right_index': '(True)'}), "(mockgal_pd, gal_pd, how='left', left_index=True, right_index=True)\n", (47154, 47221), True, 'import pandas as pd\n'), ((47266, 47345), 'pandas.merge', 'pd.merge', (['mockgroup_pd', 'group_pd'], {'how': '"""left"""', 'left_index': '(True)', 'right_index': 
'(True)'}), "(mockgroup_pd, group_pd, how='left', left_index=True, right_index=True)\n", (47274, 47345), True, 'import pandas as pd\n'), ((49104, 49152), 'cosmo_utils.mock_catalogues.mags_calculations.luminosity_to_absolute_mag', 'cmags.luminosity_to_absolute_mag', (['group_lum', '"""r"""'], {}), "(group_lum, 'r')\n", (49136, 49152), True, 'from cosmo_utils.mock_catalogues import mags_calculations as cmags\n'), ((50631, 50650), 'astropy.constants.c.to', 'ac.c.to', (['(u.km / u.s)'], {}), '(u.km / u.s)\n', (50638, 50650), True, 'import astropy.constants as ac\n'), ((50692, 50722), 'collections.Counter', 'Counter', (["gal_pd['halo_hostid']"], {}), "(gal_pd['halo_hostid'])\n", (50699, 50722), False, 'from collections import Counter\n'), ((50743, 50776), 'numpy.unique', 'num.unique', (["gal_pd['halo_hostid']"], {}), "(gal_pd['halo_hostid'])\n", (50753, 50776), True, 'import numpy as num\n'), ((51999, 52059), 'pandas.DataFrame', 'pd.DataFrame', (["{'halo_hostid': haloid_arr, 'halo_rvir': rvir}"], {}), "({'halo_hostid': haloid_arr, 'halo_rvir': rvir})\n", (52011, 52059), True, 'import pandas as pd\n'), ((52212, 52311), 'pandas.merge', 'pd.merge', ([], {'left': 'gal_pd', 'right': 'rvir_pd', 'how': '"""left"""', 'left_on': '"""halo_hostid"""', 'right_on': '"""halo_hostid"""'}), "(left=gal_pd, right=rvir_pd, how='left', left_on='halo_hostid',\n right_on='halo_hostid')\n", (52220, 52311), True, 'import pandas as pd\n'), ((56066, 56133), 'cosmo_utils.utils.file_readers.pandas_df_to_hdf5_file', 'cfreaders.pandas_df_to_hdf5_file', (['mockgal_pd', 'gal_file'], {'key': 'gal_key'}), '(mockgal_pd, gal_file, key=gal_key)\n', (56098, 56133), True, 'from cosmo_utils.utils import file_readers as cfreaders\n'), ((56160, 56233), 'cosmo_utils.utils.file_readers.pandas_df_to_hdf5_file', 'cfreaders.pandas_df_to_hdf5_file', (['mockgroup_pd', 'group_file'], {'key': 'group_key'}), '(mockgroup_pd, group_file, key=group_key)\n', (56192, 56233), True, 'from cosmo_utils.utils import file_readers 
as cfreaders\n'), ((56282, 56311), 'cosmo_utils.utils.file_utils.File_Exists', 'cfutils.File_Exists', (['gal_file'], {}), '(gal_file)\n', (56301, 56311), True, 'from cosmo_utils.utils import file_utils as cfutils\n'), ((56316, 56347), 'cosmo_utils.utils.file_utils.File_Exists', 'cfutils.File_Exists', (['group_file'], {}), '(group_file)\n', (56335, 56347), True, 'from cosmo_utils.utils import file_utils as cfutils\n'), ((58723, 58734), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (58732, 58734), True, 'import matplotlib.pyplot as plt\n'), ((58739, 58748), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (58746, 58748), True, 'import matplotlib.pyplot as plt\n'), ((58759, 58786), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (58769, 58786), True, 'import matplotlib.pyplot as plt\n'), ((59760, 59806), 'numpy.arange', 'num.arange', (['(0)', "(param_dict['size_cube'] + 1)", '(20)'], {}), "(0, param_dict['size_cube'] + 1, 20)\n", (59770, 59806), True, 'import numpy as num\n'), ((60025, 60053), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (60037, 60053), True, 'import matplotlib.pyplot as plt\n'), ((61083, 61112), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.86)'}), '(top=0.86)\n', (61102, 61112), True, 'import matplotlib.pyplot as plt\n'), ((61117, 61135), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (61133, 61135), True, 'import matplotlib.pyplot as plt\n'), ((61360, 61369), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (61367, 61369), True, 'import matplotlib.pyplot as plt\n'), ((61374, 61385), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (61383, 61385), True, 'import matplotlib.pyplot as plt\n'), ((63249, 63277), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (63261, 63277), True, 'import matplotlib.pyplot as plt\n'), 
((63496, 63505), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (63503, 63505), True, 'import matplotlib.pyplot as plt\n'), ((63510, 63521), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (63519, 63521), True, 'import matplotlib.pyplot as plt\n'), ((63532, 63559), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (63542, 63559), True, 'import matplotlib.pyplot as plt\n'), ((64540, 64569), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.86)'}), '(top=0.86)\n', (64559, 64569), True, 'import matplotlib.pyplot as plt\n'), ((64574, 64592), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (64590, 64592), True, 'import matplotlib.pyplot as plt\n'), ((64817, 64826), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (64824, 64826), True, 'import matplotlib.pyplot as plt\n'), ((64831, 64842), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (64840, 64842), True, 'import matplotlib.pyplot as plt\n'), ((71418, 71432), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (71430, 71432), False, 'from datetime import datetime\n'), ((71691, 71715), 'src.survey_utils.ReadSurvey', 'ReadSurvey', ([], {}), '(**param_dict)\n', (71701, 71715), False, 'from src.survey_utils import ReadSurvey\n'), ((73863, 73877), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (73875, 73877), False, 'from datetime import datetime\n'), ((3360, 3391), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (3386, 3391), False, 'import argparse\n'), ((9111, 9122), 'shutil.which', 'which', (['name'], {}), '(name)\n', (9116, 9122), False, 'from shutil import which\n'), ((11882, 11901), 'astropy.constants.c.to', 'ac.c.to', (['(u.km / u.s)'], {}), '(u.km / u.s)\n', (11889, 11901), True, 'import astropy.constants as ac\n'), ((14472, 14512), 'tarfile.open', 'tarfile.open', (['tar_file_path'], {'mode': '"""w:gz"""'}), "(tar_file_path, 
mode='w:gz')\n", (14484, 14512), False, 'import tarfile\n'), ((21849, 21881), 'tqdm.tqdm', 'tqdm', (['haloid_unq'], {'desc': 'tqdm_desc'}), '(haloid_unq, desc=tqdm_desc)\n', (21853, 21881), False, 'from tqdm import tqdm\n'), ((24580, 24613), 'numpy.abs', 'num.abs', (["coord_dict_ul['dec_min']"], {}), "(coord_dict_ul['dec_min'])\n", (24587, 24613), True, 'import numpy as num\n'), ((24637, 24684), 'numpy.floor', 'num.floor', (["(param_dict['size_cube'] / z_delta_ul)"], {}), "(param_dict['size_cube'] / z_delta_ul)\n", (24646, 24684), True, 'import numpy as num\n'), ((25608, 25641), 'numpy.abs', 'num.abs', (["coord_dict_ur['dec_min']"], {}), "(coord_dict_ur['dec_min'])\n", (25615, 25641), True, 'import numpy as num\n'), ((25665, 25712), 'numpy.floor', 'num.floor', (["(param_dict['size_cube'] / z_delta_ur)"], {}), "(param_dict['size_cube'] / z_delta_ur)\n", (25674, 25712), True, 'import numpy as num\n'), ((39802, 39822), 'cosmo_utils.utils.work_paths.get_code_c', 'cwpaths.get_code_c', ([], {}), '()\n', (39820, 39822), True, 'from cosmo_utils.utils import work_paths as cwpaths\n'), ((46306, 46324), 'numpy.ones', 'num.ones', (['n_groups'], {}), '(n_groups)\n', (46314, 46324), True, 'import numpy as num\n'), ((51483, 51534), 'numpy.array', 'num.array', (['[haloid_counts[xx] for xx in haloid_arr]'], {}), '([haloid_counts[xx] for xx in haloid_arr])\n', (51492, 51534), True, 'import numpy as num\n'), ((51599, 51699), 'numpy.array', 'num.array', (["[gal_pd.loc[gal_pd['halo_hostid'] == xx, 'halo_ngal'].values[0] for xx in\n haloid_arr]"], {}), "([gal_pd.loc[gal_pd['halo_hostid'] == xx, 'halo_ngal'].values[0] for\n xx in haloid_arr])\n", (51608, 51699), True, 'import numpy as num\n'), ((60305, 60324), 'tqdm.tqdm', 'tqdm', (['catl_path_arr'], {}), '(catl_path_arr)\n', (60309, 60324), False, 'from tqdm import tqdm\n'), ((60377, 60423), 'cosmo_utils.utils.file_readers.read_hdf5_file_to_pandas_DF', 'cfreaders.read_hdf5_file_to_pandas_DF', (['catl_kk'], {}), '(catl_kk)\n', (60414, 
60423), True, 'from cosmo_utils.utils import file_readers as cfreaders\n'), ((61187, 61226), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (61198, 61226), True, 'import matplotlib.pyplot as plt\n'), ((61245, 61293), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "(fname, bbox_inches='tight', dpi=400)\n", (61256, 61293), True, 'import matplotlib.pyplot as plt\n'), ((63857, 63876), 'tqdm.tqdm', 'tqdm', (['catl_path_arr'], {}), '(catl_path_arr)\n', (63861, 63876), False, 'from tqdm import tqdm\n'), ((63929, 63975), 'cosmo_utils.utils.file_readers.read_hdf5_file_to_pandas_DF', 'cfreaders.read_hdf5_file_to_pandas_DF', (['catl_kk'], {}), '(catl_kk)\n', (63966, 63975), True, 'from cosmo_utils.utils import file_readers as cfreaders\n'), ((64084, 64150), 'cosmo_utils.utils.stats_funcs.Bins_array_create', 'cstats.Bins_array_create', (["catl_kk_pd['abs_rmag']"], {'base': 'mr_bins_sep'}), "(catl_kk_pd['abs_rmag'], base=mr_bins_sep)\n", (64108, 64150), True, 'from cosmo_utils.utils import stats_funcs as cstats\n'), ((64644, 64683), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (64655, 64683), True, 'import matplotlib.pyplot as plt\n'), ((64702, 64750), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "(fname, bbox_inches='tight', dpi=400)\n", (64713, 64750), True, 'import matplotlib.pyplot as plt\n'), ((72914, 72954), 'numpy.arange', 'num.arange', (['(0)', '(n_hb_files + 1)', 'catl_step'], {}), '(0, n_hb_files + 1, catl_step)\n', (72924, 72954), True, 'import numpy as num\n'), ((73037, 73066), 'numpy.arange', 'num.arange', (['(0)', '(n_hb_files + 1)'], {}), '(0, n_hb_files + 1)\n', (73047, 73066), True, 'import numpy as num\n'), ((73493, 73590), 'multiprocessing.Process', 'Process', ([], {'target': 
'multiprocessing_catls', 'args': '(hb_keys, param_dict, proj_dict, memb_tuples[ii])'}), '(target=multiprocessing_catls, args=(hb_keys, param_dict, proj_dict,\n memb_tuples[ii]))\n', (73500, 73590), False, 'from multiprocessing import Pool, Process, cpu_count\n'), ((2619, 2672), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (2645, 2672), False, 'import argparse\n'), ((7927, 7956), 'cosmo_utils.utils.file_utils.Program_Msg', 'cfutils.Program_Msg', (['__file__'], {}), '(__file__)\n', (7946, 7956), True, 'from cosmo_utils.utils import file_utils as cfutils\n'), ((14746, 14792), 'cosmo_utils.utils.file_readers.read_hdf5_file_to_pandas_DF', 'cfreaders.read_hdf5_file_to_pandas_DF', (['file_kk'], {}), '(file_kk)\n', (14783, 14792), True, 'from cosmo_utils.utils import file_readers as cfreaders\n'), ((14997, 15070), 'cosmo_utils.utils.file_readers.pandas_df_to_hdf5_file', 'cfreaders.pandas_df_to_hdf5_file', (['gal_pd_mod', 'file_mod_kk'], {'key': '"""gal_catl"""'}), "(gal_pd_mod, file_mod_kk, key='gal_catl')\n", (15029, 15070), True, 'from cosmo_utils.utils import file_readers as cfreaders\n'), ((15099, 15131), 'cosmo_utils.utils.file_utils.File_Exists', 'cfutils.File_Exists', (['file_mod_kk'], {}), '(file_mod_kk)\n', (15118, 15131), True, 'from cosmo_utils.utils import file_utils as cfutils\n'), ((15278, 15300), 'os.remove', 'os.remove', (['file_mod_kk'], {}), '(file_mod_kk)\n', (15287, 15300), False, 'import os\n'), ((19948, 19972), 'numpy.array', 'num.array', (['haloid_counts'], {}), '(haloid_counts)\n', (19957, 19972), True, 'import numpy as num\n'), ((32353, 32380), 'numpy.sum', 'num.sum', (['(cart_gals[kk] ** 2)'], {}), '(cart_gals[kk] ** 2)\n', (32360, 32380), True, 'import numpy as num\n'), ((41097, 41185), 'pandas.read_csv', 'pd.read_csv', (['grep_file'], {'sep': '"""\\\\s+"""', 'header': 'None', 'names': 'gal_names', 'index_col': '"""galid"""'}), "(grep_file, sep='\\\\s+', 
header=None, names=gal_names, index_col=\n 'galid')\n", (41108, 41185), True, 'import pandas as pd\n'), ((46617, 46659), 'numpy.random.choice', 'num.random.choice', (['gals_g_max.index.values'], {}), '(gals_g_max.index.values)\n', (46634, 46659), True, 'import numpy as num\n'), ((46684, 46713), 'numpy.asarray', 'num.asarray', (['g_galtype_groups'], {}), '(g_galtype_groups)\n', (46695, 46713), True, 'import numpy as num\n'), ((48220, 48237), 'numpy.arccos', 'num.arccos', (['z_val'], {}), '(z_val)\n', (48230, 48237), True, 'import numpy as num\n'), ((48419, 48444), 'numpy.arctan', 'num.arctan', (['(y_val / x_val)'], {}), '(y_val / x_val)\n', (48429, 48444), True, 'import numpy as num\n'), ((49002, 49057), 'cosmo_utils.mock_catalogues.mags_calculations.absolute_magnitude_to_luminosity', 'cmags.absolute_magnitude_to_luminosity', (['gal_mr_arr', '"""r"""'], {}), "(gal_mr_arr, 'r')\n", (49040, 49057), True, 'from cosmo_utils.mock_catalogues import mags_calculations as cmags\n'), ((64256, 64274), 'numpy.asarray', 'num.asarray', (['N_lum'], {}), '(N_lum)\n', (64267, 64274), True, 'import numpy as num\n'), ((67619, 67632), 'astropy.constants.c.to', 'ac.c.to', (['km_s'], {}), '(km_s)\n', (67626, 67632), True, 'import astropy.constants as ac\n'), ((72736, 72747), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (72745, 72747), False, 'from multiprocessing import Pool, Process, cpu_count\n'), ((2334, 2362), 'operator.attrgetter', 'attrgetter', (['"""option_strings"""'], {}), "('option_strings')\n", (2344, 2362), False, 'from operator import attrgetter\n'), ((14556, 14585), 'os.path.basename', 'os.path.basename', (['readme_file'], {}), '(readme_file)\n', (14572, 14585), False, 'import os\n'), ((14620, 14646), 'os.path.basename', 'os.path.basename', (['fig_file'], {}), '(fig_file)\n', (14636, 14646), False, 'import os\n'), ((36187, 36250), 'numpy.where', 'num.where', (['((ra_new_arr >= ra_min_limit) & (ra_new_arr <= 360.0))'], {}), '((ra_new_arr >= ra_min_limit) & 
(ra_new_arr <= 360.0))\n', (36196, 36250), True, 'import numpy as num\n'), ((36400, 36472), 'numpy.where', 'num.where', (["((ra_new_arr >= 0.0) & (ra_new_arr <= coord_dict_ii['ra_max']))"], {}), "((ra_new_arr >= 0.0) & (ra_new_arr <= coord_dict_ii['ra_max']))\n", (36409, 36472), True, 'import numpy as num\n'), ((36608, 36635), 'numpy.where', 'num.where', (['(ra_new_arr < 0.0)'], {}), '(ra_new_arr < 0.0)\n', (36617, 36635), True, 'import numpy as num\n'), ((46238, 46254), 'numpy.ones', 'num.ones', (['n_gals'], {}), '(n_gals)\n', (46246, 46254), True, 'import numpy as num\n'), ((67587, 67612), 'numpy.array', 'num.array', (['[czmin, czmax]'], {}), '([czmin, czmax])\n', (67596, 67612), True, 'import numpy as num\n'), ((67659, 67684), 'numpy.array', 'num.array', (['[czmin, czmax]'], {}), '([czmin, czmax])\n', (67668, 67684), True, 'import numpy as num\n'), ((15205, 15230), 'os.path.basename', 'os.path.basename', (['file_kk'], {}), '(file_kk)\n', (15221, 15230), False, 'import os\n'), ((32895, 32931), 'numpy.dot', 'num.dot', (['cart_gals[kk]', 'vel_gals[kk]'], {}), '(cart_gals[kk], vel_gals[kk])\n', (32902, 32931), True, 'import numpy as num\n'), ((33020, 33046), 'numpy.sum', 'num.sum', (['(vel_gals[kk] ** 2)'], {}), '(vel_gals[kk] ** 2)\n', (33027, 33046), True, 'import numpy as num\n'), ((36891, 36918), 'numpy.where', 'num.where', (['(ra_new_arr < 0.0)'], {}), '(ra_new_arr < 0.0)\n', (36900, 36918), True, 'import numpy as num\n'), ((71108, 71132), 'os.path.split', 'os.path.split', (['hb_key_ii'], {}), '(hb_key_ii)\n', (71121, 71132), False, 'import os\n'), ((64170, 64208), 'numpy.where', 'num.where', (["(catl_kk_pd['abs_rmag'] < xx)"], {}), "(catl_kk_pd['abs_rmag'] < xx)\n", (64179, 64208), True, 'import numpy as num\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.