Dataset schema (one row per source file; ⌀ = nullable):
hexsha: string (40) | size: int64 (1 to 1.03M) | ext: string (10 classes) | lang: string (1 class) |
max_stars_repo_path: string (3 to 239) | max_stars_repo_name: string (5 to 130) | max_stars_repo_head_hexsha: string (40 to 78) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k, ⌀) | max_stars_repo_stars_event_min_datetime: string (24, ⌀) | max_stars_repo_stars_event_max_datetime: string (24, ⌀) |
max_issues_repo_path: string (3 to 239) | max_issues_repo_name: string (5 to 130) | max_issues_repo_head_hexsha: string (40 to 78) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k, ⌀) | max_issues_repo_issues_event_min_datetime: string (24, ⌀) | max_issues_repo_issues_event_max_datetime: string (24, ⌀) |
max_forks_repo_path: string (3 to 239) | max_forks_repo_name: string (5 to 130) | max_forks_repo_head_hexsha: string (40 to 78) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k, ⌀) | max_forks_repo_forks_event_min_datetime: string (24, ⌀) | max_forks_repo_forks_event_max_datetime: string (24, ⌀) |
content: string (1 to 1.03M) | avg_line_length: float64 (1 to 958k) | max_line_length: int64 (1 to 1.03M) | alphanum_fraction: float64 (0 to 1)
Row 1: hexsha 794cbd265afa444f9dc39a789c28ef40dc335c90 | size 3,584 | ext py | lang Python | repo eriksore/sdn @ 16eaa6a28bcbf957974e8339ea70724e604f5da9 | path OdlApplication/testcode.py | licenses ["MIT"] | stars: null | issues: null | forks: null | content:
#External libraries
import sys
import json
import networkx as nx
from networkx.readwrite import json_graph
import httplib2
from xml.dom import minidom
from lxml import etree
#Own libraries
import restconf
import frontend
#Base URLs for Config and operational
baseUrl = 'http://192.168.231.246:8080'
confUrl = baseUrl + '/restconf/config/' #Contains data inserted via controller
operUrl = baseUrl + '/restconf/operational/' # Contains other data
findTopology = operUrl + '/network-topology:network-topology/topology/flow:1/'
#Specific REST URLs
h = httplib2.Http(".cache")
h.add_credentials('admin', 'admin')
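# --- Hedged example (editor's addition, not part of the original script) ---
# A minimal sketch of the raw RESTCONF GET that the restconf helper module
# presumably wraps; the Accept header and the JSON decoding below are
# assumptions, not taken from restconf.py.
exampleUrl = operUrl + 'opendaylight-inventory:nodes'  # same inventory path queried below
exampleResp, exampleContent = h.request(exampleUrl, 'GET', headers={'Accept': 'application/json'})
if exampleResp.status == 200:
    exampleInventory = json.loads(exampleContent)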
flowIdCounter = 100
hosts = restconf.get_active_hosts()
srcIP = '10.0.0.1'
destIP = '10.0.0.2'
nodes = restconf.get_topology(restconf.get(findTopology))#['topology'][0]['node']
#print nodes
nodes = restconf.get_topology(restconf.get(findTopology))['topology'][0]['node']
for node in nodes:
print node['node-id']
tables = restconf.get('http://192.168.231.246:8080/restconf/operational/opendaylight-inventory:nodes/node/'+node['node-id'])
flowTables = json.loads(tables)
#print tables
try:
for table in flowTables['node'][0]['flow-node-inventory:table']:
if table['opendaylight-flow-table-statistics:flow-table-statistics']['opendaylight-flow-table-statistics:active-flows'] != 0:
print table['flow-node-inventory:id']
#print confUrl+'opendaylight-inventory:nodes/node/'+node['node-id']+'/table/'+str(table['flow-node-inventory:id'])
try:
flowRules = restconf.get(confUrl+'opendaylight-inventory:nodes/node/'+node['node-id']+'/table/'+str(table['flow-node-inventory:id']))
#print confUrl+'opendaylight-inventory:nodes/node/'+node['node-id']+'/table/'+str(table['flow-node-inventory:id'])
#flowRules = restconf.get(confUrl+'opendaylight-inventory:nodes/node/openflow:3/table/0')
rules = json.loads(flowRules)
print rules
except ValueError:
pass
#print rules['flow-node-inventory:table'][0]['flow-node-inventory:flow']
#flowRules['flow-node-inventory:table'][0]['flow-node-inventory:flow']
"""try:
for rule in flowRules:
if rule['flow-node-inventory:match']['flow-node-inventory:ipv4-destination'] == destIP:
print "found"
except KeyError:
pass"""
except KeyError:
pass
"""
tables = restconf.get('http://192.168.231.246:8080/restconf/operational/opendaylight-inventory:nodes/node/openflow:1')
flowtables = json.loads(tables)
print flowtables['node'][0]['flow-node-inventory:table']
flowRules = restconf.get(confUrl+'opendaylight-inventory:nodes/node/openflow:1/table/0')
rules = json.loads(flowRules)
print rules['flow-node-inventory:table'][0]['flow-node-inventory:flow']
#print flowRules
"""
"""
#print json.dumps(flows, indent=2)
#content = restconf.get('http://192.168.231.246:8080/restconf/config/opendaylight-inventory:nodes/node/openflow:1/table/0')#node/openflow:1/table/0/')
content = restconf.get('http://192.168.231.246:8080/restconf/operational/opendaylight-inventory:nodes/node/openflow:1')
flows = json.loads(content)
#print flows['flow-node-inventory:table'][0]['flow-node-inventory:flow']
#print json.dumps(flows['flow-node-inventory:table'][0]['flow-node-inventory:flow'], indent=2)
#print json.dumps(flows, indent=2)
"""
| avg_line_length 44.246914 | max_line_length 153 | alphanum_fraction 0.666295 |
Row 2: hexsha 794cbd6d1d7d5a4eaa37e2fe6108a47794e82522 | size 56,700 | ext py | lang Python | repo sys-bio/SBMLDiagrams @ ff951ff987fadf61a25d239966134e7bbfa1ff1a | path SBMLDiagrams/drawNetwork.py | licenses ["MIT"] | stars: null | issues: 20 (2022-03-04T17:07:18.000Z to 2022-03-30T22:22:24.000Z) | forks: null | content:
# -*- coding: utf-8 -*-
# This script was initiated by Herbert Sauro, written by Jin Xu, and is available on GitHub:
# https://github.com/SunnyXu/SBMLDiagrams
# This file includes all the functions to visualize or edit the SBML file.
"""
Created on Fri Jul 16 09:57:30 2021
@author: Jin Xu and Herbert Sauro
"""
import math
import random, string, os
from PIL import Image # to load images
from IPython.core.display import display
# Colab requires IPython.core.display instead of IPython.display
import skia
from SBMLDiagrams import styleSBML
def _drawRectangle (canvas, x, y, width, height, outline, fill, linewidth, dash = False):
"""
Draw a rectangle on canvas.
Args:
canvas: skia.Canvas.
x: float-top left-hand corner position_x.
y: float-top left-hand corner position_y.
width: float-width of the rectangle.
height: float-height of the rectangle.
outline: skia.Color()-border color.
fill: skia.Color()-fill color; or list-[str-gradient_type,
list-gradient_info, list-stop_info], where gradient_type can be 'linearGradient' or
'radialGradient', while gradient_info and stop_info follow the formats used by
setNodeFillLinearGradient() and setNodeFillRadialGradient().
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
rect = skia.Rect(x, y, x+width, y+height)
if type(fill) == int:
paintFill = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
Color = fill
)
else:
gradient_type = fill[0]
gradient_info = fill[1]
stop_info = fill[2]
stop_colors = []
stop_positions = []
for i in range(len(stop_info)):
stop_colors.append(skia.Color(stop_info[i][1][0], stop_info[i][1][1], stop_info[i][1][2], stop_info[i][1][3]))
stop_positions.append(stop_info[i][0]/100.)
if gradient_type == 'linearGradient':
paintFill = skia.Paint(
Shader=skia.GradientShader.MakeLinear(
points=[(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
(x+width*gradient_info[1][0]/100., y+height*gradient_info[1][1]/100.)],
colors=stop_colors,
positions = stop_positions)
)
elif gradient_type == 'radialGradient':
paintFill = skia.Paint(
Shader = skia.GradientShader.MakeRadial(
center=(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
radius=max(width,height)*gradient_info[1][0]/100.,
colors=stop_colors,
positions = stop_positions)
)
canvas.drawRect(rect, paintFill)
if dash:
paintStroke = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
StrokeWidth=linewidth,
Style = skia.Paint.kStroke_Style,
Color = outline
)
else:
paintStroke = skia.Paint(
AntiAlias=True,
StrokeWidth=linewidth,
Style = skia.Paint.kStroke_Style,
Color = outline
)
canvas.drawRect(rect, paintStroke)
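# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal sketch of the gradient "fill" format accepted by the helpers above:
# the first entry names the gradient type, the second gives the start/end points
# as percentages of the bounding box, and the third lists [offset-percent,
# [r, g, b, a]] stops. The surface size and colors are illustrative assumptions.
def _exampleRectangleWithLinearGradient():
    surface = skia.Surface(200, 120)
    canvas = surface.getCanvas()
    fill = ['linearGradient',
            [[0, 0], [100, 0]],  # gradient runs left to right across the rectangle
            [[0, [255, 255, 255, 255]], [100, [0, 120, 255, 255]]]]  # white-to-blue stops
    _drawRectangle(canvas, 10, 10, 180, 100, skia.ColorBLACK, fill, 2.)
    return surface.makeImageSnapshot()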
def _drawRoundedRectangle (canvas, x, y, width, height, outline, fill, linewidth, dash = False):
"""
Draw a rounded rectangle on canvas.
Args:
canvas: skia.Canvas.
x: float-top left-hand corner position_x.
y: float-top left-hand corner position_y.
width: float-width of the rectangle.
height: float-height of the rectangle.
outline: skia.Color()-border color.
fill: skia.Color()-fill color; or list-[str-gradient_type,
list-gradient_info, list-stop_info], where gradient_type can be 'linearGradient' or
'radialGradient', while gradient_info and stop_info follow the formats used by
setNodeFillLinearGradient() and setNodeFillRadialGradient().
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
radius = 1.*linewidth
rect = skia.Rect(x, y, x+width, y+height)
if type(fill) == int:
paintFill = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
Color = fill
)
else:
gradient_type = fill[0]
gradient_info = fill[1]
stop_info = fill[2]
stop_colors = []
stop_positions = []
for i in range(len(stop_info)):
stop_colors.append(skia.Color(stop_info[i][1][0], stop_info[i][1][1], stop_info[i][1][2], stop_info[i][1][3]))
stop_positions.append(stop_info[i][0]/100.)
if gradient_type == 'linearGradient':
paintFill = skia.Paint(
Shader=skia.GradientShader.MakeLinear(
points=[(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
(x+width*gradient_info[1][0]/100., y+height*gradient_info[1][1]/100.)],
colors=stop_colors,
positions = stop_positions)
)
elif gradient_type == 'radialGradient':
paintFill = skia.Paint(
Shader = skia.GradientShader.MakeRadial(
center=(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
radius=max(width,height)*gradient_info[1][0]/100.,
colors=stop_colors,
positions = stop_positions)
)
canvas.drawRoundRect(rect, radius, radius, paintFill)
if dash:
paintStroke = skia.Paint(
AntiAlias=True,
StrokeWidth=linewidth,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
Style = skia.Paint.kStroke_Style,
Color = outline
)
else:
paintStroke = skia.Paint(
AntiAlias=True,
StrokeWidth=linewidth,
Style = skia.Paint.kStroke_Style,
Color = outline
)
canvas.drawRoundRect(rect, radius, radius, paintStroke)
def _drawEllipse (canvas, x, y, width, height, outline, fill, linewidth, dash = False):
"""
Draw an ellipse on canvas.
Args:
canvas: skia.Canvas.
x: float-top left-hand corner position_x.
y: float-top left-hand corner position_y.
width: float-width of the rectangle.
height: float-height of the rectangle.
outline: skia.Color()-border color.
fill: skia.Color()-fill color; or list-[str-gradient_type,
list-gradient_info, list-stop_info], where gradient_type can be 'linearGradient' or
'radialGradient', while gradient_info and stop_info follow the formats used by
setNodeFillLinearGradient() and setNodeFillRadialGradient().
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
rect = skia.Rect(x, y, x+width, y+height)
if type(fill) == int:
paintFill = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
Color = fill
)
else:
gradient_type = fill[0]
gradient_info = fill[1]
stop_info = fill[2]
stop_colors = []
stop_positions = []
for i in range(len(stop_info)):
stop_colors.append(skia.Color(stop_info[i][1][0], stop_info[i][1][1], stop_info[i][1][2], stop_info[i][1][3]))
stop_positions.append(stop_info[i][0]/100.)
if gradient_type == 'linearGradient':
paintFill = skia.Paint(
Shader=skia.GradientShader.MakeLinear(
points=[(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
(x+width*gradient_info[1][0]/100., y+height*gradient_info[1][1]/100.)],
colors=stop_colors,
positions = stop_positions)
)
elif gradient_type == 'radialGradient':
paintFill = skia.Paint(
Shader = skia.GradientShader.MakeRadial(
center=(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
radius=max(width,height)*gradient_info[1][0]/100.,
colors=stop_colors,
positions = stop_positions)
)
canvas.drawOval(rect, paintFill)
if dash:
paintStroke = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
StrokeWidth=linewidth,
Style = skia.Paint.kStroke_Style,
Color = outline
)
else:
paintStroke = skia.Paint(
AntiAlias=True,
StrokeWidth=linewidth,
Style = skia.Paint.kStroke_Style,
Color = outline
)
canvas.drawOval(rect, paintStroke)
def _drawCircle (canvas, x1, y1, w, h, outline, fill, linewidth, dash = False):
"""
Draw a circle inscribed in a given bounding rectangle on the canvas.
Args:
canvas: skia.Canvas
x1: float-top left-hand corner position_x of the rectangle.
y1: float-top left-hand corner position_y of the rectangle.
w: float-width of the rectangle.
h: float-height of the rectangle.
outline: skia.Color()-border color.
fill: skia.Color()-fill color; or list-[str-gradient_type,
list-gradient_info, list-stop_info], where gradient_type can be 'linearGradient' or
'radialGradient', while gradient_info and stop_info follow the formats used by
setNodeFillLinearGradient() and setNodeFillRadialGradient().
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
centerX = x1 + w/2
centerY = y1 + h/2
radius = .5*min(w,h) # the radius of the circle should be the half of the minimum of w and h
if type(fill) == int:
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
StrokeWidth=linewidth,
Color = fill
)
else:
gradient_type = fill[0]
gradient_info = fill[1]
stop_info = fill[2]
stop_colors = []
stop_positions = []
for i in range(len(stop_info)):
stop_colors.append(skia.Color(stop_info[i][1][0], stop_info[i][1][1], stop_info[i][1][2], stop_info[i][1][3]))
stop_positions.append(stop_info[i][0]/100.)
if gradient_type == 'linearGradient':
paint = skia.Paint(
Shader=skia.GradientShader.MakeLinear(
points=[(x1+w*gradient_info[0][0]/100., y1+h*gradient_info[0][1]/100.),
(x1+w*gradient_info[1][0]/100., y1+h*gradient_info[1][1]/100.)],
colors=stop_colors,
positions = stop_positions)
)
elif gradient_type == 'radialGradient':
paint = skia.Paint(
Shader = skia.GradientShader.MakeRadial(
center=(x1+w*gradient_info[0][0]/100., y1+h*gradient_info[0][1]/100.),
radius=max(w,h)*gradient_info[1][0]/100.,
colors=stop_colors,
positions = stop_positions)
)
canvas.drawCircle (centerX, centerY, radius, paint)
if dash:
paint = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
else:
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
canvas.drawCircle (centerX, centerY, radius, paint)
def _drawDimer (canvas, x1, y1, w, h, outline, fill, linewidth, dash = False):
"""
Draw a dimer (two circles) inside a given bounding rectangle on the canvas.
Args:
canvas: skia.Canvas.
x1: float-top left-hand corner position_x of the rectangle.
y1: float-top left-hand corner position_y of the rectangle.
w: float-width of the rectangle.
h: float-height of the rectangle.
outline: skia.Color()-border color.
fill: skia.Color()-fill color.
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
radius = .25*min(w,h)
centerX1 = x1 + w/2 - radius
centerY1 = y1 + h/2
centerX2 = x1 + w/2 + radius
centerY2 = centerY1
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
StrokeWidth=linewidth,
Color = fill
)
canvas.drawCircle (centerX1, centerY1, radius, paint)
canvas.drawCircle (centerX2, centerY2, radius, paint)
if dash:
paint = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
else:
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
canvas.drawCircle (centerX1, centerY1, radius, paint)
canvas.drawCircle (centerX2, centerY2, radius, paint)
def _drawTrimer (canvas, x1, y1, w, h, outline, fill, linewidth, dash = False):
"""
Draw a trimer (three circles) inside a given bounding rectangle on the canvas.
Args:
canvas: skia.Canvas.
x1: float-top left-hand corner position_x of the rectangle.
y1: float-top left-hand corner position_y of the rectangle.
w: float-width of the rectangle.
h: float-height of the rectangle.
outline: skia.Color()-border color.
fill: skia.Color()-fill color.
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
radius = .25*min(w,h)
centerX1 = x1 + w/2
centerY1 = y1 + h/2 - radius
centerX3 = x1 + w/2 - radius
centerY3 = y1 + h/2 + radius
centerX4 = x1 + w/2 + radius
centerY4 = centerY3
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
StrokeWidth=linewidth,
Color = fill
)
canvas.drawCircle (centerX1, centerY1, radius, paint)
canvas.drawCircle (centerX3, centerY3, radius, paint)
canvas.drawCircle (centerX4, centerY4, radius, paint)
if dash:
paint = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
else:
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
canvas.drawCircle (centerX1, centerY1, radius, paint)
canvas.drawCircle (centerX3, centerY3, radius, paint)
canvas.drawCircle (centerX4, centerY4, radius, paint)
def _drawTetramer (canvas, x1, y1, w, h, outline, fill, linewidth, dash = False):
"""
Draw a tetramer (four circles) inside a given bounding rectangle on the canvas.
Args:
canvas: skia.Canvas.
x1: float-top left-hand corner position_x of the rectangle.
y1: float-top left-hand corner position_y of the rectangle.
w: float-width of the rectangle.
h: float-height of the rectangle.
outline: skia.Color()-border color.
fill: skia.Color()-fill color.
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
radius = .25*min(w,h)
centerX1 = x1 + w/2 - radius
centerY1 = y1 + h/2 - radius
centerX2 = x1 + w/2 + radius
centerY2 = centerY1
centerX3 = centerX1
centerY3 = y1 + h/2 + radius
centerX4 = centerX2
centerY4 = centerY3
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
StrokeWidth=linewidth,
Color = fill
)
canvas.drawCircle (centerX1, centerY1, radius, paint)
canvas.drawCircle (centerX2, centerY2, radius, paint)
canvas.drawCircle (centerX3, centerY3, radius, paint)
canvas.drawCircle (centerX4, centerY4, radius, paint)
if dash:
paint = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
else:
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
canvas.drawCircle (centerX1, centerY1, radius, paint)
canvas.drawCircle (centerX2, centerY2, radius, paint)
canvas.drawCircle (centerX3, centerY3, radius, paint)
canvas.drawCircle (centerX4, centerY4, radius, paint)
def _drawPolygon (canvas, x, y, width, height, pts, outline, fill, linewidth, dash = False):
"""
Draw a polygon.
Args:
canvas: skia.Canvas.
x: float-top left-hand corner position_x of the rectangle.
y: float-top left-hand corner position_y of the rectangle
width: float-width of the rectangle.
height: float-height of the rectangle.
pts: list of 1*2 matrix: positions of the vertices/corners of the polygon.
outline: skia.Color()-border color.
fill: skia.Color()-fill color; or list-[str-gradient_type,
list-gradient_info, list-stop_info], where gradient_type can be 'linearGradient' or
'radialGradient', while gradient_info and stop_info follow the formats used by
setNodeFillLinearGradient() and setNodeFillRadialGradient().
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
if type(fill) == int:
paintFill = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
Color = fill
)
else:
gradient_type = fill[0]
gradient_info = fill[1]
stop_info = fill[2]
stop_colors = []
stop_positions = []
for i in range(len(stop_info)):
stop_colors.append(skia.Color(stop_info[i][1][0], stop_info[i][1][1], stop_info[i][1][2], stop_info[i][1][3]))
stop_positions.append(stop_info[i][0]/100.)
if gradient_type == 'linearGradient':
paintFill = skia.Paint(
Shader=skia.GradientShader.MakeLinear(
points=[(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
(x+width*gradient_info[1][0]/100., y+height*gradient_info[1][1]/100.)],
colors=stop_colors,
positions = stop_positions)
)
elif gradient_type == 'radialGradient':
paintFill = skia.Paint(
Shader = skia.GradientShader.MakeRadial(
center=(x+width*gradient_info[0][0]/100., y+height*gradient_info[0][1]/100.),
radius=max(width,height)*gradient_info[1][0]/100.,
colors=stop_colors,
positions = stop_positions)
)
if dash:
paintStroke = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
else:
paintStroke = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = outline
)
# do not call paintFill.setColor(fill) here: fill may be a gradient list rather than an int color
path = skia.Path()
path.moveTo (pts[0][0],pts[0][1])
for i in range (1, len (pts)):
path.lineTo (pts[i][0], pts[i][1])
path.close()
canvas.drawPath(path, paintFill)
paintStroke.setColor (outline)
canvas.drawPath(path, paintStroke)
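# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal sketch of _drawPolygon: "pts" are absolute canvas coordinates of the
# vertices, while x/y/width/height only anchor an optional gradient fill.
# The triangle coordinates and colors below are illustrative assumptions.
def _exampleDrawTriangle(canvas):
    pts = [[50, 20], [90, 90], [10, 90]]  # three vertices, in canvas coordinates
    _drawPolygon(canvas, 10, 20, 80, 70, pts, skia.ColorBLACK, skia.ColorRED, 2.)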
def _drawLine (canvas, x1, y1, x2, y2, fill, linewidth, dash = False):
"""
Draw a line.
Args:
canvas: skia.Canvas.
x1: float-position_x of one end of the line.
y1: float-position_y of one end of the line.
x2: float-position_x of the other end of the line.
y2: float-position_y of the other end of the line.
fill: skia.Color()-fill color.
linewidth: float-line width.
dash: bool-dashline (True) or not (False as default).
"""
if dash:
paint = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make([10.0, 5.0, 2.0, 5.0], 0.0),
Style = skia.Paint.kFill_Style,
StrokeWidth=linewidth,
Color = fill
)
else:
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
StrokeWidth=linewidth,
Color = fill
)
canvas.drawLine (x1, y1, x2, y2, paint)
def addProgressBar(canvas, position, dimension, fill_percent, process_border_width, color_style):
"""
Add a progress bar.
Args:
canvas: skia.Canvas.
position: list-1*2 matrix-anchor corner of the bar [position_x, position_y]; the bar is drawn with negative width/height, extending up and to the left.
dimension: list-1*2 matrix-size of the bar [width, height].
fill_percent: float-fraction (0. to 1.) of the bar height to fill.
process_border_width: float-border line width of the bar.
color_style: style object providing getProcessBorderColor(), getFullFillColor() and getProcessFillColor().
"""
[x, y] = position
[width, height] = dimension
[f_width, f_height] = dimension[0], dimension[1]*fill_percent
process_border_color = color_style.getProcessBorderColor()
full_fill_color = color_style.getFullFillColor()
process_fill_color = color_style.getProcessFillColor()
outline = skia.Color(process_border_color[0], process_border_color[1], process_border_color[2], process_border_color[3])
com_fill = skia.Color(full_fill_color[0], full_fill_color[1], full_fill_color[2], full_fill_color[3])
process_fill = skia.Color(process_fill_color[0], process_fill_color[1], process_fill_color[2], process_fill_color[3])
linewidth = process_border_width
_drawRectangle(canvas, x, y, -width, -height, outline, com_fill, linewidth)
_drawRectangle(canvas, x, y, -f_width, -f_height, outline, process_fill, 0)
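# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal sketch of addProgressBar: color_style can be any object exposing the
# three getters used above; the _DemoStyle stand-in below is purely hypothetical
# and is not part of styleSBML. Positions and colors are illustrative assumptions.
def _exampleAddProgressBar(canvas):
    class _DemoStyle:
        def getProcessBorderColor(self): return [0, 0, 0, 255]
        def getFullFillColor(self): return [230, 230, 230, 255]
        def getProcessFillColor(self): return [0, 200, 0, 255]
    # the helper passes negative width/height, so the bar extends up and to the left of position
    addProgressBar(canvas, [120, 150], [10, 60], 0.4, 1., _DemoStyle())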
def addCompartment(canvas, position, dimension, comp_border_color, comp_fill_color, comp_border_width):
"""
Add a compartment.
Args:
canvas: skia.Canvas.
position: list-1*2 matrix-top left-hand corner of the rectangle [position_x, position_y].
dimension: list-1*2 matrix-size of the rectangle [width, height].
comp_border_color: list-rgba 1*4 matrix-compartment border color.
comp_fill_color: list-rgba 1*4 matrix-compartment fill color.
comp_border_width: float-compartment border line width.
"""
[x, y] = position
[width, height] = dimension
outline = skia.Color(comp_border_color[0], comp_border_color[1], comp_border_color[2], comp_border_color[3])
fill = skia.Color(comp_fill_color[0], comp_fill_color[1], comp_fill_color[2], comp_fill_color[3])
linewidth = comp_border_width
if linewidth == 0 or linewidth < 0:
outline = fill
# _drawRectangle (canvas, x, y, width, height,
# outline=outline, fill = fill, linewidth=linewidth)
_drawRoundedRectangle (canvas, x, y, width, height, outline, fill, linewidth)
def addNode(canvas, floating_boundary_node, alias_node, position, dimension,
spec_border_color, spec_fill_color, spec_border_width,
shapeIdx, shape_name, shape_type, shape_info, complex_shape = ''):
"""
Add a node.
Args:
canvas: skia.Canvas.
floating_boundary_node: str-floating node ('floating') or not ('boundary').
alias_node: str-alias node ('alias') or not ('').
position: list-1*2 matrix-top left-hand corner of the rectangle [position_x, position_y].
dimension: list-1*2 matrix-size of the rectangle [width, height].
spec_border_color: list-rgba 1*4 matrix-species border color.
spec_fill_color: list-rgba 1*4 matrix-species fill color; or list-[str-gradient_type,
list-gradient_info, list-stop_info], where gradient_type can be 'linearGradient' or
'radialGradient', while gradient_info and stop_info follow the formats used by
setNodeFillLinearGradient() and setNodeFillRadialGradient().
spec_border_width: float-species border line width.
shapeIdx: int-0:text_only, 1:rectangle, 2:ellipse, 3:hexagon, 4:line, or 5:triangle;
6:upTriangle, 7:downTriangle, 8:leftTriangle, 9: rightTriangle.
shape_name: str-name of the node shape.
shape_type: str-type of the node shape: rectangle, ellipse, polygon.
shape_info: list-polygon:[[x1,y1],[x2,y2],[x3,y3],etc], ellipse:[[[x1,y1],[r1,r2]]];
where x,y,r are floating numbers from 0 to 100.
complex_shape: str-''(default), 'monomer', 'dimer', 'trimer', or 'tetramer'.
"""
[x, y] = position
[width, height] = dimension
outline = skia.Color(spec_border_color[0], spec_border_color[1], spec_border_color[2], spec_border_color[3])
if type(spec_fill_color[0]) == str:
fill = spec_fill_color
else:
fill = skia.Color(spec_fill_color[0], spec_fill_color[1], spec_fill_color[2], spec_fill_color[3])
linewidth = spec_border_width
if linewidth == 0 or linewidth < 0:
outline = fill
if floating_boundary_node == 'boundary':
linewidth = 2*linewidth
if complex_shape == '':
#Pls note that shapeIdx is different from Coyote
#shapeIdx = 0
if shape_type == 'rectangle' or shapeIdx == 1: #rectangle
if alias_node == 'alias':
_drawRoundedRectangle (canvas, x, y, width, height, outline, fill, linewidth, dash = True)
else:
_drawRoundedRectangle (canvas, x, y, width, height, outline, fill, linewidth)
elif shape_type == 'polygon':
pts = []
for ii in range(len(shape_info)):
pts.append([x+width*shape_info[ii][0]/100.,y+height*shape_info[ii][1]/100.])
if alias_node == 'alias':
_drawPolygon (canvas, x, y, width, height, pts, outline, fill, linewidth, dash=True)
else:
_drawPolygon (canvas, x, y, width, height, pts, outline, fill, linewidth)
elif shape_type == 'ellipse' or shapeIdx == 2:
#circle
# if alias_node == 'alias':
# _drawCircle (canvas, x, y, width, height,
# outline, fill, linewidth, dash=True)
# else:
# _drawCircle (canvas, x, y, width, height,
# outline, fill, linewidth)
if alias_node == 'alias':
_drawEllipse (canvas, x, y, width, height,
outline, fill, linewidth, dash=True)
else:
_drawEllipse (canvas, x, y, width, height,
outline, fill, linewidth)
elif complex_shape == 'monomer':
if alias_node == 'alias':
_drawCircle (canvas, x, y, width, height,
outline, fill, linewidth, dash=True)
else:
_drawCircle (canvas, x, y, width, height,
outline, fill, linewidth)
elif complex_shape == 'dimer':
if alias_node == 'alias':
_drawDimer (canvas, x, y, width, height,
outline, fill, linewidth, dash=True)
else:
_drawDimer (canvas, x, y, width, height,
outline, fill, linewidth)
elif complex_shape == 'trimer':
if alias_node == 'alias':
_drawTrimer (canvas, x, y, width, height,
outline, fill, linewidth, dash=True)
else:
_drawTrimer (canvas, x, y, width, height,
outline, fill, linewidth)
elif complex_shape == 'tetramer':
if alias_node == 'alias':
_drawTetramer (canvas, x, y, width, height,
outline, fill, linewidth, dash=True)
else:
_drawTetramer (canvas, x, y, width, height,
outline, fill, linewidth)
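# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal sketch of addNode with a polygon shape: shape_info lists the vertex
# positions as percentages of the node's bounding box, as described in the
# docstring. The coordinates, colors and sizes below are illustrative assumptions.
def _exampleAddHexagonNode(canvas):
    addNode(canvas,
            floating_boundary_node='floating', alias_node='',
            position=[50, 50], dimension=[80, 50],
            spec_border_color=[0, 0, 0, 255],
            spec_fill_color=[255, 204, 153, 255],
            spec_border_width=2.,
            shapeIdx=3, shape_name='hexagon', shape_type='polygon',
            shape_info=[[25, 0], [75, 0], [100, 50], [75, 100], [25, 100], [0, 50]])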
def addReaction(canvas, rxn_id, rct_position, prd_position, mod_position, center_position, handles,
rct_dimension, prd_dimension, mod_dimension, reaction_line_color, reaction_line_width,
reaction_line_type = 'bezier', show_bezier_handles = False, show_reaction_ids = False,
reaction_arrow_head_size = [2., 2.], scale = 1., reaction_dash = [], reverse = False,
showReversible = False):
"""
Add a reaction.
Args:
canvas: skia.Canvas.
rxn_id: str-reaction id.
rct_position: list-1*2 matrix: positions of each reactant.
prd_position: list-1*2 matrix: positions of each product.
mod_position: list-1*2 matrix: positions of each modifier.
center_position: list-1*2 matrix: position of the center.
handles: list-position of the handles: [center handle, reactant handles, product handles].
rct_dimension: list-1*2 matrix: dimension/size of each reactant.
prd_dimension: list-1*2 matrix: dimension/size of each product.
mod_dimension: list-1*2 matrix: dimension/size of each modifier.
reaction_line_color: list-rgba 1*4 matrix-reaction line color.
reaction_line_width: float-reaction line width.
reaction_line_type: str-type of the reaction line: 'straight' or 'bezier' (default).
show_bezier_handles: bool-show the Bezier handles (True) or not (False as default).
show_reaction_ids: bool-show the reaction ids (True) or not (False as default).
reaction_arrow_head_size: list-1*2 matrix-size of the rectangle [width, height].
scale: float-makes the figure output size = scale * default output size.
reaction_dash: list-[] means a solid line;
[a,b] means drawing an a-point line followed by a b-point gap, repeated;
[a,b,c,d] means drawing an a-point line followed by a b-point gap, and then
a c-point line followed by a d-point gap.
reverse: bool-reversible reaction (True) or not (False as default).
showReversible: bool-draw the arrow head on the reactant side of a reversible reaction (True) or not (False as default).
"""
def _cross_point(arcCenter, c2, s2):
"""
Get the point where the line from arcCenter to the rectangle's center crosses the
rectangle's border, given the rectangle's position (top left-hand corner) and size.
Args:
arcCenter: 1*2 matrix-position of the point.
c2: 1*2 matrix-position of the rectangle (top left-hand corner).
s2: 1*2 matrix-size of the rectangle.
"""
pt_center = [c2[0]+.5*s2[0], c2[1]+.5*s2[1]]
pt_up_left = c2
pt_up_right = [c2[0]+s2[0], c2[1]]
pt_down_left = [c2[0], c2[1]+s2[1]]
pt_down_right = [c2[0]+s2[0], c2[1]+s2[1]]
def _line_intersection(line1, line2):
"""
Args:
line1: list of 1*2 matrix-two points to represent line1.
line2: list of 1*2 matrix-two points to represent line2.
Returns:
[x,y]: 1*2 matrix-the intersection point of the two line segments.
"""
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def _det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = _det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect1')
d = (_det(*line1), _det(*line2))
x = round(_det(d, xdiff) / div,2)
y = round(_det(d, ydiff) / div,2)
if round((x-line1[0][0])*(x-line1[1][0]),2)<=0 and round((x-line2[0][0])*(x-line2[1][0]),2)<=0 \
and round((y-line1[0][1])*(y-line1[1][1]),2)<=0 and round((y-line2[0][1])*(y-line2[1][1]),2)<=0:
return [x, y]
else:
raise Exception('lines do not intersect2')
try:
[x,y] = _line_intersection([arcCenter, pt_center], [pt_up_left, pt_down_left])
return [x,y]
except:
pass
try:
[x,y] = _line_intersection([arcCenter, pt_center], [pt_up_left, pt_up_right])
return [x,y]
except:
pass
try:
[x,y] = _line_intersection([arcCenter, pt_center], [pt_down_left, pt_down_right])
return [x,y]
except:
pass
try:
[x,y] = _line_intersection([arcCenter, pt_center], [pt_up_right, pt_down_right])
return [x,y]
except:
pass
def _drawArrow (canvas, pts, fill):
"""
Draw an arrow.
Args:
canvas: skia.Canvas.
pts: list of 1*2 matrix: points of the arrows.
fill: skia.Color(): color of the arrow.
"""
paintFill = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kFill_Style,
Color = fill
)
paintStroke = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kStroke_Style,
Color = fill
)
paintFill.setColor (fill)
path = skia.Path()
path.moveTo (pts[0][0],pts[0][1])
for i in range (1, len (pts)):
path.lineTo (pts[i][0], pts[i][1])
path.close()
canvas.drawPath(path, paintFill)
paintStroke.setColor (fill)
canvas.drawPath(path, paintStroke)
def _drawBezier (pts, fillcolor, linewidth, reaction_dash = reaction_dash):
"""
Draw a bezier curve.
Args:
pts: list of 1*2 matrix: positions of src, h1, h2 and dest ([src, h1, h2, dest]).
fillcolor: skia.Color(): color of the bezier curve.
linewidth: line width of the bezier curve.
"""
src = pts[0]; h1 = pts[1]; h2 = pts[2]; dest = pts[3]
if len(reaction_dash) != 0:
paint = skia.Paint(
AntiAlias=True,
PathEffect=skia.DashPathEffect.Make(reaction_dash, 0.0),
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = fillcolor
)
else:
paint = skia.Paint(
AntiAlias=True,
Style = skia.Paint.kStroke_Style,
StrokeWidth=linewidth,
Color = fillcolor
)
path = skia.Path()
path.moveTo(src[0], src[1])
path.cubicTo(h1[0], h1[1], h2[0], h2[1], dest[0], dest[1])
canvas.drawPath(path, paint)
if show_bezier_handles:
_drawLine(canvas, src[0], src[1], h1[0], h1[1], fillcolor, .5*linewidth)
_drawLine(canvas, dest[0], dest[1], h2[0], h2[1], fillcolor, .5*linewidth)
_drawCircle(canvas, h1[0]-linewidth, h1[1]-linewidth, 2*linewidth, 2*linewidth,
fillcolor, fillcolor, .5*linewidth)
_drawCircle(canvas, h2[0]-linewidth, h2[1]-linewidth, 2*linewidth, 2*linewidth,
fillcolor, fillcolor, .5*linewidth)
nReactants = len(rct_position)
nProducts = len(prd_position)
arcCenter = center_position
linewidth = reaction_line_width
lineType = reaction_line_type
lineColor = skia.Color(reaction_line_color[0], reaction_line_color[1], reaction_line_color[2], reaction_line_color[3])
#arrow_s1 = 5*reaction_line_width
#arrow_s2 = 4*reaction_line_width
arrow_s2 = reaction_arrow_head_size[0] #width of the arrow
arrow_s1 = reaction_arrow_head_size[1] #height of the arrow
if show_reaction_ids:
addSimpleText(canvas, rxn_id, center_position, reaction_line_color,
text_line_width = 1, fontSize = 12.*scale)
if lineType == 'bezier':
center_handle_position = handles[0]
center_handle_position_prd = [2*arcCenter[0]-center_handle_position[0],2*arcCenter[1]-center_handle_position[1]]
src_handle = handles[1:nReactants+1]
dst_handle = handles[nReactants+1:nReactants+nProducts+1]
for i in range(nReactants):
pts = [center_position] #src (center_position), h1(center_handle), h2(rct/prd_handle), dst(rct/prd)
pts.append(center_handle_position)
rct_handle_position = src_handle[i]
pts.append(rct_handle_position)
c1 = rct_position[i]
s1 = rct_dimension[i]
try:
#to calculate the end point of the arrow called arrow_end_pt
arrow_end_pt = _cross_point(rct_handle_position, c1, s1)
line_end_pt = _cross_point(rct_handle_position,
[c1[0]-reaction_line_width,c1[1]-reaction_line_width],
[s1[0]+reaction_line_width*2,s1[1]+reaction_line_width*2])
if arrow_end_pt == None: #rct_handle_position could be inside the node
rct_handle_position = center_position
arrow_end_pt = _cross_point(rct_handle_position, c1, s1)
line_end_pt = _cross_point(rct_handle_position,
[c1[0]-reaction_line_width,c1[1]-reaction_line_width],
[s1[0]+reaction_line_width*2,s1[1]+reaction_line_width*2])
if reverse and showReversible:
#draw the arrow:
points = [arrow_end_pt]
distance = math.sqrt((arrow_end_pt[0]-rct_handle_position[0])**2 + (arrow_end_pt[1]-rct_handle_position[1])**2)
if distance != 0:
pts_y_m = arrow_end_pt[1] - (arrow_end_pt[1]-rct_handle_position[1])*arrow_s1/distance
pts_x_m = arrow_end_pt[0] - (arrow_end_pt[0]-rct_handle_position[0])*arrow_s1/distance
pts_y_l = pts_y_m + (arrow_end_pt[0]-rct_handle_position[0])*.5*arrow_s2/distance
pts_x_l = pts_x_m - (arrow_end_pt[1]-rct_handle_position[1])*.5*arrow_s2/distance
points.append([pts_x_l,pts_y_l])
points.append([pts_x_m, pts_y_m])
pts_y_r = pts_y_m - (arrow_end_pt[0]-rct_handle_position[0])*.5*arrow_s2/distance
pts_x_r = pts_x_m + (arrow_end_pt[1]-rct_handle_position[1])*.5*arrow_s2/distance
points.append([pts_x_r,pts_y_r])
else:
distance = math.sqrt((arrow_end_pt[0]-center_position[0])**2 + (arrow_end_pt[1]-center_position[1])**2)
pts_y_m = arrow_end_pt[1] - (arrow_end_pt[1]-center_position[1])*arrow_s1/distance
pts_x_m = arrow_end_pt[0] - (arrow_end_pt[0]-center_position[0])*arrow_s1/distance
pts_y_l = pts_y_m + (arrow_end_pt[0]-center_position[0])*.5*arrow_s2/distance
pts_x_l = pts_x_m - (arrow_end_pt[1]-center_position[1])*.5*arrow_s2/distance
points.append([pts_x_l,pts_y_l])
points.append([pts_x_m, pts_y_m])
pts_y_r = pts_y_m - (arrow_end_pt[0]-center_position[0])*.5*arrow_s2/distance
pts_x_r = pts_x_m + (arrow_end_pt[1]-center_position[1])*.5*arrow_s2/distance
points.append([pts_x_r,pts_y_r])
_drawArrow(canvas, points, lineColor)
if reverse and line_end_pt != None:
pts.append(line_end_pt)
_drawBezier(pts, lineColor, linewidth)
if arrow_end_pt != None:
pts.append(arrow_end_pt)
_drawBezier(pts, lineColor, linewidth)
except:
rct_center_position = [c1[0]+.5*s1[0], c1[1]+.5*s1[1]]
pts.append(rct_center_position)
_drawBezier(pts, lineColor, linewidth)
for i in range(nProducts):
pts = [center_position]
pts.append(center_handle_position_prd)
prd_handle_position = dst_handle[i]
pts.append(prd_handle_position)
c2 = prd_position[i]
s2 = prd_dimension[i]
try:
#to calculate the head point of the arrow called arrow_head_pt
arrow_head_pt = _cross_point(prd_handle_position, c2, s2)
line_head_pt = _cross_point(prd_handle_position,
[c2[0]-reaction_line_width,c2[1]-reaction_line_width],
[s2[0]+reaction_line_width*2,s2[1]+reaction_line_width*2])
if arrow_head_pt == None: #prd_handle_position could be inside the node
prd_handle_position = center_position
arrow_head_pt = _cross_point(prd_handle_position, c2, s2)
line_head_pt = _cross_point(prd_handle_position,
[c2[0]-reaction_line_width,c2[1]-reaction_line_width],
[s2[0]+reaction_line_width*2,s2[1]+reaction_line_width*2])
#draw the arrow:
points = [arrow_head_pt]
distance = math.sqrt((arrow_head_pt[0]-prd_handle_position[0])**2 + (arrow_head_pt[1]-prd_handle_position[1])**2)
if distance != 0:
pts_y_m = arrow_head_pt[1] - (arrow_head_pt[1]-prd_handle_position[1])*arrow_s1/distance
pts_x_m = arrow_head_pt[0] - (arrow_head_pt[0]-prd_handle_position[0])*arrow_s1/distance
pts_y_l = pts_y_m + (arrow_head_pt[0]-prd_handle_position[0])*.5*arrow_s2/distance
pts_x_l = pts_x_m - (arrow_head_pt[1]-prd_handle_position[1])*.5*arrow_s2/distance
points.append([pts_x_l,pts_y_l])
points.append([pts_x_m, pts_y_m])
pts_y_r = pts_y_m - (arrow_head_pt[0]-prd_handle_position[0])*.5*arrow_s2/distance
pts_x_r = pts_x_m + (arrow_head_pt[1]-prd_handle_position[1])*.5*arrow_s2/distance
points.append([pts_x_r,pts_y_r])
else:
distance = math.sqrt((arrow_head_pt[0]-center_position[0])**2 + (arrow_head_pt[1]-center_position[1])**2)
pts_y_m = arrow_head_pt[1] - (arrow_head_pt[1]-center_position[1])*arrow_s1/distance
pts_x_m = arrow_head_pt[0] - (arrow_head_pt[0]-center_position[0])*arrow_s1/distance
pts_y_l = pts_y_m + (arrow_head_pt[0]-center_position[0])*.5*arrow_s2/distance
pts_x_l = pts_x_m - (arrow_head_pt[1]-center_position[1])*.5*arrow_s2/distance
points.append([pts_x_l,pts_y_l])
points.append([pts_x_m, pts_y_m])
pts_y_r = pts_y_m - (arrow_head_pt[0]-center_position[0])*.5*arrow_s2/distance
pts_x_r = pts_x_m + (arrow_head_pt[1]-center_position[1])*.5*arrow_s2/distance
points.append([pts_x_r,pts_y_r])
_drawArrow(canvas, points, lineColor)
if line_head_pt != None:
pts.append(line_head_pt)
_drawBezier(pts, lineColor, linewidth)
else:
pts.append(arrow_head_pt)
_drawBezier(pts, lineColor, linewidth)
except:
prd_center_position = [c2[0]+.5*s2[0], c2[1]+.5*s2[1]]
pts.append(prd_center_position)
_drawBezier(pts, lineColor, linewidth)
elif lineType == 'straight':
for i in range (nReactants):
c1 = rct_position[i]
s1 = rct_dimension[i]
try:
#to calculate the head point of the arrow called arrow_end_pt
arrow_end_pt = _cross_point(arcCenter, c1, s1)
line_end_pt = _cross_point(arcCenter,
[c1[0]-reaction_line_width,c1[1]-reaction_line_width],
[s1[0]+reaction_line_width*2,s1[1]+reaction_line_width*2])
if arrow_end_pt == None:
#arcCenter is inside the node
arrow_end_pt = [c1[0]+.5*s1[0], c1[1]+.5*s1[1]]
line_end_pt = _cross_point(arcCenter,
[c1[0]-reaction_line_width,c1[1]-reaction_line_width],
[s1[0]+reaction_line_width*2,s1[1]+reaction_line_width*2])
if reverse and showReversible:
#draw the arrow:
points = [arrow_end_pt]
distance = math.sqrt((arrow_end_pt[0]-arcCenter[0])**2 + (arrow_end_pt[1]-arcCenter[1])**2)
pts_y_m = arrow_end_pt[1] - (arrow_end_pt[1]-arcCenter[1])*arrow_s1/distance
pts_x_m = arrow_end_pt[0] - (arrow_end_pt[0]-arcCenter[0])*arrow_s1/distance
pts_y_l = pts_y_m + (arrow_end_pt[0]-arcCenter[0])*.5*arrow_s2/distance
pts_x_l = pts_x_m - (arrow_end_pt[1]-arcCenter[1])*.5*arrow_s2/distance
points.append([pts_x_l,pts_y_l])
points.append([pts_x_m, pts_y_m])
pts_y_r = pts_y_m - (arrow_end_pt[0]-arcCenter[0])*.5*arrow_s2/distance
pts_x_r = pts_x_m + (arrow_end_pt[1]-arcCenter[1])*.5*arrow_s2/distance
points.append([pts_x_r,pts_y_r])
_drawArrow(canvas, points, lineColor)
except:
pass
if reverse and line_end_pt != None:
_drawLine(canvas, arcCenter[0], arcCenter[1], line_end_pt[0], line_end_pt[1],
lineColor, linewidth)
else:
_drawLine(canvas, arcCenter[0], arcCenter[1], arrow_end_pt[0], arrow_end_pt[1],
lineColor, linewidth)
for i in range (nProducts):
c2 = prd_position[i]
s2 = prd_dimension[i]
try:
#to calculate the head point of the arrow called arrow_head_pt
arrow_head_pt = _cross_point(arcCenter, c2, s2)
line_head_pt = _cross_point(arcCenter,
[c2[0]-reaction_line_width,c2[1]-reaction_line_width],
[s2[0]+reaction_line_width*2,s2[1]+reaction_line_width*2])
if arrow_head_pt == None:
#arcCenter is inside the node
arrow_head_pt = [c2[0]+.5*s2[0], c2[1]+.5*s2[1]]
line_head_pt = _cross_point(arcCenter,
[c2[0]-reaction_line_width,c2[1]-reaction_line_width],
[s2[0]+reaction_line_width*2,s2[1]+reaction_line_width*2])
#draw the arrow:
points = [arrow_head_pt]
distance = math.sqrt((arrow_head_pt[0]-arcCenter[0])**2 + (arrow_head_pt[1]-arcCenter[1])**2)
pts_y_m = arrow_head_pt[1] - (arrow_head_pt[1]-arcCenter[1])*arrow_s1/distance
pts_x_m = arrow_head_pt[0] - (arrow_head_pt[0]-arcCenter[0])*arrow_s1/distance
pts_y_l = pts_y_m + (arrow_head_pt[0]-arcCenter[0])*.5*arrow_s2/distance
pts_x_l = pts_x_m - (arrow_head_pt[1]-arcCenter[1])*.5*arrow_s2/distance
points.append([pts_x_l,pts_y_l])
points.append([pts_x_m, pts_y_m])
pts_y_r = pts_y_m - (arrow_head_pt[0]-arcCenter[0])*.5*arrow_s2/distance
pts_x_r = pts_x_m + (arrow_head_pt[1]-arcCenter[1])*.5*arrow_s2/distance
points.append([pts_x_r,pts_y_r])
_drawArrow(canvas, points, lineColor)
except:
pass
if line_head_pt != None:
_drawLine(canvas, arcCenter[0], arcCenter[1], line_head_pt[0], line_head_pt[1],
lineColor, linewidth)
else:
_drawLine(canvas, arcCenter[0], arcCenter[1], arrow_head_pt[0], arrow_head_pt[1],
lineColor, linewidth)
#draw modifiers:
modifier_lineColor = skia.Color(128, 0, 128)
modifier_linewidth = 2*scale
mod_num = len(mod_position)
for i in range(mod_num):
mod_start_virtual_x = .5*mod_dimension[i][0] + mod_position[i][0]
mod_start_virtual_y = .5*mod_dimension[i][1] + mod_position[i][1]
try:
[mod_start_x, mod_start_y] = _cross_point(arcCenter,
[mod_position[i][0]-.25*mod_dimension[i][0], mod_position[i][1]-.25*mod_dimension[i][1]],
[mod_dimension[i][0]*1.5, mod_dimension[i][1]*1.5])
[mod_end_x, mod_end_y] = _cross_point([mod_start_virtual_x, mod_start_virtual_y],
[arcCenter[0]-.5*mod_dimension[i][0],arcCenter[1]-.5*mod_dimension[i][1]], mod_dimension[i])
except:
mod_start_x = .5*mod_dimension[i][0] + mod_position[i][0]
mod_start_y = .5*mod_dimension[i][1] + mod_position[i][1]
[mod_end_x, mod_end_y] = arcCenter[0], arcCenter[1]
_drawLine(canvas, mod_start_x, mod_start_y, mod_end_x, mod_end_y,
modifier_lineColor, modifier_linewidth)
_drawCircle(canvas, mod_end_x-modifier_linewidth, mod_end_y-modifier_linewidth,
2*modifier_linewidth, 2*modifier_linewidth,
modifier_lineColor, modifier_lineColor, .5*modifier_linewidth)
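# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal sketch of the "handles" layout for a bezier reaction with one
# reactant and one product: [center handle, reactant handle(s), product handle(s)].
# All coordinates, sizes and colors below are illustrative assumptions.
def _exampleAddSingleReaction(canvas):
    addReaction(canvas, 'J0',
                rct_position=[[50, 100]], prd_position=[[250, 100]], mod_position=[],
                center_position=[175, 112],
                handles=[[175, 80],    # center handle
                         [120, 90],    # handle of the single reactant
                         [230, 90]],   # handle of the single product
                rct_dimension=[[60, 24]], prd_dimension=[[60, 24]], mod_dimension=[],
                reaction_line_color=[91, 176, 253, 255], reaction_line_width=2.)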
def addText(canvas, txt_str, position, dimension,
text_line_color = [0, 0, 0, 255], text_line_width = 1., fontSize = 12.,
longText='auto-font'):
"""
Add the text.
Args:
canvas: skia.Canvas.
txt_str: str-the content of the text.
position: list-1*2 matrix-top left-hand corner of the rectangle [position_x, position_y].
dimension: list-1*2 matrix-size of the rectangle [width, height].
text_line_color: list-rgba 1*4 matrix-text line color.
text_line_width: float-text line width.
fontSize: float-font size (12. as default); with longText='auto-font' it is reduced until the text fits the node.
longText: str-'auto-font' (default: shrink the font until the text fits),
'ellipsis' (truncate the text and append dots), or any other value to draw the text as is.
"""
#default fontSize is 12 in the function font = skia.Font(skia.Typeface())
fontColor = skia.Color(text_line_color[0], text_line_color[1], text_line_color[2], text_line_color[3])
paintText = skia.Paint(Color = fontColor, StrokeWidth=text_line_width)
font = skia.Font(skia.Typeface('Arial', skia.FontStyle.Bold()), fontSize)
text = skia.TextBlob.MakeFromString(txt_str, font)
twidth = font.measureText(txt_str)
#fontSize = font.getSize()
theight = font.getSpacing()
if longText == 'auto-font':
stop_flag_1 = False
stop_flag_2 = False
count_while = 0
while stop_flag_1 == False and stop_flag_2 == False:
#default fontSize is 12 in the function font = skia.Font(skia.Typeface())
fontColor = skia.Color(text_line_color[0], text_line_color[1], text_line_color[2], text_line_color[3])
paintText = skia.Paint(Color = fontColor, StrokeWidth=text_line_width)
font = skia.Font(skia.Typeface('Arial', skia.FontStyle.Bold()), fontSize)
text = skia.TextBlob.MakeFromString(txt_str, font)
twidth = font.measureText(txt_str)
#fontSize = font.getSize()
theight = font.getSpacing()
if dimension[0] > (twidth+4.*text_line_width) and dimension[1] > (theight+4.*text_line_width):
stop_flag_1 = True
position = [position[0], position[1] + theight - dimension[1]*0.1] #adjust of the text position
position_x = position[0] + .5*(dimension[0] - twidth)
position_y = position[1] + .5*(dimension[1] - theight)
else:
# Decrease the font size to fit the text bounding box within the node bounding box
fontSize = fontSize - 1.
count_while += 1
if count_while > 20:
stop_flag_1 = True
position = [position[0], position[1] + theight - dimension[1]*0.1] #adjust of the text position
position_x = position[0] + .5*(dimension[0] - twidth)
position_y = position[1] + .5*(dimension[1] - theight)
elif longText == 'ellipsis':
txt_str_len = len(txt_str)
stop_flag_1 = False
stop_flag_2 = False
count_while = 0
while stop_flag_1 == False and stop_flag_2 == False:
fontColor = skia.Color(text_line_color[0], text_line_color[1], text_line_color[2], text_line_color[3])
paintText = skia.Paint(Color = fontColor, StrokeWidth=text_line_width)
font = skia.Font(skia.Typeface('Arial', skia.FontStyle.Bold()), fontSize)
text = skia.TextBlob.MakeFromString(txt_str, font)
twidth = font.measureText(txt_str)
#fontSize = font.getSize()
theight = font.getSpacing()
if dimension[0] > (twidth+4.*text_line_width) and dimension[1] > (theight+4.*text_line_width):
stop_flag_1 = True
position = [position[0], position[1] + theight - dimension[1]*0.1] #adjust of the text position
position_x = position[0] + .5*(dimension[0] - twidth)
position_y = position[1] + .5*(dimension[1] - theight)
else:
# Shorten the text and append an ellipsis so it fits within the node bounding box
txt_str_len = txt_str_len - 1
txt_str = txt_str[:txt_str_len] + '....'
count_while += 1
if count_while > 20:
stop_flag_1 = True
position = [position[0], position[1] + theight - dimension[1]*0.1] #adjust of the text position
position_x = position[0] + .5*(dimension[0] - twidth)
position_y = position[1] + .5*(dimension[1] - theight)
else:
position = [position[0], position[1] + theight - dimension[1]*0.1] #adjust of the text position
position_x = position[0] + .5*(dimension[0] - twidth)
position_y = position[1] + .5*(dimension[1] - theight)
canvas.drawTextBlob(text, position_x, position_y, paintText)
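# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal sketch of the two longText modes described above: 'auto-font' shrinks
# the font until the label fits the box, while 'ellipsis' truncates the string
# and appends dots. The label and box sizes below are illustrative assumptions.
def _exampleAddLongLabel(canvas):
    addText(canvas, 'a_rather_long_species_name', [20, 20], [60, 30], longText='ellipsis')
    addText(canvas, 'a_rather_long_species_name', [20, 70], [60, 30], longText='auto-font')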
def addSimpleText(canvas, text, position, text_line_color, text_line_width=1, fontSize = 12):
"""
Add a single line of text at a given position, without the fitting logic of addText().
Args:
canvas: skia.Canvas.
text: str-the content of the text.
position: list-1*2 matrix-position of the text [position_x, position_y].
text_line_color: list-rgba 1*4 matrix-text line color.
text_line_width: float-text line width.
fontSize: float-font size (12 as default).
"""
fontColor = skia.Color(text_line_color[0], text_line_color[1], text_line_color[2], text_line_color[3])
font = skia.Font(skia.Typeface('Arial', skia.FontStyle.Bold()), fontSize)
paintText = skia.Paint(Color=fontColor, StrokeWidth=text_line_width)
canvas.drawSimpleText(text, position[0], position[1], font, paintText)
def showPlot(surface, save = True, folderName = '', fileName = '', file_format = 'PNG', showImage = True):
"""
Display the diagram and save it to the local.
Args:
surface: skia.Surface.
fileName: str-the name for the generated file: either the input filename or
temp.png if '' (default) in order to show the plots only instead of saving files.
fileFormat = 'PNG' (default) or 'JPEG'.
folderName = name for the folder to save the images
Returns:
the drew image array
"""
if folderName:
if not os.path.exists(os.getcwd() + '/' + folderName):
os.makedirs(os.getcwd() + '/' + folderName)
if fileName == '':
#random_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
#tmpfileName = os.path.join(os.getcwd() + '/' + folderName, random_string)
#shows the plot only instead of saving the files
tmpfileName = 'temp'
image = surface.makeImageSnapshot()
if save:
tmpfileName = tmpfileName + '.png'
image.save(tmpfileName, skia.kPNG)
if showImage:
pil_im = Image.open(tmpfileName)
display(pil_im)
#pil_im.show()
#self.surface.write_to_png(tmpfileName)
else:
fileName = os.path.join(os.getcwd() + '/' + folderName,fileName)
image = surface.makeImageSnapshot()
if save:
if file_format == 'PNG':
fileName = fileName + '.png'
image.save(fileName, skia.kPNG)
if showImage:
pil_im = Image.open(fileName)
display(pil_im)
elif file_format == 'JPEG':
fileName = fileName + '.jpg'
image.save(fileName, skia.kJPEG)
if showImage:
pil_im = Image.open(fileName)
display(pil_im)
elif file_format == 'PDF':
fileName = fileName + '.png'
image.save(fileName, skia.kPNG)
if showImage:
pil_im = Image.open(fileName)
display(pil_im)
#pil_im.show()
# imagepdf = pil_im.convert('RGB')
# imagepdf.save(fileNamepdf)
return image.toarray()
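# --- Hedged example (editor's addition, not part of the original module) ---
# A minimal end-to-end sketch: create a surface, draw a compartment, one node and
# its label, then hand the surface to showPlot(). All positions, sizes and colors
# below are illustrative assumptions.
def _exampleMinimalDiagram():
    surface = skia.Surface(300, 200)
    canvas = surface.getCanvas()
    canvas.clear(skia.ColorWHITE)
    addCompartment(canvas, [10, 10], [280, 180],
                   comp_border_color=[0, 0, 0, 255],
                   comp_fill_color=[255, 255, 255, 255],
                   comp_border_width=2.)
    addNode(canvas, 'floating', '', [110, 80], [80, 40],
            [0, 0, 0, 255], [173, 216, 230, 255], 2.,
            1, 'rectangle', 'rectangle', [])
    addText(canvas, 'S1', [110, 80], [80, 40])
    return showPlot(surface, save=False, showImage=False)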
| avg_line_length 42.663657 | max_line_length 131 | alphanum_fraction 0.588025 |
Row 3: hexsha 794cbd78c5602c789d8bdb55a3e7eb9e93121d12 | size 29,945 | ext py | lang Python | repo shahmoradi/paramonte-1 @ 77c81c14e475bfacb19fa6de1f41629380e453d3 | path src/interface/Python/paramonte/_paradram.py | licenses ["MIT"] | stars: null | issues: null | forks: 1 (2020-09-19T03:45:07.000Z) | content:
####################################################################################################################################
####################################################################################################################################
####
#### MIT License
####
#### ParaMonte: plain powerful parallel Monte Carlo library.
####
#### Copyright (C) 2012-present, The Computational Data Science Lab
####
#### This file is part of the ParaMonte library.
####
#### Permission is hereby granted, free of charge, to any person obtaining a
#### copy of this software and associated documentation files (the "Software"),
#### to deal in the Software without restriction, including without limitation
#### the rights to use, copy, modify, merge, publish, distribute, sublicense,
#### and/or sell copies of the Software, and to permit persons to whom the
#### Software is furnished to do so, subject to the following conditions:
####
#### The above copyright notice and this permission notice shall be
#### included in all copies or substantial portions of the Software.
####
#### THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#### EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#### MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#### IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#### DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
#### OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
#### OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
####
#### ACKNOWLEDGMENT
####
#### ParaMonte is an honor-ware and its currency is acknowledgment and citations.
#### As per the ParaMonte library license agreement terms, if you use any parts of
#### this library for any purposes, kindly acknowledge the use of ParaMonte in your
#### work (education/research/industry/development/...) by citing the ParaMonte
#### library as described on this page:
####
#### https://github.com/cdslaborg/paramonte/blob/master/ACKNOWLEDGMENT.md
####
####################################################################################################################################
####################################################################################################################################
import numpy as np
import typing as tp
from _ParaMonteSampler import ParaMonteSampler
from _TabularFileContents import TabularFileContents
import _paramonte as pm
newline = pm.newline
####################################################################################################################################
#### ParaDRAM class
####################################################################################################################################
class ParaDRAM(ParaMonteSampler):
"""
This is the **ParaDRAM** class to generate instances of **serial** and **parallel**
**Delayed-Rejection Adaptive Metropolis-Hastings Markov Chain Monte Carlo**
sampler class of the ParaMonte library. The ``ParaDRAM`` class is a
child of the ``ParaMonteSampler`` class.
All ParaDRAM class attributes (input arguments to the ParaDRAM constructor)
are optional and all attributes can also be set after a ParaDRAM instance
is returned by the constructor.
Once you set the optional attributes to your desired values,
call the ParaDRAM sampler via the object's method ``runSampler()``.
.. _example-serial-usage:
**Example serial usage**
Copy and paste the following code enclosed between the
two comment lines in your python/ipython/jupyter session
(ensure the indentations of the pasted lines comply with Python rules):
.. code-block:: python
:linenos:
##################################
import paramonte as pm
import numpy as np
def getLogFunc(point):
# return the log of the standard multivariate
# Normal density function with ndim dimensions
return -0.5 * np.sum( np.double( point )**2 )
pmpd = pm.ParaDRAM()
pmpd.runSampler ( ndim = 4 # assume 4-dimensional objective function
, getLogFunc = getLogFunc # the objective function
)
##################################
where,
ndim
represents the number of dimensions of the domain of
the user's objective function ``getLogFunc(point)`` and,
getLogFunc(point)
represents the user's objective function to be sampled,
which must take a single input argument ``point`` of type
numpy-float64 array of length ``ndim`` and must return the
natural logarithm of the objective function.
.. _example-parallel-usage:
**Example parallel usage**
Copy and paste the following code enclosed between the
two comment lines in your python/ipython/jupyter session
(ensure the indentations of the pasted lines comply with Python rules):
.. code-block:: python
:linenos:
##################################
with open("main.py", "w") as file:
file.write ('''
import paramonte as pm
import numpy as np
def getLogFunc(point):
# return the log of the standard multivariate
# Normal density function with ndim dimensions
return -0.5 * np.sum( np.double( point )**2 )
pmpd = pm.ParaDRAM()
pmpd.mpiEnabled = True
pmpd.runSampler ( ndim = 4 # assume 4-dimensional objective function
, getLogFunc = getLogFunc # the objective function
)
''')
##################################
where,
ndim
represents the number of dimensions of the domain of
the user's objective function ``getLogFunc(point)`` and,
getLogFunc(point)
represents the user's objective function that is to be sampled.
This function must take a single input argument ``point`` of type
numpy-float64 array of length ndim and must return the natural
logarithm of the objective function.
mpiEnabled
is a logical (boolean) indicator that, if ``True``, will
cause the ParaDRAM simulation to run in parallel
on the requested number of processors.
The default value is ``False``.
The above will generate a Parallel-ParaDRAM-simulation Python script in the
current working directory of Python. Note the only difference between the
serial and parallel simulation scripts: the extra line ``pmpd.mpiEnabled = True``
which tells the ParaMonte library to run the simulation in parallel.
Assuming that you already have an MPI runtime library installed on your
system (see below), you can now execute this Python script file ``main.py``
in parallel in two ways:
1. from inside ipython or jupyter, type the following,
.. code-block:: bash
!mpiexec -n 3 python main.py
2. outside of Python environment,
from within a Bash shell (on Linux or Mac) or,
from within an Anaconda command prompt on Windows,
type the following,
.. code-block:: bash
mpiexec -n 3 python main.py
**Note:**
On Windows platform, if you are using the Intel MPI library,
we recommend that you also specify the extra flag -localonly,
.. code-block:: bash
mpiexec -localonly -n 3 python main.py
This will cause the simulations to run in parallel only on a single node,
but more importantly, it will also prevent the use of the Hydra service and
the need for its registration. If you are not on a Windows cluster
(e.g., you are using your personal device), then we highly recommend
specifying this flag.
In all of the cases above, the script ``main.py`` will run on 3 processors.
Feel free to change the number of processors as desired, but do not
request more than the number of physical cores available on your system.
**Tips on parallel usage**
For up-to-date detailed instructions on how to run simulations in parallel visit:
https://www.cdslab.org/paramonte
You can also use the following commands on the Python command-line,
.. code-block:: python
:linenos:
##################################
import paramonte as pm
pm.verify() # verify the existence of parallel simulation prerequisites
##################################
to obtain specific information on how to run a parallel simulation,
in particular, in relation to your current installation of ParaMonte.
In general, for parallel simulations:
0. Ensure you need and will get a speedup by running the ParaDRAM sampler
in parallel. Typically, if a single evaluation of the objective function
takes much longer than a few milliseconds, your simulation may then
benefit from the parallel run.
1. Ensure you have an MPI library installed, preferably, the Intel MPI
runtime libraries. An MPI library should be automatically installed
on your system with ParaMonte. If needed, you can download the Intel
MPI library from their website and install it.
2. Ensure the ParaDRAM object property ``mpiEnabled`` is ``True``
(the default is ``False``).
3. Before running the parallel simulation, in particular, on Windows systems,
you may need to define the necessary MPI environmental variables on your system.
To get information on how to define the variables, use the paramonte module's
function, ``verify()``, as described in the above.
4. Call your main Python code from a Python-aware mpiexec-aware command-line via,
.. code-block:: bash
mpi_launcher -n num_process python name_of_your_python_code.py
where,
1. "mpi_launcher" is the name of the MPI launcher
of the MPI runtime library that you have installed.
For example, the Intel MPI library's launcher is named ``mpiexec``,
a name also used by Microsoft MPI, MPICH, and OpenMPI.
Note that on supercomputers, the MPI launcher is usually
something other than ``mpiexec``, for example:
``ibrun``, ``mpirun``, ...
2. "num_process" represents the number of cores
on which you want to run the program. Replace this
with an integer, for example, 3 (meaning 3 cores).
Do not assign more processes than the available number of
physical cores on your device/cluster. Assigning more cores
than physically available on your system will only slow down
your simulation.
Once the above script is saved in the file ``main.py``, open a Python-aware and
MPI-aware command prompt to run the simulation in parallel via the MPI launcher,
.. code-block:: bash
mpiexec -n 3 python main.py
This will execute the Python script ``main.py`` on three processes (images).
Keep in mind that on Windows systems you may need to define MPI environmental
variables before a parallel simulation, as described in the above.
**ParaDRAM Class Attributes**
See also:
https://www.cdslab.org/paramonte/notes/usage/paradram/specifications/
All input specifications (attributes) of a ParaDRAM simulation are optional.
However, it is recommended that you provide as much information as possible
about the specific ParaDRAM simulation and the objective function to be sampled
via ParaDRAM simulation specifications.
The ParaDRAM simulation specifications have lengthy comprehensive descriptions
that appear in full in the output report file of every ParaDRAM simulation.
The best way to learn about individual ParaDRAM simulation attributes
is to run a minimal serial simulation with the following Python script,
.. code-block:: python
:linenos:
##################################
from paramonte import ParaDRAM
pmpd = ParaDRAM()
pmpd.spec.outputFileName = "./test"
def getLogFunc(point): return -sum(point**2)
pmpd.runSampler( ndim = 1, getLogFunc = getLogFunc )
##################################
Running this code will generate a set of simulation output files (in the current
working directory of Python) that begin with the prefix ``test_process_1``. Among
these, the file ``test_process_1_report.txt`` contains the full description of all
input specifications of the ParaDRAM simulation as well as other information
about the simulation results and statistics.
**Parameters**
None. The simulation specifications can be set once an object is instantiated.
All simulation specification descriptions are collectively available at:
https://www.cdslab.org/paramonte/notes/usage/paradram/specifications/
Note that this is the new interface. The previous ParaDRAM class interface
used to optionally take all simulation specifications as input. Over time,
however, this approach became more of a liability than a potential benefit.
All simulation specifications must now be set solely after a ParaDRAM
object is instantiated, instead of being passed to the ParaDRAM class
constructor.
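For example, a minimal sketch of this style of usage (the particular
specification values below are only illustrative),
.. code-block:: python
:linenos:
##################################
import paramonte as pm
pmpd = pm.ParaDRAM()
pmpd.spec.chainSize = 30000 # set simulation specifications after instantiation
pmpd.spec.outputFileName = "./out/mydram"
##################################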
**Attributes**
buildMode
optional string argument with the default value "release".
possible choices are:
"debug"
to be used for identifying sources of bugs
and causes of code crashes.
"release"
to be used in all other normal scenarios
for maximum runtime efficiency.
mpiEnabled
optional logical (boolean) indicator which is ``False`` by default.
If it is set to ``True``, it will cause the ParaDRAM simulation
to run in parallel on the requested number of processors.
See the class documentation guidelines in the above for
information on how to run a simulation in parallel.
reportEnabled
optional logical (boolean) indicator which is ``True`` by default.
If it is set to ``True``, it will cause extensive guidelines to be
printed on the standard output as the simulation or post-processing
continues with hints on the next possible steps that could be taken
in the process. If you do not need such help and information set
this variable to ``False`` to silence all output messages.
inputFile
optional string input representing the path to
an external input namelist of simulation specifications.
USE THIS OPTIONAL ARGUMENT WITH CAUTION AND
ONLY IF YOU KNOW WHAT YOU ARE DOING.
**WARNING**
Specifying an input file will cause the ParaDRAM sampler
to ignore all other simulation specifications set by the
user via sampler instance's `spec`-component attributes.
spec
A frozen class containing all simulation specifications.
All simulation attributes are by default set to appropriate
values at runtime. To override the default simulation
specifications, set the `spec` attributes to some
desired values of your choice. For possible values, see:
https://www.cdslab.org/paramonte/notes/usage/paradram/specifications/
If you need help on any of the simulation specifications, try
the supplied ``helpme()`` function in this component, like,
.. code-block:: python
:linenos:
##################################
import paramonte as pm
pmpd = pm.ParaDRAM() # instantiate a ParaDRAM sampler class
pmpd.spec.helpme() # get help on all simulation specifications
pmpd.spec.helpme("chainSize") # get help on "chainSize" specifically
##################################
**Methods**
See below for information on the methods.
**Returns**
Object of class ParaDRAM sampler.
---------------------------------------------------------------------------
"""
def __init__(self):
"""
The constructor for ParaDRAM class.
All input parameters are optional and all class
attributes can be changed after the object construction.
**Parameters**
None
"""
super().__init__(methodName = "ParaDRAM")
## ParaMonte specifications
#
#self.spec = pm.utils.FrozenClass()
#
## ParaMonte variables
#self.spec.sampleSize = sampleSize
#self.spec.randomSeed = randomSeed
#self.spec.description = description
#self.spec.outputFileName = outputFileName
#self.spec.outputDelimiter = outputDelimiter
#self.spec.chainFileFormat = chainFileFormat
#self.spec.variableNameList = variableNameList
#self.spec.restartFileFormat = restartFileFormat
#self.spec.outputColumnWidth = outputColumnWidth
#self.spec.outputRealPrecision = outputRealPrecision
#self.spec.silentModeRequested = silentModeRequested
#self.spec.domainLowerLimitVec = domainLowerLimitVec
#self.spec.domainUpperLimitVec = domainUpperLimitVec
#self.spec.parallelizationModel = parallelizationModel
#self.spec.progressReportPeriod = progressReportPeriod
#self.spec.targetAcceptanceRate = targetAcceptanceRate
#self.spec.mpiFinalizeRequested = mpiFinalizeRequested
#self.spec.maxNumDomainCheckToWarn = maxNumDomainCheckToWarn
#self.spec.maxNumDomainCheckToStop = maxNumDomainCheckToStop
## ParaMCMC variables
#self.spec.chainSize = chainSize
#self.spec.scaleFactor = scaleFactor
#self.spec.startPointVec = startPointVec
#self.spec.proposalModel = proposalModel
#self.spec.proposalStartCovMat = proposalStartCovMat
#self.spec.proposalStartCorMat = proposalStartCorMat
#self.spec.proposalStartStdVec = proposalStartStdVec
#self.spec.sampleRefinementCount = sampleRefinementCount
#self.spec.sampleRefinementMethod = sampleRefinementMethod
#self.spec.randomStartPointRequested = randomStartPointRequested
#self.spec.randomStartPointDomainLowerLimitVec = randomStartPointDomainLowerLimitVec
#self.spec.randomStartPointDomainUpperLimitVec = randomStartPointDomainUpperLimitVec
## ParaDRAM variables
#self.spec.adaptiveUpdateCount = adaptiveUpdateCount
#self.spec.adaptiveUpdatePeriod = adaptiveUpdatePeriod
#self.spec.greedyAdaptationCount = greedyAdaptationCount
#self.spec.delayedRejectionCount = delayedRejectionCount
#self.spec.burninAdaptationMeasure = burninAdaptationMeasure
#self.spec.delayedRejectionScaleFactorVec = delayedRejectionScaleFactorVec
#
#self.spec.helpme = SpecDRAM.helpme
#self.spec._freeze()
################################################################################################################################
#### runSampler
################################################################################################################################
def runSampler ( self
, ndim : int
, getLogFunc : tp.Callable[[tp.List[float]], float]
, inputFile : tp.Optional[str] = None
) -> None:
"""
Run ParaDRAM sampler and return nothing.
**Parameters**
ndim
An integer representing the number of dimensions of the
domain of the user's objective function ``getLogFunc(point)``.
It must be a positive integer.
getLogFunc(point)
represents the user's objective function to be sampled,
which must take a single input argument ``point`` of type
numpy-float64 array of length ``ndim`` and must return the
natural logarithm of the objective function.
inputFile (optional)
A string input representing the path to an external
input namelist of simulation specifications.
**WARNING**
Use this optional argument with caution and only
if you know what you are doing. Specifying this option
will cause the sampler to ignore all other simulation
specifications set by the user via the ``spec``
component of the sampler instance.
**Returns**
None
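**Example**
A minimal sketch of a typical call (the two-dimensional standard-normal
objective function below is only illustrative),
.. code-block:: python
:linenos:
##################################
import numpy as np
import paramonte as pm
def getLogFunc(point): return -0.5 * np.sum( np.double(point)**2 )
pmpd = pm.ParaDRAM()
pmpd.runSampler( ndim = 2, getLogFunc = getLogFunc )
##################################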
"""
if not isinstance(ndim,int) or ndim<1:
pm.abort( msg = "The input argument ndim must be a positive integer," + newline
+ "representing the number of dimensions of the domain of" + newline
+ "the user's objective function getLogFunc()." + newline
+ "You have entered ndim = " + str(ndim)
, methodName = self._methodName
, marginTop = 1
, marginBot = 1
)
if not callable(getLogFunc):
pm.abort( msg = "The input argument getLogFunc must be a callable function." + newline
+ "It represents the user's objective function to be sampled," + newline
+ "which must take a single input argument of type numpy" + newline
+ "float64 array of length ndim and must return the" + newline
+ "natural logarithm of the objective function."
, methodName = self._methodName
, marginTop = 1
, marginBot = 1
)
if inputFile is not None and not isinstance(inputFile,str):
pm.abort( msg = "The input argument ``inputFile`` must be of type str." + newline
+ "It is an optional string input representing the path to" + newline
+ "an external input namelist of simulation specifications." + newline
+ "USE THIS OPTIONAL ARGUMENT WITH CAUTION AND" + newline
+ "ONLY IF YOU KNOW WHAT YOU ARE DOING." + newline
+ "Specifying this option will cause the sampler to ignore" + newline
+ "all other simulation specifications set by the user via" + newline
+ "the ``spec`` component of the sampler instance." + newline
+ "You have entered inputFile = " + str(inputFile)
, methodName = self._methodName
, marginTop = 1
, marginBot = 1
)
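# wrap the user's single-argument objective function into the
# two-argument (ndim, point) form passed to the internal _runSampler call below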
def getLogFunc2arg(ndim,point):
PointVec = np.array(point[0:ndim])
return getLogFunc(PointVec)
self._runSampler( ndim
, getLogFunc2arg
, inputFile
)
################################################################################################################################
#### readMarkovChain
################################################################################################################################
def readMarkovChain ( self
, file : tp.Optional[str] = None
, delimiter : tp.Optional[str] = None
, parseContents : tp.Optional[bool] = True
, renabled : tp.Optional[bool] = False
) -> tp.List[TabularFileContents] :
"""
Return a list of the unweighted verbose (Markov-chain) contents
of a set of ParaDRAM output chain files whose names begin with the
user-provided input variable ``file``. This method is meant to be used
only for postprocessing the output chain file(s) of an already finished
ParaDRAM simulation. It is not meant to be called by all processes in
parallel mode, although that is possible.
**Parameters**
file (optional)
A string representing the path to the chain file with
the default value of ``None``.
The path only needs to uniquely identify the simulation
to which the chain file belongs. For example, specifying
``"./mydir/mysim"`` as input will lead to a search for a file
that begins with ``"mysim"`` and ends with ``"_chain.txt"``
inside the directory ``"./mydir/"``. If there are multiple
files with such name, then all of them will be read
and returned as a list.
If this input argument is not provided by the user, the
value of the object attribute ``outputFileName`` will be
used instead. At least one of the two mentioned routes
must provide the path to the chain file; otherwise,
this method will abort by calling ``sys.exit()``.
delimiter (optional)
An input string representing the delimiter used in the
output chain file. If it is not provided as input argument, the
value of the corresponding object attribute ``outputDelimiter``
will be used instead. If none of the two are available,
the default comma delimiter ``","`` will be assumed and used.
parseContents (optional)
If set to ``True``, the contents of the file will be parsed
and stored in a component of the object named ``contents``.
The default value is ``True``.
renabled (optional)
If set to False, the contents of the file(s) will be stored
as a list in a (new) component of the ParaDRAM object named
``markovChainList`` and ``None`` will be the return value
of the method. If set to ``True``, the reverse will be done.
The default value is ``False``.
**Returns**
A list of objects, each of which has the following properties:
file
The full absolute path to the chain file.
delimiter
The delimiter used in the chain file.
ndim
The number of dimensions of the domain of the objective
function from which the chain has been drawn.
count
The number of unique (weighted) points in the chain file.
This is essentially the number of rows in the chain file
minus one (representing the header line).
plot
A structure containing the graphics tools for the
visualization of the contents of the file.
df
The unweighted (Markovian) contents of the chain file in the
form of a pandas-library DataFrame (hence called ``df``).
contents
For each column in the chain file, a property with the same
name as the column header is also created for the object,
containing the data stored in that column of the chain file.
These properties are all stored in the
attribute ``contents``.
If ``renabled = True``, the list of objects will be returned as the
return value of the method. Otherwise, the list will be stored in a
component of the ParaDRAM object named ``markovChainList``.
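**Example**
A minimal sketch of a typical postprocessing call (the output-file
prefix below is hypothetical),
.. code-block:: python
:linenos:
##################################
# read the unweighted Markov chain of an already finished simulation
# whose output files begin with the (hypothetical) prefix "./out/mydram"
chainList = pmpd.readMarkovChain( file = "./out/mydram", renabled = True )
chainList[0].df.head()
##################################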
"""
return self._readTabular( file = file
, fileType = "markovChain"
, delimiter = delimiter
, parseContents = parseContents
, renabled = renabled
)
################################################################################################################################
| 44.297337
| 132
| 0.566672
|
794cbd7f46e5b3151decc39867de4882abcbd087
| 1,072
|
py
|
Python
|
app/routes.py
|
opt9/vuln-python-flask2
|
2b1753abc29ab9f3bf14c6a9fd348ae41acfa180
|
[
"MIT"
] | null | null | null |
app/routes.py
|
opt9/vuln-python-flask2
|
2b1753abc29ab9f3bf14c6a9fd348ae41acfa180
|
[
"MIT"
] | 1
|
2020-07-27T09:55:28.000Z
|
2020-07-27T09:55:28.000Z
|
app/routes.py
|
opt9/vuln-python-flask2
|
2b1753abc29ab9f3bf14c6a9fd348ae41acfa180
|
[
"MIT"
] | null | null | null |
from flask import request, render_template_string, render_template, Markup
from app import app
@app.route('/hello')
def hello_world():
user = {'username':"world", 'secret':"dG9wIHNlY3JldA=="}
if request.args.get('username'):
user['username'] = request.args.get('username')
template = Markup('''<h2>Hi %s!</h2>''') % user['username']
return render_template_string(template, user=user)
@app.errorhandler(404)
def page_not_found(error):
template = Markup(
'''{%% extends "layout.html" %%}
{%% block content %%}
<div class="center-content error">
<h1>Oops! That page doesn't exist.</h1>
<h3>%s</h3>
</div>
{%% endblock %%}
''') % (request.url)
return render_template_string(template), 404
@app.errorhandler(500)
def internal_error(error):
template = Markup(
'''{%% extends "layout.html" %%}
{%% block content %%}
<div class="center-content error">
<h1>Oops! Something wrong!</h1>
<h3>%s</h3>
</div>
{%% endblock %%}
''') % (request.url)
return render_template_string(template), 500
| 26.8
| 74
| 0.63153
|
794cbe3a160ef12eb393ca1e87d176c2ed587793
| 798
|
py
|
Python
|
properties.py
|
FTAsr/STS
|
07fd4720cf00c9c78733718bd032fba7d92efc3a
|
[
"MIT"
] | null | null | null |
properties.py
|
FTAsr/STS
|
07fd4720cf00c9c78733718bd032fba7d92efc3a
|
[
"MIT"
] | null | null | null |
properties.py
|
FTAsr/STS
|
07fd4720cf00c9c78733718bd032fba7d92efc3a
|
[
"MIT"
] | null | null | null |
## Contains the configurable parameters for the Short Answer Scoring system.
## Set the required grading scale as True
GRADING_SCALE_MULTICLASS = False
GRADING_SCALE_REAL_0_5 = True
GRADING_SCALE_REAL_0_1 = False
GRADING_SCALE_Integer_0_5 = False
GRADING_SCALE_LABELS = ['Incorrect', 'Partially Correct', 'Correct']
PARTIALLY_CORRECT_LOW = 0
PARTIALLY_CORRECT_HIGH = 3
## Set the required mode of operation
INTERACTIVE_MODE = True
BATCH_MODE = False
## Threshold for feedback
LOWER_LIMIT = 15.0
## Set the pre-trained classifier
BEST_CLASSIFIER_COLLEGE = 'feed+fb+college.file'
BEST_CLASSIFIER_1A = 'bow+fb+1A.file'
BEST_CLASSIFIER_2A = 'bow+fb+2A.file'
BEST_CLASSIFIER_SICK = 'bow+fb+sick.file'
BEST_CLASSIFIER_STS = 'bow+fb+sts.file'
# Output folder for Batch mode testing
OUTPUT_PATH = ''
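## Hypothetical usage sketch (not part of the original file): mapping a
## real-valued score onto the multiclass labels using the thresholds above.
def label_for_score(score):
    if score <= PARTIALLY_CORRECT_LOW:
        return GRADING_SCALE_LABELS[0]
    elif score <= PARTIALLY_CORRECT_HIGH:
        return GRADING_SCALE_LABELS[1]
    return GRADING_SCALE_LABELS[2]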
| 27.517241
| 76
| 0.79198
|
794cbe84dff75acb6846184862b70b8c7854a29d
| 2,286
|
py
|
Python
|
insta/tests.py
|
EmmanuelMuchiri/instagram
|
d737e4afc34058c7b725e30145a8fe31187fc8dd
|
[
"MIT"
] | 1
|
2021-05-03T19:08:58.000Z
|
2021-05-03T19:08:58.000Z
|
insta/tests.py
|
EmmanuelMuchiri/instagram
|
d737e4afc34058c7b725e30145a8fe31187fc8dd
|
[
"MIT"
] | 4
|
2020-06-05T22:39:56.000Z
|
2021-09-08T01:15:38.000Z
|
insta/tests.py
|
EmmanuelMuchiri/instagram
|
d737e4afc34058c7b725e30145a8fe31187fc8dd
|
[
"MIT"
] | 2
|
2019-09-03T08:49:49.000Z
|
2019-11-19T12:57:55.000Z
|
from django.test import TestCase
from .models import Image,Profile
from django.contrib.auth.models import User
# Create your tests here.
class ImageTestClass(TestCase):
# Set up method
def setUp(self):
self.user = User.objects.create_user(username='testuser', password='12345')
self.profile = Profile(user = self.user)
self.profile.save()
self.image = Image(id=1,image = 'path/to/image',image_name='test',image_caption='test caption',user=self.user,profile=self.profile)
#Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.image,Image))
#Testing Save method
def test_save_image(self):
self.image.save_image()
images = Image.objects.all()
self.assertTrue(len(images) > 0)
#Testing Update Method
def test_update_caption(self):
self.image.save_image()
self.image = Image.objects.get(pk = 1)
self.image.update_caption('updated caption')
self.updated_image = Image.objects.get(id = 1)
self.assertEqual(self.updated_image.image_caption,"updated caption")
#Testing Delete Method
def test_delete_image(self):
self.image.delete_image()
self.assertTrue(len(Image.objects.all()) == 0)
class ProfileTestClass(TestCase):
# Set up method
def setUp(self):
self.user = User.objects.create_user(username='testuser', password='12345')
self.profile = Profile(id=1,profile_photo='path/to/photo',user = self.user,bio='test bio')
#Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.profile,Profile))
#Testing save method
def test_save_profile(self):
self.profile.save_profile()
profiles = Profile.objects.all()
self.assertTrue(len(profiles) > 0)
#Testing update method
def test_update_profile(self):
self.profile.save_profile()
self.profile = Profile.objects.get(pk = 1)
self.profile.update_bio('updated bio')
self.updated_profile = Profile.objects.get(pk = 1)
self.assertEqual(self.updated_profile.bio,"updated bio")
#Testing Delete Method
def test_delete_profile(self):
self.profile.delete_profile()
self.assertTrue(len(Profile.objects.all()) == 0)
| 35.71875
| 139
| 0.678478
|
794cbfcf818c422530db87edb9eff13ed6fe999f
| 3,564
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/bordetellabronchiseptica.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/bordetellabronchiseptica.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/bordetellabronchiseptica.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Bordetella bronchiseptica.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def BordetellaBronchiseptica(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Bordetella bronchiseptica graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Bordetella bronchiseptica graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
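Usage sketch (the import path below is inferred from the file location
and is an assumption, not part of the original documentation):
```python
from ensmallen.datasets.string import BordetellaBronchiseptica
# retrieve the graph with default settings (downloaded and cached on first use)
graph = BordetellaBronchiseptica()
```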
"""
return AutomaticallyRetrievedGraph(
graph_name="BordetellaBronchiseptica",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 33
| 223
| 0.679012
|
794cc0b3895dccd7e9ce551240f8e736030f91d0
| 645
|
py
|
Python
|
CursoEmVideo_python/Mundo 1/Exercicios/desafio4_aula6.py
|
IgorBalest/PythonCodes
|
58e8ac7523fa599395c8dcdda2c2bd81c190a021
|
[
"MIT"
] | null | null | null |
CursoEmVideo_python/Mundo 1/Exercicios/desafio4_aula6.py
|
IgorBalest/PythonCodes
|
58e8ac7523fa599395c8dcdda2c2bd81c190a021
|
[
"MIT"
] | null | null | null |
CursoEmVideo_python/Mundo 1/Exercicios/desafio4_aula6.py
|
IgorBalest/PythonCodes
|
58e8ac7523fa599395c8dcdda2c2bd81c190a021
|
[
"MIT"
] | null | null | null |
entrada = input('Type something: ')
print('The primitive type of this variable is {}'.format(type(entrada)))
print('Is it an identifier? ', entrada.isidentifier())
print('Is it ASCII? ', entrada.isascii())
print('Is it lowercase? ', entrada.islower())
print('Is it only spaces? ', entrada.isspace())
print('Is it uppercase? ', entrada.isupper())
print('Is it alphanumeric? ', entrada.isalnum())
print('Is it numeric? ', entrada.isnumeric())
print('Is it decimal? ', entrada.isdecimal())
print('Is it alphabetic? ', entrada.isalpha())
print('Is it printable? ', entrada.isprintable())
print('Is it a title? ', entrada.istitle())
print('Is it a digit? ', entrada.isdigit())
| 37.941176
| 67
| 0.700775
|
794cc1215e183e4c3a81015a2289e0d7695227a6
| 11,659
|
py
|
Python
|
lib/python3.6/site-packages/ansible/modules/network/nxos/nxos_vpc.py
|
Thekubebro/jupyter-playbook
|
7b14ddfdfca09e8a569b155d2604083692943986
|
[
"Apache-2.0"
] | null | null | null |
lib/python3.6/site-packages/ansible/modules/network/nxos/nxos_vpc.py
|
Thekubebro/jupyter-playbook
|
7b14ddfdfca09e8a569b155d2604083692943986
|
[
"Apache-2.0"
] | null | null | null |
lib/python3.6/site-packages/ansible/modules/network/nxos/nxos_vpc.py
|
Thekubebro/jupyter-playbook
|
7b14ddfdfca09e8a569b155d2604083692943986
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vpc
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages global VPC configuration
description:
- Manages global VPC configuration
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- The feature vpc must be enabled before this module can be used
- If not using management vrf, vrf must be globally on the device
before using in the pkl config
- Although source IP isn't required on the command line it is
required when using this module. The PKL VRF must also be configured
prior to using this module.
- Both pkl_src and pkl_dest are needed when changing PKL VRF.
options:
domain:
description:
- VPC domain
required: true
role_priority:
description:
- Role priority for device. Remember lower is better.
system_priority:
description:
- System priority of the device. Remember it must match between peers.
pkl_src:
description:
- Source IP address used for peer keepalive link
pkl_dest:
description:
- Destination (remote) IP address used for peer keepalive link
pkl_vrf:
description:
- VRF used for peer keepalive link
default: management
peer_gw:
description:
- Enables/Disables peer gateway
type: bool
auto_recovery:
description:
- Enables/Disables auto recovery
type: bool
delay_restore:
description:
- Manages the delay restore command and config value in seconds.
type: str
state:
description:
- Manages desired state of the resource
required: true
choices: ['present','absent']
'''
EXAMPLES = '''
- name: configure a simple asn
nxos_vpc:
domain: 100
role_priority: 1000
system_priority: 2000
pkl_dest: 192.168.100.4
pkl_src: 10.1.100.20
peer_gw: true
auto_recovery: true
- name: configure
nxos_vpc:
domain: 100
role_priority: 32667
system_priority: 2000
peer_gw: true
pkl_src: 10.1.100.2
pkl_dest: 192.168.100.4
auto_recovery: true
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vpc domain 100",
"peer-keepalive destination 192.168.100.4 source 10.1.100.20 vrf management",
"auto-recovery", "peer-gateway"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
CONFIG_ARGS = {
'role_priority': 'role priority {role_priority}',
'system_priority': 'system-priority {system_priority}',
'delay_restore': 'delay restore {delay_restore}',
'peer_gw': '{peer_gw} peer-gateway',
'auto_recovery': '{auto_recovery} auto-recovery',
}
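# Illustrative note (not part of the original module): a delta such as
# {'role_priority': '1000'} renders, via CONFIG_ARGS, to the CLI command
# "role priority 1000" inside get_commands_to_config_vpc() below.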
PARAM_TO_DEFAULT_KEYMAP = {
'delay_restore': '60',
'role_priority': '32667',
'system_priority': '32667',
'peer_gw': False,
}
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_vrf_list(module):
try:
body = run_commands(module, ['show vrf all | json'])[0]
vrf_table = body['TABLE_vrf']['ROW_vrf']
except (KeyError, AttributeError):
return []
vrf_list = []
if vrf_table:
for each in vrf_table:
vrf_list.append(str(each['vrf_name'].lower()))
return vrf_list
def get_auto_recovery_default(module):
auto = False
data = run_commands(module, ['show inventory | json'])[0]
pid = data['TABLE_inv']['ROW_inv'][0]['productid']
if re.search(r'N7K', pid):
auto = True
elif re.search(r'N9K', pid):
data = run_commands(module, ['show hardware | json'])[0]
ver = data['kickstart_ver_str']
if re.search(r'7.0\(3\)F', ver):
auto = True
return auto
def get_vpc(module):
body = run_commands(module, ['show vpc | json'])[0]
if body:
domain = str(body['vpc-domain-id'])
else:
body = run_commands(module, ['show run vpc | inc domain'])[0]
if body:
domain = body.split()[2]
else:
domain = 'not configured'
vpc = {}
if domain != 'not configured':
run = get_config(module, flags=['vpc'])
if run:
vpc['domain'] = domain
for key in PARAM_TO_DEFAULT_KEYMAP.keys():
vpc[key] = PARAM_TO_DEFAULT_KEYMAP.get(key)
vpc['auto_recovery'] = get_auto_recovery_default(module)
vpc_list = run.split('\n')
for each in vpc_list:
if 'role priority' in each:
line = each.split()
vpc['role_priority'] = line[-1]
if 'system-priority' in each:
line = each.split()
vpc['system_priority'] = line[-1]
if 'delay restore' in each:
line = each.split()
vpc['delay_restore'] = line[-1]
if 'no auto-recovery' in each:
vpc['auto_recovery'] = False
elif 'auto-recovery' in each:
vpc['auto_recovery'] = True
if 'peer-gateway' in each:
vpc['peer_gw'] = True
if 'peer-keepalive destination' in each:
line = each.split()
vpc['pkl_dest'] = line[2]
vpc['pkl_vrf'] = 'management'
if 'source' in each:
vpc['pkl_src'] = line[4]
if 'vrf' in each:
vpc['pkl_vrf'] = line[6]
else:
if 'vrf' in each:
vpc['pkl_vrf'] = line[4]
return vpc
def get_commands_to_config_vpc(module, vpc, domain, existing):
vpc = dict(vpc)
domain_only = vpc.get('domain')
commands = []
if 'pkl_dest' in vpc:
pkl_command = 'peer-keepalive destination {pkl_dest}'.format(**vpc)
if 'pkl_src' in vpc:
pkl_command += ' source {pkl_src}'.format(**vpc)
if 'pkl_vrf' in vpc and vpc['pkl_vrf'] != 'management':
pkl_command += ' vrf {pkl_vrf}'.format(**vpc)
commands.append(pkl_command)
if 'auto_recovery' in vpc:
if not vpc.get('auto_recovery'):
vpc['auto_recovery'] = 'no'
else:
vpc['auto_recovery'] = ''
if 'peer_gw' in vpc:
if not vpc.get('peer_gw'):
vpc['peer_gw'] = 'no'
else:
vpc['peer_gw'] = ''
for param in vpc:
command = CONFIG_ARGS.get(param)
if command is not None:
command = command.format(**vpc).strip()
if 'peer-gateway' in command:
commands.append('terminal dont-ask')
commands.append(command)
if commands or domain_only:
commands.insert(0, 'vpc domain {0}'.format(domain))
return commands
def main():
argument_spec = dict(
domain=dict(required=True, type='str'),
role_priority=dict(required=False, type='str'),
system_priority=dict(required=False, type='str'),
pkl_src=dict(required=False),
pkl_dest=dict(required=False),
pkl_vrf=dict(required=False),
peer_gw=dict(required=False, type='bool'),
auto_recovery=dict(required=False, type='bool'),
delay_restore=dict(required=False, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'warnings': warnings}
domain = module.params['domain']
role_priority = module.params['role_priority']
system_priority = module.params['system_priority']
pkl_src = module.params['pkl_src']
pkl_dest = module.params['pkl_dest']
pkl_vrf = module.params['pkl_vrf']
peer_gw = module.params['peer_gw']
auto_recovery = module.params['auto_recovery']
delay_restore = module.params['delay_restore']
state = module.params['state']
args = dict(domain=domain, role_priority=role_priority,
system_priority=system_priority, pkl_src=pkl_src,
pkl_dest=pkl_dest, pkl_vrf=pkl_vrf, peer_gw=peer_gw,
auto_recovery=auto_recovery,
delay_restore=delay_restore)
if not pkl_dest:
if pkl_src:
module.fail_json(msg='dest IP for peer-keepalive is required'
' when src IP is present')
elif pkl_vrf:
if pkl_vrf != 'management':
module.fail_json(msg='dest and src IP for peer-keepalive are required'
' when vrf is present')
else:
module.fail_json(msg='dest IP for peer-keepalive is required'
' when vrf is present')
if pkl_vrf:
if pkl_vrf.lower() not in get_vrf_list(module):
module.fail_json(msg='The VRF you are trying to use for the peer '
'keepalive link is not on device yet. Add it'
' first, please.')
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_vpc(module)
commands = []
if state == 'present':
delta = {}
for key, value in proposed.items():
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if existing.get(key) != value:
delta[key] = value
if delta:
command = get_commands_to_config_vpc(module, delta, domain, existing)
commands.append(command)
elif state == 'absent':
if existing:
if domain != existing['domain']:
module.fail_json(msg="You are trying to remove a domain that "
"does not exist on the device")
else:
commands.append('terminal dont-ask')
commands.append('no vpc domain {0}'.format(domain))
cmds = flatten_list(commands)
results['commands'] = cmds
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
module.exit_json(**results)
if __name__ == '__main__':
main()
| 32.118457
| 89
| 0.597135
|
794cc15c6b2e6600e641659903d78082a3eb4c2b
| 23,170
|
py
|
Python
|
oadenv/lib/python2.7/site-packages/django/db/backends/base/operations.py
|
isabernardes/Archaeodatabase
|
86090e8f840d5d202c15906e614d683f8a12d3bc
|
[
"MIT"
] | 7
|
2017-02-12T06:03:00.000Z
|
2020-12-31T11:57:35.000Z
|
oadenv/lib/python2.7/site-packages/django/db/backends/base/operations.py
|
isabernardes/Archaeodatabase
|
86090e8f840d5d202c15906e614d683f8a12d3bc
|
[
"MIT"
] | 10
|
2017-07-13T00:24:03.000Z
|
2017-07-17T07:39:03.000Z
|
oadenv/lib/python2.7/site-packages/django/db/backends/base/operations.py
|
isabernardes/Archaeodatabase
|
86090e8f840d5d202c15906e614d683f8a12d3bc
|
[
"MIT"
] | 7
|
2017-08-01T04:02:07.000Z
|
2018-10-06T21:07:20.000Z
|
import datetime
import decimal
import warnings
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
# RemovedInDjango20Warning
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
def to_unicode(s):
return force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc.). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes an SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
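# e.g. (illustrative) prep_for_like_query("50%_off") returns "50\%\_off",
# escaping the LIKE wildcards so they are matched literally.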
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_datefield_value(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
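# e.g. (illustrative) value=2015 yields ['2015-01-01', '2015-12-31']
# under the default text adaptation of dates above.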
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Get a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format, this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection, context):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_aggregate_support(self, aggregate_func):
warnings.warn(
"check_aggregate_support has been deprecated. Use "
"check_expression_support instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.check_expression_support(aggregate_func)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
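# e.g. (illustrative) combine_expression('+', ['"age"', '%s']) returns '"age" + %s'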
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return '%s'
def modify_insert_params(self, placeholder, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
raise NotImplementedError("This backend does not support %s subtraction." % internal_type)
| 38.108553
| 117
| 0.648899
|
794cc2b6a77d0b20456ade1ebfc64e113e0bb6b4
| 1,536
|
py
|
Python
|
src/veiws/class/parser.py
|
FrostyBonny/MSDevoDevelop
|
9e6f0685806c26d3e294fb976e422f67ab581124
|
[
"MIT"
] | 1
|
2019-05-15T03:17:27.000Z
|
2019-05-15T03:17:27.000Z
|
src/veiws/class/parser.py
|
FrostyBonny/MSDevoDevelop
|
9e6f0685806c26d3e294fb976e422f67ab581124
|
[
"MIT"
] | null | null | null |
src/veiws/class/parser.py
|
FrostyBonny/MSDevoDevelop
|
9e6f0685806c26d3e294fb976e422f67ab581124
|
[
"MIT"
] | null | null | null |
from flask_restful import reqparse
getParser = reqparse.RequestParser()
getParser.add_argument('name', type=str, help='please enter name')
getParser.add_argument('id', type=str, help='please enter id')
getParser.add_argument('type', type=str, help='please enter type')
getParser.add_argument('page', type=int, help='please enter page')
getParser.add_argument('limit', type=int, help='please enter limit')
getParser.add_argument('token', type=str, location='headers')
deleteParser = reqparse.RequestParser()
deleteParser.add_argument('id', type=int, help='please enter id', required=True)
deleteParser.add_argument('token', type=str, location='headers')
postParser = reqparse.RequestParser()
postParser.add_argument('id', type=str, help='please enter id', required=True)
postParser.add_argument('name', type=str, help='please enter name')
postParser.add_argument('header', type=str, help='please enter header')
postParser.add_argument('token', type=str, location='headers')
# putParser.add_argument('id',required=True)
# putParser.add_argument('total')
# putParser.add_argument('arrived')
# putParser.add_argument('name')
# putParser.add_argument('token')
putParser = reqparse.RequestParser()
# postParser.add_argument('id', type=int, help='please enter id', required=True)
putParser.add_argument('name', type=str, help='please enter name', required=True)
putParser.add_argument('header', type=str, help='please enter header', required=True)
putParser.add_argument('token', type=str, location='headers')
# args = parser.parse_args()
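# Minimal usage sketch (illustration only; the Resource and field choices below
# are hypothetical and not part of this module): a flask_restful Resource calls
# parse_args() on one of the parsers defined above to validate incoming data.
from flask_restful import Resource


class ExampleResource(Resource):
    def get(self):
        # Validates 'name', 'id', 'type', 'page', 'limit' and the 'token' header.
        args = getParser.parse_args()
        return {'id': args['id'], 'name': args['name'], 'page': args['page']}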
| 48
| 85
| 0.770833
|
794cc30a936c217d2f912a51492b5dc3f167ec95
| 3,065
|
py
|
Python
|
messenger/modules/weight_correctness.py
|
NCATS-Gamma/robokop-messenger
|
04cd6c614f0503ce7969eedab994abe6d548dde2
|
[
"MIT"
] | null | null | null |
messenger/modules/weight_correctness.py
|
NCATS-Gamma/robokop-messenger
|
04cd6c614f0503ce7969eedab994abe6d548dde2
|
[
"MIT"
] | 4
|
2020-03-26T12:05:56.000Z
|
2020-08-04T15:38:59.000Z
|
messenger/modules/weight_correctness.py
|
NCATS-Gamma/robokop-messenger
|
04cd6c614f0503ce7969eedab994abe6d548dde2
|
[
"MIT"
] | null | null | null |
"""Weight edges."""
from collections import defaultdict
import math
from typing import Optional
from fastapi import Query
from reasoner_pydantic import Request, Message
async def query(
request: Request,
relevance: Optional[float] = Query(
0.0025,
description='portion of cooccurrence pubs relevant to question',
),
wt_min: Optional[float] = Query(
0.0,
description='minimum weight (at 0 pubs)',
),
wt_max: Optional[float] = Query(
1.0,
description='maximum weight (at inf pubs)',
),
p50: Optional[float] = Query(
2.0,
description='pubs at 50% of wt_max',
),
) -> Message:
"""Weight kgraph edges based on metadata.
"19 pubs from CTD is a 1, and 2 should at least be 0.5"
- cbizon
"""
message = request.message.dict()
def sigmoid(x):
"""Scale with partial sigmoid - the right (concave down) half.
Such that:
f(0) = wt_min
f(inf) = wt_max
f(p50) = 0.5 * wt_max
"""
a = 2 * (wt_max - wt_min)
r = 0.5 * wt_max
c = wt_max - 2 * wt_min
k = 1 / p50 * (math.log(r + c) - math.log(a - r - c))
return a / (1 + math.exp(-k * x)) - c
kgraph = message['knowledge_graph']
node_pubs = {n['id']: n.get('omnicorp_article_count', None) for n in kgraph['nodes']}
all_pubs = 27840000
results = message['results']
# ensure that each edge_binding has a single kg_id
for result in results:
result['edge_bindings'] = [
eb
for ebs in result['edge_bindings']
for eb in (
[
{
'qg_id': ebs['qg_id'],
'kg_id': kg_id,
}
for kg_id in ebs['kg_id']
] if isinstance(ebs['kg_id'], list)
else [ebs]
)
]
# map kedges to edge_bindings
krmap = defaultdict(list)
for result in results:
for eb in result['edge_bindings']:
assert isinstance(eb['kg_id'], str)
eb['weight'] = eb.get('weight', 1.0)
krmap[eb['kg_id']].append(eb)
edges = kgraph['edges']
for edge in edges:
edge_pubs = edge.get('num_publications', len(edge.get('publications', [])))
if edge['type'] == 'literature_co-occurrence':
source_pubs = int(node_pubs[edge['source_id']])
target_pubs = int(node_pubs[edge['target_id']])
cov = (edge_pubs / all_pubs) - (source_pubs / all_pubs) * (target_pubs / all_pubs)
cov = max((cov, 0.0))
effective_pubs = cov * all_pubs * relevance
else:
effective_pubs = edge_pubs + 1 # consider the curation a pub
for redge in krmap[edge['id']]:
redge['weight'] = redge.get('weight', 1.0) * sigmoid(effective_pubs)
message['knowledge_graph'] = kgraph
return Message(**message)
| 30.959596
| 94
| 0.530506
|
794cc310d56af3fafc4e553fb157d7c61420f5eb
| 837
|
py
|
Python
|
ics2csv.py
|
Guiraud/csv-ical
|
60f55ae494ff42074742891784799c95acf6af6c
|
[
"MIT"
] | null | null | null |
ics2csv.py
|
Guiraud/csv-ical
|
60f55ae494ff42074742891784799c95acf6af6c
|
[
"MIT"
] | null | null | null |
ics2csv.py
|
Guiraud/csv-ical
|
60f55ae494ff42074742891784799c95acf6af6c
|
[
"MIT"
] | null | null | null |
from csv_ical import Convert
convert = Convert()
convert.CSV_FILE_LOCATION = 'mg.csv'
convert.SAVE_LOCATION = 'mg.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
convert = Convert()
convert.CSV_FILE_LOCATION = 'mg_CNAM.csv'
convert.SAVE_LOCATION = 'mg_CNAM.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
convert = Convert()
convert.CSV_FILE_LOCATION = 'mg_CSE.csv'
convert.SAVE_LOCATION = 'mg_CSE.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
convert = Convert()
convert.CSV_FILE_LOCATION = 'mg_DS.csv'
convert.SAVE_LOCATION = 'mg_DS.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
| 24.617647
| 43
| 0.81362
|
794cc37e26cc2e63cc4c858d7a3b45a00ceb467a
| 7,842
|
py
|
Python
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_largefile.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_largefile.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_largefile.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
"""Test largefile support on system where this makes sense.
"""
from __future__ import print_function
import os
import stat
import sys
import unittest
from test.test_support import run_unittest, TESTFN, verbose, requires, \
unlink
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import signal
# The default handler for SIGXFSZ is to abort the process.
# By ignoring it, system calls exceeding the file size resource
# limit will raise IOError instead of crashing the interpreter.
oldhandler = signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
except (ImportError, AttributeError):
pass
# create >2GB file (2GB = 2147483648 bytes)
size = 2500000000
class LargeFileTest(unittest.TestCase):
"""Test that each file function works as expected for a large
(i.e. > 2GB, do we have to check > 4GB) files.
NOTE: the order of execution of the test methods is important! test_seek
must run first to create the test file. File cleanup must also be handled
outside the test instances because of this.
"""
def test_seek(self):
if verbose:
print('create large file via seek (may be sparse file) ...')
with self.open(TESTFN, 'wb') as f:
f.write(b'z')
f.seek(0)
f.seek(size)
f.write(b'a')
f.flush()
if verbose:
print('check file size with os.fstat')
self.assertEqual(os.fstat(f.fileno())[stat.ST_SIZE], size+1)
def test_osstat(self):
if verbose:
print('check file size with os.stat')
self.assertEqual(os.stat(TESTFN)[stat.ST_SIZE], size+1)
def test_seek_read(self):
if verbose:
print('play around with seek() and read() with the built largefile')
with self.open(TESTFN, 'rb') as f:
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(1), b'z')
self.assertEqual(f.tell(), 1)
f.seek(0)
self.assertEqual(f.tell(), 0)
f.seek(0, 0)
self.assertEqual(f.tell(), 0)
f.seek(42)
self.assertEqual(f.tell(), 42)
f.seek(42, 0)
self.assertEqual(f.tell(), 42)
f.seek(42, 1)
self.assertEqual(f.tell(), 84)
f.seek(0, 1)
self.assertEqual(f.tell(), 84)
f.seek(0, 2) # seek from the end
self.assertEqual(f.tell(), size + 1 + 0)
f.seek(-10, 2)
self.assertEqual(f.tell(), size + 1 - 10)
f.seek(-size-1, 2)
self.assertEqual(f.tell(), 0)
f.seek(size)
self.assertEqual(f.tell(), size)
# the 'a' that was written at the end of file above
self.assertEqual(f.read(1), b'a')
f.seek(-size-1, 1)
self.assertEqual(f.read(1), b'z')
self.assertEqual(f.tell(), 1)
def test_lseek(self):
if verbose:
print('play around with os.lseek() with the built largefile')
with self.open(TESTFN, 'rb') as f:
self.assertEqual(os.lseek(f.fileno(), 0, 0), 0)
self.assertEqual(os.lseek(f.fileno(), 42, 0), 42)
self.assertEqual(os.lseek(f.fileno(), 42, 1), 84)
self.assertEqual(os.lseek(f.fileno(), 0, 1), 84)
self.assertEqual(os.lseek(f.fileno(), 0, 2), size+1+0)
self.assertEqual(os.lseek(f.fileno(), -10, 2), size+1-10)
self.assertEqual(os.lseek(f.fileno(), -size-1, 2), 0)
self.assertEqual(os.lseek(f.fileno(), size, 0), size)
# the 'a' that was written at the end of file above
self.assertEqual(f.read(1), b'a')
def test_truncate(self):
if verbose:
print('try truncate')
with self.open(TESTFN, 'r+b') as f:
            # this is already decided before we start running the test suite
# but we do it anyway for extra protection
if not hasattr(f, 'truncate'):
raise unittest.SkipTest("open().truncate() not available on this system")
f.seek(0, 2)
# else we've lost track of the true size
self.assertEqual(f.tell(), size+1)
# Cut it back via seek + truncate with no argument.
newsize = size - 10
f.seek(newsize)
f.truncate()
self.assertEqual(f.tell(), newsize) # else pointer moved
f.seek(0, 2)
self.assertEqual(f.tell(), newsize) # else wasn't truncated
# Ensure that truncate(smaller than true size) shrinks
# the file.
newsize -= 1
f.seek(42)
f.truncate(newsize)
if self.new_io:
self.assertEqual(f.tell(), 42)
f.seek(0, 2)
self.assertEqual(f.tell(), newsize)
# XXX truncate(larger than true size) is ill-defined
# across platform; cut it waaaaay back
f.seek(0)
f.truncate(1)
if self.new_io:
self.assertEqual(f.tell(), 0) # else pointer moved
f.seek(0)
self.assertEqual(len(f.read()), 1) # else wasn't truncated
def test_seekable(self):
# Issue #5016; seekable() can return False when the current position
# is negative when truncated to an int.
if not self.new_io:
self.skipTest("builtin file doesn't have seekable()")
for pos in (2**31-1, 2**31, 2**31+1):
with self.open(TESTFN, 'rb') as f:
f.seek(pos)
self.assertTrue(f.seekable())
def test_main():
    # On Windows and Mac OS X this test consumes large resources; it
# takes a long time to build the >2GB file and takes >2GB of disk
# space therefore the resource must be enabled to run this test.
# If not, nothing after this line stanza will be executed.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
requires('largefile',
'test requires %s bytes and a long time to run' % str(size))
else:
# Only run if the current filesystem supports large files.
# (Skip this test on Windows, since we now always support
# large files.)
f = open(TESTFN, 'wb', buffering=0)
try:
# 2**31 == 2147483648
f.seek(2147483649)
# Seeking is not enough of a test: you must write and
# flush, too!
f.write(b'x')
f.flush()
except (IOError, OverflowError):
f.close()
unlink(TESTFN)
raise unittest.SkipTest("filesystem does not have largefile support")
else:
f.close()
suite = unittest.TestSuite()
for _open, prefix in [(io.open, 'C'), (pyio.open, 'Py'),
(open, 'Builtin')]:
class TestCase(LargeFileTest):
pass
TestCase.open = staticmethod(_open)
TestCase.new_io = _open is not open
TestCase.__name__ = prefix + LargeFileTest.__name__
suite.addTest(TestCase('test_seek'))
suite.addTest(TestCase('test_osstat'))
suite.addTest(TestCase('test_seek_read'))
suite.addTest(TestCase('test_lseek'))
with _open(TESTFN, 'wb') as f:
if hasattr(f, 'truncate'):
suite.addTest(TestCase('test_truncate'))
suite.addTest(TestCase('test_seekable'))
unlink(TESTFN)
try:
run_unittest(suite)
finally:
unlink(TESTFN)
if __name__ == '__main__':
test_main()
| 39.014925
| 90
| 0.551135
|
794cc3beb6f6b9e71383fb7f2e9339342830a82f
| 504
|
py
|
Python
|
utils/decorators.py
|
enaluz/cis450-topical-analysis
|
69da5c184b207598548cbf305ee69e09739c557a
|
[
"MIT"
] | null | null | null |
utils/decorators.py
|
enaluz/cis450-topical-analysis
|
69da5c184b207598548cbf305ee69e09739c557a
|
[
"MIT"
] | 6
|
2020-04-24T03:28:32.000Z
|
2021-09-08T01:55:26.000Z
|
utils/decorators.py
|
enaluz/cis450-topical-analysis
|
69da5c184b207598548cbf305ee69e09739c557a
|
[
"MIT"
] | null | null | null |
def exceptionHandler(childFunction):
def higherOrderFunction(*args, **kwargs):
try:
return childFunction(*args, **kwargs)
except Exception as e:
print("Caught Error: ", e)
pass
return higherOrderFunction
def classDecorator(decorator):
def decorate(cls):
for attr in cls.__dict__:
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
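# Minimal usage sketch (the class below is hypothetical and not part of the
# original module): classDecorator(exceptionHandler) wraps every callable
# attribute of a class so that exceptions are printed instead of propagating.
@classDecorator(exceptionHandler)
class SafeMath:
    def divide(self, a, b):
        return a / b  # SafeMath().divide(1, 0) prints the error and returns None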
| 28
| 65
| 0.603175
|
794cc3ee19cc60276ee0b435f1551be1e97a3ee1
| 2,627
|
py
|
Python
|
models/admin_control.py
|
chaoannricardo/NTU_CARDO_Database
|
5fbfa1383f2e65a04fabd863c68373f45bbf05fd
|
[
"Apache-2.0"
] | 1
|
2020-07-04T22:30:41.000Z
|
2020-07-04T22:30:41.000Z
|
models/admin_control.py
|
chaoannricardo/NTU_CARDO_Database
|
5fbfa1383f2e65a04fabd863c68373f45bbf05fd
|
[
"Apache-2.0"
] | null | null | null |
models/admin_control.py
|
chaoannricardo/NTU_CARDO_Database
|
5fbfa1383f2e65a04fabd863c68373f45bbf05fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf8 -*-
from time import sleep as t_sleep
import configuration as conf
from models import data_processing, database_management, file_management
import pymysql
from views import view_CLI
def admin_control():
print("【管理員模式】")
print("0. 產生主表(請使用專用表格)")
command = input("# 請輸入您所需要的功能,或輸入'exit'返回主選單: ")
if command == 'exit':
print("# 返回主選單")
t_sleep(1)
elif command == "0":
# "C:\Users\ricardo\Desktop\Data\0311_藍天百腦匯報名清單(登陸出席).csv"
while True:
account = input("# 請輸入帳號: ")
password = input("# 請輸入密碼: ")
try:
config = conf.get_config(account, password)
                # Identity verification
                print('# Logging in....')
                conn = database_management.pymysql_connect(**config)
                print("# Login successful, welcome back", account, '\n\n')
t_sleep(1)
break
except pymysql.err.OperationalError:
print("# 您輸入的帳號或密碼錯誤,請再輸入一次。\n\n")
        # 12. [Post-event data filing] Generate the "calculation-completed statistics table" from the "registered attendance statistics table" and insert it into the database
# "C:\Users\ricardo\Desktop\Data\0311_藍天百腦匯報名清單(登陸出席).csv"
# Produce csv file after processing
path, sem, semester_first, semester_second, fc, sc, date = view_CLI.get_information("10")
file_source = file_management.File(path, sem, semester_first, semester_second, fc, sc, date)
file_source.get_file()
data_source = data_processing.Data(file_source.year,
file_source.semester,
file_source.file_path,
file_source.first_cat,
file_source.second_cat)
data, produced_df_path = data_source.data_processing()
file_management.remove_temp()
        print('# CSV generated successfully')
        print('# Starting to insert the generated csv into the database...')
# set name of the table
db_connection = database_management.DataConnection(data, config, fc, sc, date)
# create new table for the data
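        # Table-name strings are left in the original Chinese:
        # 主資料表 = "master data table", 黑名單統計表 = "blacklist statistics table".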
db_connection.create_table("主資料表")
'''
To tackle 'The MySQL server is running with the --secure-file-priv option so it cannot execute this statement' error
reference: https://blog.csdn.net/fdipzone/article/details/78634992
'''
# insert data into mysql table
db_connection.insert_table("主資料表")
db_connection.create_table("黑名單統計表")
db_connection.insert_table("黑名單統計表")
print("# 資料輸入資料庫成功,返回主選單")
t_sleep(1)
file_management.remove_temp()
if __name__ == '__main__':
admin_control()
| 39.80303
| 124
| 0.584697
|
794cc404f6714d2bfc8ab828db342ba43ac28df0
| 9,745
|
py
|
Python
|
ucsmsdk/mometa/compute/ComputePCIeFatalProtocolStats.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 78
|
2015-11-30T14:10:05.000Z
|
2022-02-13T00:29:08.000Z
|
ucsmsdk/mometa/compute/ComputePCIeFatalProtocolStats.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 113
|
2015-11-20T09:42:46.000Z
|
2022-03-16T16:53:29.000Z
|
ucsmsdk/mometa/compute/ComputePCIeFatalProtocolStats.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 86
|
2015-12-12T08:22:18.000Z
|
2022-01-23T03:56:34.000Z
|
"""This module contains the general information for ComputePCIeFatalProtocolStats ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ComputePCIeFatalProtocolStatsConsts:
SUSPECT_FALSE = "false"
SUSPECT_NO = "no"
SUSPECT_TRUE = "true"
SUSPECT_YES = "yes"
class ComputePCIeFatalProtocolStats(ManagedObject):
"""This is ComputePCIeFatalProtocolStats class."""
consts = ComputePCIeFatalProtocolStatsConsts()
naming_props = set([])
mo_meta = MoMeta("ComputePCIeFatalProtocolStats", "computePCIeFatalProtocolStats", "pciefat-protocol-stats", VersionMeta.Version111j, "OutputOnly", 0xf, [], ["admin", "operations", "read-only"], ['computeBoard'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dllp_errors": MoPropertyMeta("dllp_errors", "dllpErrors", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors15_min": MoPropertyMeta("dllp_errors15_min", "dllpErrors15Min", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors15_min_h": MoPropertyMeta("dllp_errors15_min_h", "dllpErrors15MinH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors1_day": MoPropertyMeta("dllp_errors1_day", "dllpErrors1Day", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors1_day_h": MoPropertyMeta("dllp_errors1_day_h", "dllpErrors1DayH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors1_hour": MoPropertyMeta("dllp_errors1_hour", "dllpErrors1Hour", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors1_hour_h": MoPropertyMeta("dllp_errors1_hour_h", "dllpErrors1HourH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors1_week": MoPropertyMeta("dllp_errors1_week", "dllpErrors1Week", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors1_week_h": MoPropertyMeta("dllp_errors1_week_h", "dllpErrors1WeekH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors2_weeks": MoPropertyMeta("dllp_errors2_weeks", "dllpErrors2Weeks", "uint", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dllp_errors2_weeks_h": MoPropertyMeta("dllp_errors2_weeks_h", "dllpErrors2WeeksH", "uint", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"flow_control_errors": MoPropertyMeta("flow_control_errors", "flowControlErrors", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors15_min": MoPropertyMeta("flow_control_errors15_min", "flowControlErrors15Min", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors15_min_h": MoPropertyMeta("flow_control_errors15_min_h", "flowControlErrors15MinH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors1_day": MoPropertyMeta("flow_control_errors1_day", "flowControlErrors1Day", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors1_day_h": MoPropertyMeta("flow_control_errors1_day_h", "flowControlErrors1DayH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors1_hour": MoPropertyMeta("flow_control_errors1_hour", "flowControlErrors1Hour", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors1_hour_h": MoPropertyMeta("flow_control_errors1_hour_h", "flowControlErrors1HourH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors1_week": MoPropertyMeta("flow_control_errors1_week", "flowControlErrors1Week", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors1_week_h": MoPropertyMeta("flow_control_errors1_week_h", "flowControlErrors1WeekH", "uint", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors2_weeks": MoPropertyMeta("flow_control_errors2_weeks", "flowControlErrors2Weeks", "uint", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"flow_control_errors2_weeks_h": MoPropertyMeta("flow_control_errors2_weeks_h", "flowControlErrors2WeeksH", "uint", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"intervals": MoPropertyMeta("intervals", "intervals", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"update": MoPropertyMeta("update", "update", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dllpErrors": "dllp_errors",
"dllpErrors15Min": "dllp_errors15_min",
"dllpErrors15MinH": "dllp_errors15_min_h",
"dllpErrors1Day": "dllp_errors1_day",
"dllpErrors1DayH": "dllp_errors1_day_h",
"dllpErrors1Hour": "dllp_errors1_hour",
"dllpErrors1HourH": "dllp_errors1_hour_h",
"dllpErrors1Week": "dllp_errors1_week",
"dllpErrors1WeekH": "dllp_errors1_week_h",
"dllpErrors2Weeks": "dllp_errors2_weeks",
"dllpErrors2WeeksH": "dllp_errors2_weeks_h",
"dn": "dn",
"flowControlErrors": "flow_control_errors",
"flowControlErrors15Min": "flow_control_errors15_min",
"flowControlErrors15MinH": "flow_control_errors15_min_h",
"flowControlErrors1Day": "flow_control_errors1_day",
"flowControlErrors1DayH": "flow_control_errors1_day_h",
"flowControlErrors1Hour": "flow_control_errors1_hour",
"flowControlErrors1HourH": "flow_control_errors1_hour_h",
"flowControlErrors1Week": "flow_control_errors1_week",
"flowControlErrors1WeekH": "flow_control_errors1_week_h",
"flowControlErrors2Weeks": "flow_control_errors2_weeks",
"flowControlErrors2WeeksH": "flow_control_errors2_weeks_h",
"intervals": "intervals",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"suspect": "suspect",
"thresholded": "thresholded",
"timeCollected": "time_collected",
"update": "update",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.dllp_errors = None
self.dllp_errors15_min = None
self.dllp_errors15_min_h = None
self.dllp_errors1_day = None
self.dllp_errors1_day_h = None
self.dllp_errors1_hour = None
self.dllp_errors1_hour_h = None
self.dllp_errors1_week = None
self.dllp_errors1_week_h = None
self.dllp_errors2_weeks = None
self.dllp_errors2_weeks_h = None
self.flow_control_errors = None
self.flow_control_errors15_min = None
self.flow_control_errors15_min_h = None
self.flow_control_errors1_day = None
self.flow_control_errors1_day_h = None
self.flow_control_errors1_hour = None
self.flow_control_errors1_hour_h = None
self.flow_control_errors1_week = None
self.flow_control_errors1_week_h = None
self.flow_control_errors2_weeks = None
self.flow_control_errors2_weeks_h = None
self.intervals = None
self.sacl = None
self.status = None
self.suspect = None
self.thresholded = None
self.time_collected = None
self.update = None
ManagedObject.__init__(self, "ComputePCIeFatalProtocolStats", parent_mo_or_dn, **kwargs)
| 76.732283
| 258
| 0.701591
|
794cc41dda9b8b4253613c304f72eac016fdb50f
| 78
|
py
|
Python
|
tasks/__init__.py
|
cyente/OFA
|
291a0abb76559a6379f1a7ebbdfdf1350c94a9f4
|
[
"Apache-2.0"
] | null | null | null |
tasks/__init__.py
|
cyente/OFA
|
291a0abb76559a6379f1a7ebbdfdf1350c94a9f4
|
[
"Apache-2.0"
] | null | null | null |
tasks/__init__.py
|
cyente/OFA
|
291a0abb76559a6379f1a7ebbdfdf1350c94a9f4
|
[
"Apache-2.0"
] | null | null | null |
from .mm_tasks import *
from .rec_tasks import *
from .ofa_task import OFATask
| 26
| 29
| 0.794872
|
794cc42d49405276791b9a5ff580e7fb9fa1abfd
| 6,620
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_interface_ext_rpc/get_interface_switchport/output/switchport/inactive_vlans/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_interface_ext_rpc/get_interface_switchport/output/switchport/inactive_vlans/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_interface_ext_rpc/get_interface_switchport/output/switchport/inactive_vlans/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class inactive_vlans(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface-ext - based on the path /brocade_interface_ext_rpc/get-interface-switchport/output/switchport/inactive-vlans. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: A conceptual group indicating the in-active
vlans for this switch-port interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__vlanid',)
_yang_name = 'inactive-vlans'
_rest_name = 'inactive-vlans'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__vlanid = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']})), is_leaf=False, yang_name="vlanid", rest_name="vlanid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='interface:vlan-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_interface_ext_rpc', u'get-interface-switchport', u'output', u'switchport', u'inactive-vlans']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-interface-switchport', u'output', u'switchport', u'inactive-vlans']
def _get_vlanid(self):
"""
Getter method for vlanid, mapped from YANG variable /brocade_interface_ext_rpc/get_interface_switchport/output/switchport/inactive_vlans/vlanid (interface:vlan-type)
YANG Description: This is a list in-active vlan
identifiers.
"""
return self.__vlanid
def _set_vlanid(self, v, load=False):
"""
Setter method for vlanid, mapped from YANG variable /brocade_interface_ext_rpc/get_interface_switchport/output/switchport/inactive_vlans/vlanid (interface:vlan-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlanid is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlanid() directly.
YANG Description: This is a list in-active vlan
identifiers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']})), is_leaf=False, yang_name="vlanid", rest_name="vlanid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='interface:vlan-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlanid must be of a type compatible with interface:vlan-type""",
'defined-type': "interface:vlan-type",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']})), is_leaf=False, yang_name="vlanid", rest_name="vlanid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='interface:vlan-type', is_config=True)""",
})
self.__vlanid = t
if hasattr(self, '_set'):
self._set()
def _unset_vlanid(self):
self.__vlanid = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']})), is_leaf=False, yang_name="vlanid", rest_name="vlanid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='interface:vlan-type', is_config=True)
vlanid = __builtin__.property(_get_vlanid, _set_vlanid)
_pyangbind_elements = {'vlanid': vlanid, }
| 50.151515
| 546
| 0.72855
|
794cc457055c157ef0f1677b93747b8a911ead13
| 16,472
|
py
|
Python
|
strawberryfields/apps/qchem/dynamics.py
|
Bayaniblues/strawberryfields
|
9d9e2f4488ef3783d3d4b2f226afac0bc431257e
|
[
"Apache-2.0"
] | null | null | null |
strawberryfields/apps/qchem/dynamics.py
|
Bayaniblues/strawberryfields
|
9d9e2f4488ef3783d3d4b2f226afac0bc431257e
|
[
"Apache-2.0"
] | null | null | null |
strawberryfields/apps/qchem/dynamics.py
|
Bayaniblues/strawberryfields
|
9d9e2f4488ef3783d3d4b2f226afac0bc431257e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Functions used for simulating vibrational quantum dynamics of molecules.
Photonic quantum devices can be programmed with molecular data in order to simulate the quantum
dynamics of spatially-localized vibrations in molecules :cite:`sparrow2018simulating`. To that aim,
the quantum device has to be programmed to implement the transformation:
.. math::
U(t) = U_l e^{-i\hat{H}t/\hbar} U_l^\dagger,
where :math:`\hat{H} = \sum_i \hbar \omega_i a_i^\dagger a_i` is the Hamiltonian corresponding to
the harmonic normal modes, :math:`\omega_i` is the vibrational frequency of the :math:`i`-th normal
mode, :math:`t` is time, and :math:`U_l` is a unitary matrix that relates the normal modes to a set
of new modes that are localized on specific bonds or groups in a molecule. The matrix :math:`U_l`
can be obtained by maximizing the sum of the squares of the atomic contributions to the modes
:cite:`jacob2009localizing`. Having :math:`U_l` and :math:`\omega` for a given molecule, and assuming
that it is possible to prepare the initial states of the mode, one can simulate the dynamics of
vibrational excitations in the localized basis at any given time :math:`t`. This process has three
main parts:
- Preparation of an initial vibrational state.
- Application of the dynamics transformation :math:`U(t)`.
- Generating samples and computing the probability of observing desired states.
It is noted that the initial states can be prepared in different ways. For instance, they can be
Fock states or Gaussian states such as coherent states or two-mode squeezed vacuum states.
Algorithm
---------
The algorithm for simulating the vibrational quantum dynamics in the localized basis with a photonic
device has the following form:
1. Each optical mode is assigned to a vibrational local mode and a specific initial excitation is
   created using one of the state preparation methods discussed. A list of state preparation
methods available in Strawberry Fields is provided :doc:`here </introduction/ops>`.
2. An interferometer is configured according to the unitary :math:`U_l^\dagger` and the initial
state is propagated through the interferometer.
3. For each mode, a rotation gate is designed as :math:`R(\theta) = \exp(i\theta \hat{a}^{\dagger}\hat{a})`
where :math:`\theta = -\omega t`.
4. A second interferometer is configured according to the unitary :math:`U_l` and the new state
is propagated through the interferometer.
5. The number of photons in each output mode is measured.
6. Samples are generated and the probability of obtaining a specific excitation in a given mode
(or modes) is computed for time :math:`t`.
This module contains functions for implementing this algorithm.
- The function :func:`~.TimeEvolution` is an operation that contains the required
rotation operations explained in step 3 of the algorithm.
- The function :func:`~.sample_fock` generates samples for simulating vibrational quantum dynamics
in molecules with a Fock input state.
- The function :func:`~.sample_coherent` generates samples for simulating vibrational quantum
dynamics in molecules with a coherent input state.
- The function :func:`~.sample_tmsv` generates samples for simulating vibrational quantum dynamics
in molecules with a two-mode squeezed vacuum input state.
- The function :func:`~.prob` estimates the probability of observing a desired excitation in the
generated samples.
- The function :func:`~.marginals` generates single-mode marginal distributions from the
displacement vector and covariance matrix of a Gaussian state.
"""
import warnings
import numpy as np
from scipy.constants import c, pi
from thewalrus import quantum
import strawberryfields as sf
from strawberryfields.utils import operation
def TimeEvolution(w: np.ndarray, t: float):
r"""An operation for performing the transformation
:math:`e^{-i\hat{H}t/\hbar}` on a given state where :math:`\hat{H} = \sum_i \hbar \omega_i a_i^\dagger a_i`
defines a Hamiltonian of independent quantum harmonic oscillators
This operation can be used as part of a Strawberry Fields :class:`~.Program` just like any
other operation from the :mod:`~.ops` module.
**Example usage:**
>>> modes = 2
>>> p = sf.Program(modes)
>>> with p.context as q:
>>> sf.ops.Fock(1) | q[0]
>>> sf.ops.Interferometer(Ul.T) | q
>>> TimeEvolution(w, t) | q
>>> sf.ops.Interferometer(Ul) | q
Args:
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` that
compose the Hamiltonian :math:`\hat{H} = \sum_i \hbar \omega_i a_i^\dagger a_i`
t (float): time in femtoseconds
"""
# pylint: disable=expression-not-assigned
n_modes = len(w)
@operation(n_modes)
def op(q):
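        # w is given in cm^-1: multiplying by 100*c converts it to Hz, the
        # 1.0e-15 factor converts t from femtoseconds to seconds, and 2*pi
        # turns the accumulated phase into radians for the rotation gates.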
theta = -w * 100.0 * c * 1.0e-15 * t * (2.0 * pi)
for i in range(n_modes):
sf.ops.Rgate(theta[i]) | q[i]
return op()
def sample_fock(
input_state: list,
t: float,
Ul: np.ndarray,
w: np.ndarray,
n_samples: int,
cutoff: int,
loss: float = 0.0,
) -> list:
r"""Generate samples for simulating vibrational quantum dynamics with an input Fock state.
**Example usage:**
>>> input_state = [0, 2]
>>> t = 10.0
>>> Ul = np.array([[0.707106781, -0.707106781],
>>> [0.707106781, 0.707106781]])
>>> w = np.array([3914.92, 3787.59])
>>> n_samples = 5
>>> cutoff = 5
>>> sample_fock(input_state, t, Ul, w, n_samples, cutoff)
[[0, 2], [0, 2], [1, 1], [0, 2], [0, 2]]
Args:
input_state (list): input Fock state
t (float): time in femtoseconds
Ul (array): normal-to-local transformation matrix
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}`
n_samples (int): number of samples to be generated
cutoff (int): cutoff dimension for each mode
loss (float): loss parameter denoting the fraction of lost photons
Returns:
list[list[int]]: a list of samples
"""
if np.any(np.iscomplex(Ul)):
raise ValueError("The normal mode to local mode transformation matrix must be real")
if n_samples < 1:
raise ValueError("Number of samples must be at least one")
if not len(input_state) == len(Ul):
raise ValueError(
"Number of modes in the input state and the normal-to-local transformation"
" matrix must be equal"
)
if np.any(np.array(input_state) < 0):
raise ValueError("Input state must not contain negative values")
if max(input_state) >= cutoff:
raise ValueError("Number of photons in each input mode must be smaller than cutoff")
modes = len(Ul)
s = []
eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff})
prog = sf.Program(modes)
# pylint: disable=expression-not-assigned
with prog.context as q:
for i in range(modes):
sf.ops.Fock(input_state[i]) | q[i]
sf.ops.Interferometer(Ul.T) | q
TimeEvolution(w, t) | q
sf.ops.Interferometer(Ul) | q
if loss:
for _q in q:
sf.ops.LossChannel(1 - loss) | _q
sf.ops.MeasureFock() | q
for _ in range(n_samples):
s.append(eng.run(prog).samples[0].tolist())
return s
def prob(samples: list, excited_state: list) -> float:
r"""Estimate probability of observing an excited state.
The probability is estimated by calculating the relative frequency of the excited
state among the samples.
**Example usage:**
>>> excited_state = [0, 2]
>>> samples = [[0, 2], [1, 1], [0, 2], [2, 0], [1, 1], [0, 2], [1, 1], [1, 1], [1, 1]]
>>> prob(samples, excited_state)
0.3333333333333333
Args:
        samples (list[list[int]]): a list of samples
excited_state (list): a Fock state
Returns:
float: probability of observing a Fock state in the given samples
"""
if len(samples) == 0:
raise ValueError("The samples list must not be empty")
if len(excited_state) == 0:
raise ValueError("The excited state list must not be empty")
if not len(excited_state) == len(samples[0]):
raise ValueError("The number of modes in the samples and the excited state must be equal")
if np.any(np.array(excited_state) < 0):
raise ValueError("The excited state must not contain negative values")
return samples.count(excited_state) / len(samples)
def sample_tmsv(
r: list,
t: float,
Ul: np.ndarray,
w: np.ndarray,
n_samples: int,
loss: float = 0.0,
) -> list:
r"""Generate samples for simulating vibrational quantum dynamics with a two-mode squeezed
vacuum input state.
This function generates samples from a GBS device with two-mode squeezed vacuum input states.
Given :math:`N` squeezing parameters and an :math:`N`-dimensional normal-to-local transformation
matrix, a GBS device with :math:`2N` modes is simulated. The :func:`~.TimeEvolution` operator
acts only on the first :math:`N` modes in the device. Samples are generated by measuring the
number of photons in each of the :math:`2N` modes.
**Example usage:**
>>> r = [[0.2, 0.1], [0.8, 0.2]]
>>> t = 10.0
>>> Ul = np.array([[0.707106781, -0.707106781],
>>> [0.707106781, 0.707106781]])
>>> w = np.array([3914.92, 3787.59])
>>> n_samples = 5
>>> sample_tmsv(r, t, Ul, w, n_samples)
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 2, 0, 2]]
Args:
r (list[list[float]]): list of two-mode squeezing gate parameters given as ``[amplitude, phase]`` for all modes
t (float): time in femtoseconds
Ul (array): normal-to-local transformation matrix
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}`
n_samples (int): number of samples to be generated
loss (float): loss parameter denoting the fraction of lost photons
Returns:
list[list[int]]: a list of samples
"""
if np.any(np.iscomplex(Ul)):
raise ValueError("The normal mode to local mode transformation matrix must be real")
if n_samples < 1:
raise ValueError("Number of samples must be at least one")
if not len(r) == len(Ul):
raise ValueError(
"Number of squeezing parameters and the number of modes in the normal-to-local"
" transformation matrix must be equal"
)
N = len(Ul)
eng = sf.LocalEngine(backend="gaussian")
prog = sf.Program(2 * N)
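    # Each S2gate entangles local mode i with an idler mode i + N; the
    # interferometers and TimeEvolution below act only on the first N modes.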
# pylint: disable=expression-not-assigned
with prog.context as q:
for i in range(N):
sf.ops.S2gate(r[i][0], r[i][1]) | (q[i], q[i + N])
sf.ops.Interferometer(Ul.T) | q[:N]
TimeEvolution(w, t) | q[:N]
sf.ops.Interferometer(Ul) | q[:N]
if loss:
for _q in q:
sf.ops.LossChannel(1 - loss) | _q
sf.ops.MeasureFock() | q
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning, message="Cannot simulate non-")
s = eng.run(prog, shots=n_samples).samples
return s.tolist()
def sample_coherent(
alpha: list,
t: float,
Ul: np.ndarray,
w: np.ndarray,
n_samples: int,
loss: float = 0.0,
) -> list:
r"""Generate samples for simulating vibrational quantum dynamics with an input coherent state.
**Example usage:**
>>> alpha = [[0.3, 0.5], [1.4, 0.1]]
>>> t = 10.0
>>> Ul = np.array([[0.707106781, -0.707106781],
>>> [0.707106781, 0.707106781]])
>>> w = np.array([3914.92, 3787.59])
>>> n_samples = 5
>>> sample_coherent(alpha, t, Ul, w, n_samples)
[[0, 2], [0, 1], [0, 3], [0, 2], [0, 1]]
Args:
alpha (list[list[float]]): list of displacement parameters given as ``[magnitudes, angles]``
for all modes
t (float): time in femtoseconds
Ul (array): normal-to-local transformation matrix
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}`
n_samples (int): number of samples to be generated
loss (float): loss parameter denoting the fraction of lost photons
Returns:
list[list[int]]: a list of samples
"""
if np.any(np.iscomplex(Ul)):
raise ValueError("The normal mode to local mode transformation matrix must be real")
if n_samples < 1:
raise ValueError("Number of samples must be at least one")
if not len(alpha) == len(Ul):
raise ValueError(
"Number of displacement parameters and the number of modes in the normal-to-local"
" transformation matrix must be equal"
)
modes = len(Ul)
eng = sf.LocalEngine(backend="gaussian")
prog = sf.Program(modes)
# pylint: disable=expression-not-assigned
with prog.context as q:
for i in range(modes):
sf.ops.Dgate(alpha[i][0], alpha[i][1]) | q[i]
sf.ops.Interferometer(Ul.T) | q
TimeEvolution(w, t) | q
sf.ops.Interferometer(Ul) | q
if loss:
for _q in q:
sf.ops.LossChannel(1 - loss) | _q
sf.ops.MeasureFock() | q
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning, message="Cannot simulate non-")
s = eng.run(prog, shots=n_samples).samples
return s.tolist()
def marginals(mu: np.ndarray, V: np.ndarray, n_max: int, hbar: float = 2.0) -> np.ndarray:
r"""Generate single-mode marginal distributions from the displacement vector and covariance
matrix of a Gaussian state.
**Example usage:**
>>> mu = np.array([0.00000000, 2.82842712, 0.00000000,
>>> 0.00000000, 0.00000000, 0.00000000])
>>> V = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
>>> [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
>>> [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
>>> [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
>>> [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
>>> [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
>>> n_max = 10
>>> marginals(mu, V, n_max)
array([[1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00],
[1.35335284e-01, 2.70670567e-01, 2.70670566e-01, 1.80447044e-01,
9.02235216e-02, 3.60894085e-02, 1.20298028e-02, 3.43708650e-03,
8.59271622e-04, 1.90949249e-04],
[1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00]])
Args:
mu (array): displacement vector
V (array): covariance matrix
n_max (int): maximum number of vibrational quanta in the distribution
hbar (float): the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`.
Returns:
array[list[float]]: marginal distributions
"""
if not V.shape[0] == V.shape[1]:
raise ValueError("The covariance matrix must be a square matrix")
if not len(mu) == len(V):
raise ValueError(
"The dimension of the displacement vector and the covariance matrix must be equal"
)
if n_max <= 0:
raise ValueError("The number of vibrational states must be larger than zero")
n_modes = len(mu) // 2
p = np.zeros((n_modes, n_max))
for mode in range(n_modes):
mui, vi = quantum.reduced_gaussian(mu, V, mode)
for i in range(n_max):
p[mode, i] = np.real(quantum.density_matrix_element(mui, vi, [i], [i], hbar=hbar))
return p
| 35.271949
| 119
| 0.641209
|
794cc4a7a0c737968c35e7ceccad291048183340
| 504
|
py
|
Python
|
kolibri/core/wage_tracker/api_urls.py
|
MihirBharali/akshar-app
|
74f01615da5a33eebf393e5bc3940b8f25b6d4f0
|
[
"MIT"
] | null | null | null |
kolibri/core/wage_tracker/api_urls.py
|
MihirBharali/akshar-app
|
74f01615da5a33eebf393e5bc3940b8f25b6d4f0
|
[
"MIT"
] | null | null | null |
kolibri/core/wage_tracker/api_urls.py
|
MihirBharali/akshar-app
|
74f01615da5a33eebf393e5bc3940b8f25b6d4f0
|
[
"MIT"
] | null | null | null |
from django.conf.urls import include
from django.conf.urls import url
from rest_framework import routers
from .api import UserWageAccountViewset, UserWageAccountTransactionViewset
router = routers.SimpleRouter()
router.register(
r"account", UserWageAccountViewset, base_name="account"
)
router.register(
r"transactions", UserWageAccountTransactionViewset, base_name="transactions"
)
urlpatterns = [url(
r"^",
include(router.urls)
),
]
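# Note (standard DRF SimpleRouter behaviour, not extra configuration): the
# router above exposes routes such as account/ and transactions/ (plus detail
# routes where the viewsets support them) under whatever prefix includes
# these urlpatterns.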
| 28
| 80
| 0.704365
|
794cc4b8d00e2ba42483fb62c856fd380d43727e
| 37,393
|
py
|
Python
|
tests/models/test_channel.py
|
Solrkohen/Tucson
|
2aca2186d74fdbfe77a008e3f05f6f2f12fca0aa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/models/test_channel.py
|
Solrkohen/Tucson
|
2aca2186d74fdbfe77a008e3f05f6f2f12fca0aa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/models/test_channel.py
|
Solrkohen/Tucson
|
2aca2186d74fdbfe77a008e3f05f6f2f12fca0aa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import os
from conda.common.io import env_var
from conda._vendor.auxlib.ish import dals
from conda.base.context import context, reset_context
from conda.common.compat import odict
from conda.common.configuration import YamlRawParameter
from conda.common.url import join_url
from conda.common.yaml import yaml_load
from conda.models.channel import Channel, prioritize_channels
from conda.utils import on_win
from logging import getLogger
from unittest import TestCase
import conda.models.channel
try:
from unittest.mock import patch
except ImportError:
from mock import patch
log = getLogger(__name__)
class DefaultConfigChannelTests(TestCase):
@classmethod
def setUpClass(cls):
reset_context()
cls.platform = context.subdir
cls.DEFAULT_URLS = ['https://repo.continuum.io/pkgs/free/%s' % cls.platform,
'https://repo.continuum.io/pkgs/free/noarch',
'https://repo.continuum.io/pkgs/r/%s' % cls.platform,
'https://repo.continuum.io/pkgs/r/noarch',
'https://repo.continuum.io/pkgs/pro/%s' % cls.platform,
'https://repo.continuum.io/pkgs/pro/noarch']
if on_win:
cls.DEFAULT_URLS.extend(['https://repo.continuum.io/pkgs/msys2/%s' % cls.platform,
'https://repo.continuum.io/pkgs/msys2/noarch'])
def test_channel_alias_channels(self):
channel = Channel('binstar/label/dev')
assert channel.channel_name == "binstar/label/dev"
assert channel.channel_location == "conda.anaconda.org"
assert channel.platform is None
assert channel.package_filename is None
assert channel.canonical_name == "binstar/label/dev"
assert channel.urls() == [
'https://conda.anaconda.org/binstar/label/dev/%s' % context.subdir,
'https://conda.anaconda.org/binstar/label/dev/noarch',
]
def test_channel_cache(self):
Channel._reset_state()
assert len(Channel._cache_) == 0
dc = Channel('defaults')
assert len(Channel._cache_) == 1
dc1 = Channel('defaults')
assert len(Channel._cache_) == 1
dc2 = Channel('defaults')
assert len(Channel._cache_) == 1
assert dc1 is dc
assert dc2 is dc
dc3 = Channel(dc)
assert len(Channel._cache_) == 1
assert dc3 is dc
ccc = Channel('conda-canary')
assert len(Channel._cache_) == 2
ccc1 = Channel('conda-canary')
assert len(Channel._cache_) == 2
assert ccc1 is ccc
def test_default_channel(self):
dc = Channel('defaults')
assert dc.canonical_name == 'defaults'
assert dc.urls() == self.DEFAULT_URLS
def test_url_channel_w_platform(self):
channel = Channel('https://repo.continuum.io/pkgs/free/osx-64')
assert channel.scheme == "https"
assert channel.location == "repo.continuum.io"
assert channel.platform == 'osx-64'
assert channel.name == 'pkgs/free'
assert channel.base_url == 'https://repo.continuum.io/pkgs/free'
assert channel.canonical_name == 'defaults'
assert channel.url() == 'https://repo.continuum.io/pkgs/free/osx-64'
assert channel.urls() == [
'https://repo.continuum.io/pkgs/free/osx-64',
'https://repo.continuum.io/pkgs/free/noarch',
]
def test_bare_channel(self):
url = "http://conda-01"
channel = Channel(url)
assert channel.scheme == "http"
assert channel.location == "conda-01"
assert channel.platform is None
assert channel.canonical_name == url
assert channel.name is None
assert channel.base_url == url
assert channel.url() == join_url(url, context.subdir)
assert channel.urls() == [
join_url(url, context.subdir),
join_url(url, 'noarch'),
]
class AnacondaServerChannelTests(TestCase):
@classmethod
def setUpClass(cls):
string = dals("""
channel_alias: https://10.2.3.4:8080/conda/t/tk-123-45
migrated_channel_aliases:
- https://conda.anaconda.org
- http://10.2.3.4:7070/conda
""")
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_load(string)))
context._set_raw_data(rd)
Channel._reset_state()
cls.platform = context.subdir
@classmethod
def tearDownClass(cls):
reset_context()
def test_channel_alias_w_conda_path(self):
channel = Channel('bioconda')
assert channel.channel_name == "bioconda"
assert channel.channel_location == "10.2.3.4:8080/conda"
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.scheme == "https"
assert channel.canonical_name == 'bioconda'
assert channel.urls() == [
"https://10.2.3.4:8080/conda/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/bioconda/noarch",
]
assert channel.token == "tk-123-45"
    def test_channel_alias_w_subchannel(self):
channel = Channel('bioconda/label/dev')
assert channel.channel_name == "bioconda/label/dev"
assert channel.channel_location == "10.2.3.4:8080/conda"
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.scheme == "https"
assert channel.canonical_name == 'bioconda/label/dev'
assert channel.urls() == [
"https://10.2.3.4:8080/conda/bioconda/label/dev/%s" % self.platform,
"https://10.2.3.4:8080/conda/bioconda/label/dev/noarch",
]
assert channel.token == "tk-123-45"
def test_custom_token_in_channel(self):
channel = Channel("https://10.2.3.4:8080/conda/t/x1029384756/bioconda")
assert channel.channel_name == "bioconda"
assert channel.channel_location == "10.2.3.4:8080/conda"
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token == "x1029384756"
assert channel.scheme == "https"
assert channel.canonical_name == 'bioconda'
assert channel.urls() == [
"https://10.2.3.4:8080/conda/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/bioconda/noarch",
]
def test_canonicalized_url_gets_correct_token(self):
channel = Channel("bioconda")
assert channel.urls() == [
"https://10.2.3.4:8080/conda/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/bioconda/noarch",
]
assert channel.urls(with_credentials=True) == [
"https://10.2.3.4:8080/conda/t/tk-123-45/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/t/tk-123-45/bioconda/noarch",
]
channel = Channel("https://10.2.3.4:8080/conda/bioconda")
assert channel.urls() == [
"https://10.2.3.4:8080/conda/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/bioconda/noarch",
]
assert channel.urls(with_credentials=True) == [
"https://10.2.3.4:8080/conda/t/tk-123-45/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/t/tk-123-45/bioconda/noarch",
]
channel = Channel("https://10.2.3.4:8080/conda/t/x1029384756/bioconda")
assert channel.urls() == [
"https://10.2.3.4:8080/conda/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/bioconda/noarch",
]
assert channel.urls(with_credentials=True) == [
"https://10.2.3.4:8080/conda/t/x1029384756/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/t/x1029384756/bioconda/noarch",
]
# what happens with the token if it's in the wrong places?
channel = Channel("https://10.2.3.4:8080/t/x1029384756/conda/bioconda")
assert channel.urls() == [
"https://10.2.3.4:8080/conda/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/bioconda/noarch",
]
assert channel.urls(with_credentials=True) == [
"https://10.2.3.4:8080/conda/t/x1029384756/bioconda/%s" % self.platform,
"https://10.2.3.4:8080/conda/t/x1029384756/bioconda/noarch",
]
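        # Editorial note: the assertions above show that a token appearing in a
        # non-standard position in the input URL is re-anchored directly after
        # the configured conda path (".../conda/t/<token>/...") whenever
        # urls(with_credentials=True) is requested.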
class CustomConfigChannelTests(TestCase):
"""
Some notes about the tests in this class:
* The 'pkgs/free' channel is 'migrated' while the 'pkgs/pro' channel is not.
Thus test_pkgs_free and test_pkgs_pro have substantially different behavior.
"""
@classmethod
def setUpClass(cls):
string = dals("""
custom_channels:
darwin: https://some.url.somewhere/stuff
chuck: http://user1:pass2@another.url:8080/t/tk-1234/with/path
pkgs/free: http://192.168.0.15:8080
migrated_custom_channels:
darwin: s3://just/cant
chuck: file:///var/lib/repo/
pkgs/free: https://repo.continuum.io
migrated_channel_aliases:
- https://conda.anaconda.org
channel_alias: ftp://new.url:8082
default_channels:
- http://192.168.0.15:8080/pkgs/free
- http://192.168.0.15:8080/pkgs/pro
- http://192.168.0.15:8080/pkgs/msys2
""")
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_load(string)))
context._set_raw_data(rd)
Channel._reset_state()
cls.platform = context.subdir
cls.DEFAULT_URLS = ['http://192.168.0.15:8080/pkgs/free/%s' % cls.platform,
'http://192.168.0.15:8080/pkgs/free/noarch',
'http://192.168.0.15:8080/pkgs/pro/%s' % cls.platform,
'http://192.168.0.15:8080/pkgs/pro/noarch',
'http://192.168.0.15:8080/pkgs/msys2/%s' % cls.platform,
'http://192.168.0.15:8080/pkgs/msys2/noarch',
]
@classmethod
def tearDownClass(cls):
reset_context()
def test_pkgs_free(self):
channel = Channel('pkgs/free')
assert channel.channel_name == "pkgs/free"
assert channel.channel_location == "192.168.0.15:8080"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'http://192.168.0.15:8080/pkgs/free/%s' % self.platform,
'http://192.168.0.15:8080/pkgs/free/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/free')
assert channel.channel_name == "pkgs/free"
assert channel.channel_location == "192.168.0.15:8080"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'http://192.168.0.15:8080/pkgs/free/%s' % self.platform,
'http://192.168.0.15:8080/pkgs/free/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/free/noarch')
assert channel.channel_name == "pkgs/free"
assert channel.channel_location == "192.168.0.15:8080"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'http://192.168.0.15:8080/pkgs/free/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/free/label/dev')
assert channel.channel_name == "pkgs/free/label/dev"
assert channel.channel_location == "192.168.0.15:8080"
assert channel.canonical_name == "pkgs/free/label/dev"
assert channel.urls() == [
'http://192.168.0.15:8080/pkgs/free/label/dev/%s' % self.platform,
'http://192.168.0.15:8080/pkgs/free/label/dev/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/free/noarch/flask-1.0.tar.bz2')
assert channel.channel_name == "pkgs/free"
assert channel.channel_location == "192.168.0.15:8080"
assert channel.platform == "noarch"
assert channel.package_filename == "flask-1.0.tar.bz2"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'http://192.168.0.15:8080/pkgs/free/noarch',
]
def test_pkgs_pro(self):
channel = Channel('pkgs/pro')
assert channel.channel_name == "pkgs/pro"
assert channel.channel_location == "192.168.0.15:8080"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'http://192.168.0.15:8080/pkgs/pro/%s' % self.platform,
'http://192.168.0.15:8080/pkgs/pro/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/pro')
assert channel.channel_name == "pkgs/pro"
assert channel.channel_location == "repo.continuum.io"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'https://repo.continuum.io/pkgs/pro/%s' % self.platform,
'https://repo.continuum.io/pkgs/pro/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/pro/noarch')
assert channel.channel_name == "pkgs/pro"
assert channel.channel_location == "repo.continuum.io"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'https://repo.continuum.io/pkgs/pro/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/pro/label/dev')
assert channel.channel_name == "pkgs/pro/label/dev"
assert channel.channel_location == "repo.continuum.io"
assert channel.canonical_name == "pkgs/pro/label/dev"
assert channel.urls() == [
'https://repo.continuum.io/pkgs/pro/label/dev/%s' % self.platform,
'https://repo.continuum.io/pkgs/pro/label/dev/noarch',
]
channel = Channel('https://repo.continuum.io/pkgs/pro/noarch/flask-1.0.tar.bz2')
assert channel.channel_name == "pkgs/pro"
assert channel.channel_location == "repo.continuum.io"
assert channel.platform == "noarch"
assert channel.package_filename == "flask-1.0.tar.bz2"
assert channel.canonical_name == "defaults"
assert channel.urls() == [
'https://repo.continuum.io/pkgs/pro/noarch',
]
def test_custom_channels(self):
channel = Channel('darwin')
assert channel.channel_name == "darwin"
assert channel.channel_location == "some.url.somewhere/stuff"
channel = Channel('https://some.url.somewhere/stuff/darwin')
assert channel.channel_name == "darwin"
assert channel.channel_location == "some.url.somewhere/stuff"
channel = Channel('https://some.url.somewhere/stuff/darwin/label/dev')
assert channel.channel_name == "darwin/label/dev"
assert channel.channel_location == "some.url.somewhere/stuff"
assert channel.platform is None
channel = Channel('https://some.url.somewhere/stuff/darwin/label/dev/linux-64')
assert channel.channel_name == "darwin/label/dev"
assert channel.channel_location == "some.url.somewhere/stuff"
assert channel.platform == 'linux-64'
assert channel.package_filename is None
channel = Channel('https://some.url.somewhere/stuff/darwin/label/dev/linux-64/flask-1.0.tar.bz2')
assert channel.channel_name == "darwin/label/dev"
assert channel.channel_location == "some.url.somewhere/stuff"
assert channel.platform == 'linux-64'
assert channel.package_filename == 'flask-1.0.tar.bz2'
assert channel.auth is None
assert channel.token is None
assert channel.scheme == "https"
def test_custom_channels_port_token_auth(self):
channel = Channel('chuck')
assert channel.channel_name == "chuck"
assert channel.channel_location == "another.url:8080/with/path"
assert channel.auth == 'user1:pass2'
assert channel.token == 'tk-1234'
assert channel.scheme == "http"
channel = Channel('https://another.url:8080/with/path/chuck/label/dev/linux-64/flask-1.0.tar.bz2')
assert channel.channel_name == "chuck/label/dev"
assert channel.channel_location == "another.url:8080/with/path"
assert channel.auth == 'user1:pass2'
assert channel.token == 'tk-1234'
assert channel.scheme == "https"
assert channel.platform == 'linux-64'
assert channel.package_filename == 'flask-1.0.tar.bz2'
def test_migrated_custom_channels(self):
channel = Channel('s3://just/cant/darwin/osx-64')
assert channel.channel_name == "darwin"
assert channel.channel_location == "some.url.somewhere/stuff"
assert channel.platform == 'osx-64'
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme == "https"
assert channel.canonical_name == "darwin"
assert channel.url() == "https://some.url.somewhere/stuff/darwin/osx-64"
assert channel.urls() == [
"https://some.url.somewhere/stuff/darwin/osx-64",
"https://some.url.somewhere/stuff/darwin/noarch",
]
assert Channel(channel.canonical_name).urls() == [
"https://some.url.somewhere/stuff/darwin/%s" % self.platform,
"https://some.url.somewhere/stuff/darwin/noarch",
]
channel = Channel('https://some.url.somewhere/stuff/darwin/noarch/a-mighty-fine.tar.bz2')
assert channel.channel_name == "darwin"
assert channel.channel_location == "some.url.somewhere/stuff"
assert channel.platform == 'noarch'
assert channel.package_filename == 'a-mighty-fine.tar.bz2'
assert channel.auth is None
assert channel.token is None
assert channel.scheme == "https"
assert channel.canonical_name == "darwin"
assert channel.url() == "https://some.url.somewhere/stuff/darwin/noarch/a-mighty-fine.tar.bz2"
assert channel.urls() == [
"https://some.url.somewhere/stuff/darwin/noarch",
]
assert Channel(channel.canonical_name).urls() == [
"https://some.url.somewhere/stuff/darwin/%s" % self.platform,
"https://some.url.somewhere/stuff/darwin/noarch",
]
def test_local_channel(self):
Channel._reset_state()
channel = Channel('local')
assert channel._channels[0].name.rsplit('/', 1)[-1] == 'conda-bld'
assert channel.channel_name == "local"
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme is None
assert channel.canonical_name == "local"
local_channel_first_subchannel = channel._channels[0].name
channel = Channel(local_channel_first_subchannel)
assert channel.channel_name == local_channel_first_subchannel
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme == "file"
assert channel.canonical_name == "local"
assert channel.urls() == Channel('local').urls()
assert channel.urls()[0].startswith('file:///')
def test_defaults_channel(self):
channel = Channel('defaults')
assert channel.name == 'defaults'
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme is None
assert channel.canonical_name == 'defaults'
assert channel.urls() == self.DEFAULT_URLS
def test_file_channel(self):
channel = Channel("file:///var/folders/cp/7r2s_s593j7_cpdtp/T/5d9f5e45/osx-64/flask-0.10.1-py35_2.tar.bz2")
assert channel.name == '5d9f5e45'
assert channel.location == '/var/folders/cp/7r2s_s593j7_cpdtp/T'
assert channel.platform == 'osx-64'
assert channel.package_filename == "flask-0.10.1-py35_2.tar.bz2"
assert channel.auth is None
assert channel.token is None
assert channel.scheme == "file"
assert channel.url() == "file:///var/folders/cp/7r2s_s593j7_cpdtp/T/5d9f5e45/osx-64/flask-0.10.1-py35_2.tar.bz2"
assert channel.urls() == [
"file:///var/folders/cp/7r2s_s593j7_cpdtp/T/5d9f5e45/osx-64",
"file:///var/folders/cp/7r2s_s593j7_cpdtp/T/5d9f5e45/noarch"
]
assert channel.canonical_name == 'file:///var/folders/cp/7r2s_s593j7_cpdtp/T/5d9f5e45'
def test_old_channel_alias(self):
cf_urls = ["ftp://new.url:8082/conda-forge/%s" % self.platform,
"ftp://new.url:8082/conda-forge/noarch"]
assert Channel('conda-forge').urls() == cf_urls
url = "https://conda.anaconda.org/conda-forge/osx-64/some-great-package.tar.bz2"
assert Channel(url).canonical_name == 'conda-forge'
assert Channel(url).base_url == 'ftp://new.url:8082/conda-forge'
assert Channel(url).url() == "ftp://new.url:8082/conda-forge/osx-64/some-great-package.tar.bz2"
assert Channel(url).urls() == [
"ftp://new.url:8082/conda-forge/osx-64",
"ftp://new.url:8082/conda-forge/noarch",
]
channel = Channel("https://conda.anaconda.org/conda-forge/label/dev/linux-64/some-great-package.tar.bz2")
assert channel.url() == "ftp://new.url:8082/conda-forge/label/dev/linux-64/some-great-package.tar.bz2"
assert channel.urls() == [
"ftp://new.url:8082/conda-forge/label/dev/linux-64",
"ftp://new.url:8082/conda-forge/label/dev/noarch",
]
class ChannelAuthTokenPriorityTests(TestCase):
@classmethod
def setUpClass(cls):
string = dals("""
custom_channels:
chuck: http://user1:pass2@another.url:8080/with/path/t/tk-1234
chuck/subchan: http://user33:pass44@another.url:8080/with/path/t/tk-1234
channel_alias: ftp://nm:ps@new.url:8082/t/zyx-wvut/
channels:
- mickey
- https://conda.anaconda.cloud/t/tk-12-token/minnie
- http://dont-do:this@4.3.2.1/daffy/label/main
default_channels:
- http://192.168.0.15:8080/pkgs/free
- donald/label/main
- http://us:pw@192.168.0.15:8080/t/tkn-123/pkgs/r
""")
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_load(string)))
context._set_raw_data(rd)
Channel._reset_state()
cls.platform = context.subdir
@classmethod
def tearDownClass(cls):
reset_context()
def test_named_custom_channel(self):
channel = Channel("chuck")
assert channel.canonical_name == "chuck"
assert channel.location == "another.url:8080/with/path"
assert channel.url() == "http://another.url:8080/with/path/chuck/%s" % self.platform
assert channel.url(True) == "http://user1:pass2@another.url:8080/with/path/t/tk-1234/chuck/%s" % self.platform
assert channel.urls() == [
"http://another.url:8080/with/path/chuck/%s" % self.platform,
"http://another.url:8080/with/path/chuck/noarch",
]
assert channel.urls(True) == [
"http://user1:pass2@another.url:8080/with/path/t/tk-1234/chuck/%s" % self.platform,
"http://user1:pass2@another.url:8080/with/path/t/tk-1234/chuck/noarch",
]
channel = Channel("chuck/label/dev")
assert channel.canonical_name == "chuck/label/dev"
assert channel.location == "another.url:8080/with/path"
assert channel.url() == "http://another.url:8080/with/path/chuck/label/dev/%s" % self.platform
assert channel.url(True) == "http://user1:pass2@another.url:8080/with/path/t/tk-1234/chuck/label/dev/%s" % self.platform
assert channel.urls() == [
"http://another.url:8080/with/path/chuck/label/dev/%s" % self.platform,
"http://another.url:8080/with/path/chuck/label/dev/noarch",
]
assert channel.urls(True) == [
"http://user1:pass2@another.url:8080/with/path/t/tk-1234/chuck/label/dev/%s" % self.platform,
"http://user1:pass2@another.url:8080/with/path/t/tk-1234/chuck/label/dev/noarch",
]
def test_url_custom_channel(self):
# scheme and credentials within url should override what's registered in config
channel = Channel("https://newuser:newpass@another.url:8080/with/path/t/new-token/chuck/label/dev")
assert channel.canonical_name == "chuck/label/dev"
assert channel.location == "another.url:8080/with/path"
assert channel.url() == "https://another.url:8080/with/path/chuck/label/dev/%s" % self.platform
assert channel.url(True) == "https://newuser:newpass@another.url:8080/with/path/t/new-token/chuck/label/dev/%s" % self.platform
assert channel.urls() == [
"https://another.url:8080/with/path/chuck/label/dev/%s" % self.platform,
"https://another.url:8080/with/path/chuck/label/dev/noarch",
]
assert channel.urls(True) == [
"https://newuser:newpass@another.url:8080/with/path/t/new-token/chuck/label/dev/%s" % self.platform,
"https://newuser:newpass@another.url:8080/with/path/t/new-token/chuck/label/dev/noarch",
]
def test_named_custom_channel_w_subchan(self):
channel = Channel("chuck/subchan")
assert channel.canonical_name == "chuck/subchan"
assert channel.location == "another.url:8080/with/path"
assert channel.url() == "http://another.url:8080/with/path/chuck/subchan/%s" % self.platform
assert channel.url(
True) == "http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/%s" % self.platform
assert channel.urls() == [
"http://another.url:8080/with/path/chuck/subchan/%s" % self.platform,
"http://another.url:8080/with/path/chuck/subchan/noarch",
]
assert channel.urls(True) == [
"http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/%s" % self.platform,
"http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/noarch",
]
channel = Channel("chuck/subchan/label/main")
assert channel.canonical_name == "chuck/subchan/label/main"
assert channel.location == "another.url:8080/with/path"
assert channel.url() == "http://another.url:8080/with/path/chuck/subchan/label/main/%s" % self.platform
assert channel.url(
True) == "http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/label/main/%s" % self.platform
assert channel.urls() == [
"http://another.url:8080/with/path/chuck/subchan/label/main/%s" % self.platform,
"http://another.url:8080/with/path/chuck/subchan/label/main/noarch",
]
assert channel.urls(True) == [
"http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/label/main/%s" % self.platform,
"http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/label/main/noarch",
]
def test_url_custom_channel_w_subchan(self):
channel = Channel("http://another.url:8080/with/path/chuck/subchan/label/main")
assert channel.canonical_name == "chuck/subchan/label/main"
assert channel.location == "another.url:8080/with/path"
assert channel.url() == "http://another.url:8080/with/path/chuck/subchan/label/main/%s" % self.platform
assert channel.url(True) == "http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/label/main/%s" % self.platform
assert channel.urls() == [
"http://another.url:8080/with/path/chuck/subchan/label/main/%s" % self.platform,
"http://another.url:8080/with/path/chuck/subchan/label/main/noarch",
]
assert channel.urls(True) == [
"http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/label/main/%s" % self.platform,
"http://user33:pass44@another.url:8080/with/path/t/tk-1234/chuck/subchan/label/main/noarch",
]
def test_channel_alias(self):
channel = Channel("charlie")
assert channel.canonical_name == "charlie"
assert channel.location == "new.url:8082"
assert channel.url() == "ftp://new.url:8082/charlie/%s" % self.platform
assert channel.url(True) == "ftp://nm:ps@new.url:8082/t/zyx-wvut/charlie/%s" % self.platform
assert channel.urls() == [
"ftp://new.url:8082/charlie/%s" % self.platform,
"ftp://new.url:8082/charlie/noarch",
]
assert channel.urls(True) == [
"ftp://nm:ps@new.url:8082/t/zyx-wvut/charlie/%s" % self.platform,
"ftp://nm:ps@new.url:8082/t/zyx-wvut/charlie/noarch",
]
channel = Channel("charlie/label/dev")
assert channel.canonical_name == "charlie/label/dev"
assert channel.location == "new.url:8082"
assert channel.url() == "ftp://new.url:8082/charlie/label/dev/%s" % self.platform
assert channel.url(True) == "ftp://nm:ps@new.url:8082/t/zyx-wvut/charlie/label/dev/%s" % self.platform
assert channel.urls() == [
"ftp://new.url:8082/charlie/label/dev/%s" % self.platform,
"ftp://new.url:8082/charlie/label/dev/noarch",
]
assert channel.urls(True) == [
"ftp://nm:ps@new.url:8082/t/zyx-wvut/charlie/label/dev/%s" % self.platform,
"ftp://nm:ps@new.url:8082/t/zyx-wvut/charlie/label/dev/noarch",
]
channel = Channel("ftp://nm:ps@new.url:8082/t/new-token/charlie/label/dev")
assert channel.canonical_name == "charlie/label/dev"
assert channel.location == "new.url:8082"
assert channel.url() == "ftp://new.url:8082/charlie/label/dev/%s" % self.platform
assert channel.url(
True) == "ftp://nm:ps@new.url:8082/t/new-token/charlie/label/dev/%s" % self.platform
assert channel.urls() == [
"ftp://new.url:8082/charlie/label/dev/%s" % self.platform,
"ftp://new.url:8082/charlie/label/dev/noarch",
]
assert channel.urls(True) == [
"ftp://nm:ps@new.url:8082/t/new-token/charlie/label/dev/%s" % self.platform,
"ftp://nm:ps@new.url:8082/t/new-token/charlie/label/dev/noarch",
]
def test_default_channels(self):
channel = Channel('defaults')
assert channel.canonical_name == "defaults"
assert channel.location is None
assert channel.url() is None
assert channel.url(True) is None
assert channel.urls() == [
"http://192.168.0.15:8080/pkgs/free/%s" % self.platform,
"http://192.168.0.15:8080/pkgs/free/noarch",
"ftp://new.url:8082/donald/label/main/%s" % self.platform,
"ftp://new.url:8082/donald/label/main/noarch",
"http://192.168.0.15:8080/pkgs/r/%s" % self.platform,
"http://192.168.0.15:8080/pkgs/r/noarch",
]
assert channel.urls(True) == [
"http://192.168.0.15:8080/pkgs/free/%s" % self.platform,
"http://192.168.0.15:8080/pkgs/free/noarch",
"ftp://nm:ps@new.url:8082/t/zyx-wvut/donald/label/main/%s" % self.platform,
"ftp://nm:ps@new.url:8082/t/zyx-wvut/donald/label/main/noarch",
"http://us:pw@192.168.0.15:8080/t/tkn-123/pkgs/r/%s" % self.platform,
"http://us:pw@192.168.0.15:8080/t/tkn-123/pkgs/r/noarch",
]
channel = Channel("ftp://new.url:8082/donald/label/main")
assert channel.canonical_name == "defaults"
channel = Channel("donald/label/main")
assert channel.canonical_name == "defaults"
channel = Channel("ftp://new.url:8081/donald")
assert channel.location == "new.url:8081"
assert channel.canonical_name == "donald"
class UrlChannelTests(TestCase):
def test_file_urls(self):
url = "file:///machine/shared_folder"
c = Channel(url)
assert c.scheme == "file"
assert c.auth is None
assert c.location == "/machine"
assert c.token is None
assert c.name == "shared_folder"
assert c.platform is None
assert c.package_filename is None
assert c.canonical_name == "file:///machine/shared_folder"
assert c.url() == "file:///machine/shared_folder/%s" % context.subdir
assert c.urls() == [
"file:///machine/shared_folder/%s" % context.subdir,
"file:///machine/shared_folder/noarch",
]
def test_file_url_with_backslashes(self):
url = "file://\\machine\\shared_folder\\path\\conda"
c = Channel(url)
assert c.scheme == "file"
assert c.auth is None
assert c.location == "/machine/shared_folder/path"
assert c.token is None
assert c.name == "conda"
assert c.platform is None
assert c.package_filename is None
assert c.canonical_name == "file:///machine/shared_folder/path/conda"
assert c.url() == "file:///machine/shared_folder/path/conda/%s" % context.subdir
assert c.urls() == [
"file:///machine/shared_folder/path/conda/%s" % context.subdir,
"file:///machine/shared_folder/path/conda/noarch",
]
def test_env_var_file_urls(self):
channels = ("file://\\\\network_share\\shared_folder\\path\\conda,"
"https://some.url/ch_name,"
"file:///some/place/on/my/machine")
with env_var("CONDA_CHANNELS", channels, reset_context):
assert context.channels == (
"file://\\\\network_share\\shared_folder\\path\\conda",
"https://some.url/ch_name",
"file:///some/place/on/my/machine",
)
prioritized = prioritize_channels(context.channels)
assert prioritized == OrderedDict((
("file://network_share/shared_folder/path/conda/%s" % context.subdir, ("file://network_share/shared_folder/path/conda", 0)),
("file://network_share/shared_folder/path/conda/noarch", ("file://network_share/shared_folder/path/conda", 0)),
("https://some.url/ch_name/%s" % context.subdir, ("https://some.url/ch_name", 1)),
("https://some.url/ch_name/noarch", ("https://some.url/ch_name", 1)),
("file:///some/place/on/my/machine/%s" % context.subdir, ("file:///some/place/on/my/machine", 2)),
("file:///some/place/on/my/machine/noarch", ("file:///some/place/on/my/machine", 2)),
))
class UnknownChannelTests(TestCase):
def test_regression_against_unknown_none(self):
defaults = Channel('defaults')
channel = Channel(None)
assert channel.scheme is None
assert channel.location is None
assert channel.platform is None
assert channel.name == "<unknown>"
assert channel.canonical_name == "<unknown>"
assert channel.base_url is None
assert channel.url() == defaults.url()
assert channel.urls() == defaults.urls()
channel = Channel('<unknown>')
assert channel.scheme is None
assert channel.location is None
assert channel.platform is None
assert channel.name == "<unknown>"
assert channel.canonical_name == "<unknown>"
assert channel.base_url is None
assert channel.url() == defaults.url()
assert channel.urls() == defaults.urls()
channel = Channel('None:///<unknown>')
assert channel.scheme is None
assert channel.location is None
assert channel.platform is None
assert channel.name == "<unknown>"
assert channel.canonical_name == "<unknown>"
assert channel.base_url is None
assert channel.url() == defaults.url()
assert channel.urls() == defaults.urls()
channel = Channel('None')
assert channel.scheme is None
assert channel.location is None
assert channel.platform is None
assert channel.name == "<unknown>"
assert channel.canonical_name == "<unknown>"
assert channel.base_url is None
assert channel.url() == defaults.url()
assert channel.urls() == defaults.urls()
| 44.67503
| 140
| 0.618164
|
794cc513d15c2f5475ea1953814eb2e287842b8e
| 41,635
|
py
|
Python
|
pyne/ensdf.py
|
ypark234/pyne
|
b7c4932c0399e6a0881aea943b392fb97cd0b6bd
|
[
"MIT"
] | 1
|
2019-03-26T17:37:57.000Z
|
2019-03-26T17:37:57.000Z
|
pyne/ensdf.py
|
ypark234/pyne
|
b7c4932c0399e6a0881aea943b392fb97cd0b6bd
|
[
"MIT"
] | 58
|
2019-01-07T16:13:26.000Z
|
2019-05-09T15:56:26.000Z
|
pyne/ensdf.py
|
ypark234/pyne
|
b7c4932c0399e6a0881aea943b392fb97cd0b6bd
|
[
"MIT"
] | null | null | null |
from __future__ import division
import re
import sys
import copy
from collections import defaultdict
from warnings import warn
from pyne.utils import QAWarning
from pyne.utils import time_conv_dict
import numpy as np
from pyne import nucname, rxname, data
if sys.version_info[0] > 2:
basestring = str
warn(__name__ + " is not yet QA compliant.", QAWarning)
_valexp = re.compile('([0-9.]*)([Ee][+-]?\d*)')
_val = re.compile('(\d*)[.](\d*)')
_specialval = re.compile("([0-9. ]*)[+]([A-Z])")
_specialval2 = re.compile("([A-Z]*)[+]([0-9.]*)")
_errpm = re.compile('[+](\d*)[-](\d*)')
_err = re.compile('[ ]*(\d*)')
_base = '([ \d]{3}[ A-Za-z]{2})'
_ident = re.compile(_base + ' (.{30})(.{26})(.{7})(.{6})')
_g = re.compile(_base + ' G (.{10})(.{2})(.{8})(.{2}).{24}(.{7})(.{2})(.{10})'
+ '(.{2})')
_gc = re.compile(_base + '[0-9A-Za-z] [GE] (.{70})')
_beta = re.compile(_base + ' B (.{10})(.{2})(.{8})(.{2}).{10}(.{8})(.{6})')
_betac = re.compile(_base + '[0-9A-Za-z] ([BE]) (.{70})')
_ec = re.compile(_base + ' E (.{10})(.{2})(.{8})(.{2})'
+ '(.{8})(.{2})(.{8})(.{6})(.{10})(.{2})')
_p = re.compile(_base + ' P (.{10})(.{2})(.{18})(.{10})'
+ '(.{6}).{9}(.{10})(.{2})(.{4})')
_norm = re.compile(_base + ' N (.{10})(.{2})(.{8})(.{2})(.{8})(.{2})(.{8})'
+ '(.{6})(.{7})(.{2})')
_normp = re.compile(_base +
' PN (.{10})(.{2})(.{8})(.{2})(.{8})(.{2})(.{7})(.{2})')
_q = re.compile(_base + ' Q (.{10})(.{2})(.{8})(.{2})'
+ '(.{8})(.{2})(.{8})(.{6})')
_alpha = re.compile(_base + ' A (.{10})(.{2})(.{8})(.{2})(.{8})(.{2})')
_dp = re.compile(_base + ' D(.{1})(.{10})(.{2})(.{8})(.{2})(.{8})(.{10})'
+ '(.{6})')
_decays = ['B-', 'B+A', 'EC', 'B-A', 'B+', 'B+P', 'B-N', 'ECP', 'EC2P', 'N',
'2N', 'IT', 'B+2P', 'B-2N', 'B+3P', 'ECA', 'P', '2P', '2B-', 'SF',
'A', '2B+', '2EC', '14C']
_level_regex = re.compile(_base + ' L (.{10})(.{2})(.{18})(.{10})(.{6})'
+ '(.{9})(.{10})(.{2})(.{1})([ M])([ 1-9])')
_level_cont_regex = re.compile('([ \d]{3}[ A-Za-z]{2})[0-9A-Za-z] L (.*)')
def _getvalue(obj, fn=float, rn=None):
x = obj.strip()
x = x.replace('$', '')
x = x.replace('?', '')
try:
return fn(x)
except ValueError:
return rn
def _to_id(nuc):
if 'NN' not in nuc:
nucid = nucname.ensdf_to_id(nuc.strip())
else:
warn('Neutron data not supported!')
return 0
return nucid
# Energy to half-life conversion: T1/2= ln(2) × (h/2 pi) / energy
# See http://www.nndc.bnl.gov/nudat2/help/glossary.jsp#halflife
# NIST CODATA https://physics.nist.gov/cgi-bin/cuu/Value?hbar
# h-bar = 1.054 571 800(13) x 1e-34 J
# 1 J = 6.241 509 126(38) x 1e18 eV
HBAR_LN2 = 4.5623775832376968e-16 # h-bar ln(2) in eV s
energy_conv_dict = {'ev': HBAR_LN2,
'kev': 1e-3 * HBAR_LN2,
'mev': 1e-6 * HBAR_LN2,
}
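# Illustrative check (editorial, not part of the original module): a level
# width of 1 eV corresponds to a half-life of
#     T1/2 = HBAR_LN2 / 1 eV ~= 4.56e-16 s
# and a width of 1 keV to ~4.56e-19 s, matching the 'kev' entry of
# energy_conv_dict above.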
def _halflife_to_seconds(value, err, units):
"""Converts a halflife with err and units to seconds.
Parameters
----------
value: number
Time or energy, depending on units.
err : number or (number, number)
Uncertainty, or (plus, minus) uncertainty in [units].
units : str
Units flag, eg 'min', 'ms', 'days', or even 'MeV'.
Returns
-------
sec_time : float
Time value in [sec].
sec_err : None or float or (float, float) in [sec].
Time uncertainty in [sec], or (plus, minus) if asymmetric uncertainty.
"""
if err is None:
plus, minus = 0, 0
elif np.isscalar(err):
plus, minus = err, err
else:
plus, minus = err
units = units.lower()
scale = time_conv_dict.get(units, None)
if scale is not None:
sec_time = scale * value
sec_err = (scale * plus, scale * minus)
else:
scale = energy_conv_dict[units]
sec_time = scale / value
sec_err = (scale / max(0.1*value, value - minus) - sec_time,
sec_time - scale / (value + plus))
if err is None:
return sec_time, None
elif sec_err[0] == sec_err[1]:
return sec_time, sec_err[0]
else:
return sec_time, sec_err
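# Illustrative usage (editorial sketch; assumes time_conv_dict maps 'min' to
# 60.0 and has no 'ev' key, neither of which is shown in this file):
#     _halflife_to_seconds(2.0, 0.1, 'min')  ->  (120.0, 6.0)
#     _halflife_to_seconds(1.0, None, 'eV')  ->  (~4.56e-16, None)
# The second call goes through the energy branch using the constants above.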
def _to_time(tstr, errstr):
t = tstr.strip()
# This accepts questionable levels
t = t.replace('?', '')
tobj = [s.strip(' ()') for s in t.split()]
if len(tobj) == 2:
t, t_unit = tobj
value, err = _get_val_err(t, errstr)
tfinal, tfinalerr = _halflife_to_seconds(value, err, t_unit)
elif 'STABLE' in t:
tfinal = np.inf
tfinalerr = None
else:
tfinal = None
tfinalerr = None
return tfinal, tfinalerr
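# Illustrative behaviour (editorial): a stable level has no numeric half-life,
#     _to_time(' STABLE ', '  ')   ->  (inf, None)
# while a time such as '35.8 S' with uncertainty '3' becomes
#     _to_time(' 35.8 S ', ' 3 ')  ->  (35.8, 0.3)
# assuming time_conv_dict maps 's' to 1.0 (not shown in this file).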
def _get_val_err(valstr, errstr):
pm = _errpm.match(errstr)
err = _err.match(errstr)
if pm is None and err.group(1) == '':
return _getvalue(valstr), None
val = _valexp.match(valstr)
if val is None:
valexp = ''
val = valstr
else:
valexp = val.group(2)
val = val.group(1)
punc = _val.match(val.strip())
if pm is not None:
if punc is None:
errplus = _getvalue(pm.group(1) + valexp)
errminus = _getvalue(pm.group(2) + valexp)
else:
errplus = _get_err(len(punc.group(2)), pm.group(1), valexp)
errminus = _get_err(len(punc.group(2)), pm.group(2), valexp)
return _getvalue(valstr), (errplus, errminus)
else:
if punc is None:
errplus = _getvalue(errstr + valexp)
else:
errplus = _get_err(len(punc.group(2)), errstr, valexp)
return _getvalue(valstr), errplus
def _get_err(plen, errstr, valexp):
errp = list((errstr.strip()).zfill(plen))
errp.insert(-plen, '.')
return _getvalue(''.join(errp) + valexp)
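# Worked example of the ENSDF uncertainty convention handled by _get_val_err
# and _get_err (editorial note): the uncertainty digits scale the last decimal
# places of the value, so
#     _get_val_err('1.234E+2', '12')  ->  (123.4, 1.2)
#     _get_val_err('4.2', '+5-3')     ->  (4.2, (0.5, 0.3))
# where the second form reports an asymmetric (plus, minus) uncertainty.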
def _parse_level_record(l_rec):
"""
    This parses an ENSDF level record
Parameters
----------
    l_rec : re.MatchObject
regular expression MatchObject
Returns
-------
e : float
Level energy in keV
tfinal : float
Half life in seconds
from_nuc : int
nuc id of nuclide
state : int
metastable state of level
special : str
A-Z character denoting a group of known levels with no reference
to the ground state. P and N are special characters reserved for
proton and neutron resonances given in center of mass system energy.
"""
lm = re.match("[ ]*([A-Z]+)(?![A-Z0-9+])", l_rec.group(2))
spv = _specialval.match(l_rec.group(2).strip())
spv2 = _specialval2.match(l_rec.group(2).strip())
special = ' '
if lm is not None:
special = lm.group(1)
if "S" in special and len(special.strip()) > 1:
special = special.strip()[1]
e = 0.0
de = np.nan
elif spv is not None:
e, de = _get_val_err(spv.group(1), l_rec.group(3))
special = spv.group(2)
elif spv2 is not None:
e, de = _get_val_err(spv2.group(2), l_rec.group(3))
special = spv2.group(1)
if "S" in special and len(special.strip()) > 1:
special = special.strip()[1]
else:
e, de = _get_val_err(l_rec.group(2).strip('() '), l_rec.group(3))
tfinal, tfinalerr = _to_time(l_rec.group(5), l_rec.group(6))
from_nuc = _to_id(l_rec.group(1))
m = l_rec.group(11)
s = l_rec.group(12)
state = 0
if m == 'M':
state = s.strip()
if 0 < len(state):
state = int(state)
else:
state = 1
return e, tfinal, from_nuc, state, special
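# Editorial note on the metastable flag: group 11 of _level_regex is the 'M'
# marker and group 12 the optional metastable index, so a level flagged 'M'
# with no index yields state == 1, 'M2' yields state == 2, and unflagged
# levels keep state == 0.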
def _parse_level_continuation_record(lc_rec):
"""
    This parses an ENSDF level continuation record
Parameters
----------
    lc_rec : re.MatchObject
regular expression MatchObject
Returns
-------
dat : dict
dictionary of branching ratios of different reaction channels
"""
g = lc_rec.groups()
dat = {}
raw_children = g[-1].replace(' AP ', '=')
raw_children = raw_children.replace('$', ' ').split()
for raw_child in raw_children:
if '=' in raw_child:
rx, br = raw_child.split('=')[:2]
br = br.strip()
else:
continue
if '%' in rx and '?' not in br and len(br) > 0:
dat[rx] = br
return dat
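# Illustrative example (editorial): a continuation field such as
#     "%B-=100 $ %B-N AP 5.2"
# is normalised by replacing ' AP ' with '=' and '$' with a space, giving
#     {'%B-': '100', '%B-N': '5.2'}
# Entries with a '?' in the value or without a '%' in the key are skipped.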
def _parse_gamma_record(g):
"""
This parses an ENSDF gamma record
Parameters
----------
g : re.MatchObject
regular expression MatchObject
Returns
-------
    dat : list
        This list contains 8 floats corresponding to:
        * gamma ray energy in keV
        * uncertainty in energy
        * intensity
        * uncertainty in intensity
        * electron conversion intensity
        * uncertainty in electron conversion intensity
        * total transition intensity
        * uncertainty in total transition intensity
"""
en, en_err = _get_val_err(g.group(2), g.group(3))
inten, inten_err = _get_val_err(g.group(4), g.group(5))
conv, conv_err = _get_val_err(g.group(6), g.group(7))
tti, tti_err = _get_val_err(g.group(8), g.group(9))
return [en, en_err, inten, inten_err, conv, conv_err, tti, tti_err]
def _parse_gamma_continuation_record(g, inten, tti):
"""
This parses an ENSDF gamma continuation record
"""
conversions = {}
entries = g.group(2).split('$')
for entry in entries:
entry = entry.replace('AP', '=')
entry = entry.replace('EL1C+EL2C', 'LC')
if '+=' in entry or 'EAV' in entry:
continue
if 'C=' in entry:
tsplit = entry.split('C')
else:
tsplit = entry.split('=')
tsplit[0] = tsplit[0].lstrip('C')
greff = inten
if '/T' in entry:
tsplit = entry.split('/T')
greff = tti
if greff is None:
greff = inten
if greff is None:
greff = 1.0
if len(tsplit) == 2:
conv = None
err = None
contype = tsplit[0].lstrip('E')
eff = tsplit[1].lstrip('= ').split()
if len(eff) == 2:
conv, err = _get_val_err(eff[0], eff[1])
elif len(eff) == 1:
conv = _getvalue(eff[0])
if conv is None and contype not in conversions:
conversions[contype] = (None, None)
elif contype not in conversions:
conversions[contype] = (conv * greff, err)
return conversions
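# Editorial sketch of the continuation parsing above: an entry such as
#     "KC=0.0125 10"
# yields contype 'K' with (conv, err) = (0.0125, 0.001); the coefficient is
# then scaled by the relative gamma (or total transition) intensity greff, so
# the returned dict maps 'K' to (conv * greff, err).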
def _parse_beta_record(b_rec):
"""
This parses an ENSDF beta minus record
Parameters
----------
b_rec : re.MatchObject
regular expression MatchObject
Returns
-------
en : float
b- endpoint energy in keV
en_err : float
error in b- endpoint energy
ib : float
branch intensity
dib : float
error in branch intensity
logft : float
logft of the decay
dft : float
error in logft
"""
en, en_err = _get_val_err(b_rec.group(2), b_rec.group(3))
ib, dib = _get_val_err(b_rec.group(4), b_rec.group(5))
logft, dft = _get_val_err(b_rec.group(6), b_rec.group(7))
return en, en_err, ib, dib, logft, dft
def _parse_beta_continuation_record(bc_rec):
"""
    This parses the beta continuation record for EAV
"""
entries = bc_rec.group(3).split('$')
eav = None
eav_err = None
for entry in entries:
if 'EAV' in entry and '=' in entry:
dat = entry.split('=')[1]
dat = dat.split()
if len(dat) == 2:
eav, eav_err = _get_val_err(dat[0], dat[1])
elif len(dat) == 1:
eav = _getvalue(dat[0])
return eav, eav_err
def _parse_ec_record(e_rec):
"""
This parses an ENSDF electron capture + b+ record
Parameters
----------
e_rec : re.MatchObject
regular expression MatchObject
Returns
-------
en : float
b+ endpoint energy in keV
en_err : float
error in b+ endpoint energy
ib : float
b+ branch intensity
dib : float
error in b+ branch intensity
ie : float
ec branch intensity
die : float
error in ec branch intensity
logft : float
logft of the decay
dft : float
error in logft
"""
en, en_err = _get_val_err(e_rec.group(2), e_rec.group(3))
ib, dib = _get_val_err(e_rec.group(4), e_rec.group(5))
ie, die = _get_val_err(e_rec.group(6), e_rec.group(7))
logft, dft = _get_val_err(e_rec.group(8), e_rec.group(9))
tti, dtti = _get_val_err(e_rec.group(10), e_rec.group(11))
return en, en_err, ib, dib, ie, die, logft, dft, tti, dtti
def _parse_normalization_record(n_rec):
"""
This parses an ENSDF normalization record
Parameters
----------
n_rec : re.MatchObject
regular expression MatchObject
Returns
-------
nr : float
Multiplier for converting relative photon intensity to photons per 100
decays of the parent through the decay branch or to photons per 100
neutron captures for (n,g).
nr_err : float
Uncertainty in nr
nt : float
Multiplier for converting relative transition intensity to transitions
per 100 decays of the parent through the decay branch or to photons
per 100 neutron captures for (n,g).
nt_err : float
Uncertainty in nt
br : float
Branching ratio multiplier for converting intensity per 100 decays
through this decay branch to intensity per 100 decays of the parent
nuclide.
br_err : float
Uncertainty in br
nb : float
Multiplier for converting relative B- and EC intensities to intensities
per 100 decays through this decay branch.
nb_err : float
Uncertainty in nb
"""
nr, nr_err = _get_val_err(n_rec.group(2), n_rec.group(3))
nt, nt_err = _get_val_err(n_rec.group(4), n_rec.group(5))
br, br_err = _get_val_err(n_rec.group(6), n_rec.group(7))
nb, nb_err = _get_val_err(n_rec.group(8), n_rec.group(9))
if nr is not None and br is not None:
nrbr = nr * br
else:
nrbr = None
if nr_err is not None and br_err is not None:
        nrbr_err = nrbr * np.sqrt((br_err / br) ** 2 + (nr_err / nr) ** 2)
else:
nrbr_err = None
return nr, nr_err, nt, nt_err, br, br_err, nb, nb_err, nrbr, nrbr_err
def _parse_production_normalization_record(np_rec):
"""
This parses an ENSDF production normalization record
Parameters
----------
np_rec : re.MatchObject
regular expression MatchObject
Returns
-------
nrbr : float
Multiplier for converting relative photon intensity to photons per 100
decays of the parent nuclide
nrbr_err : float
Uncertainty in nrbr
ntbr : float
Multiplier for converting relative transition intensity to transitions
per 100 decays of the parent nuclide
ntbr_err : float
Uncertainty in ntbr
nbbr: float
Multiplier for converting relative B- and EC intensities to intensity
per 100 decays of the parent nuclide
nbbr_err : float
Uncertainty in nbbr
"""
nrbr, nrbr_err = _get_val_err(np_rec.group(2), np_rec.group(3))
ntbr, ntbr_err = _get_val_err(np_rec.group(4), np_rec.group(5))
nbbr, nbbr_err = _get_val_err(np_rec.group(6), np_rec.group(7))
return nrbr, nrbr_err, ntbr, ntbr_err, nbbr, nbbr_err
def _parse_parent_record(p_rec):
"""
This parses an ENSDF parent record
Parameters
----------
p_rec : re.MatchObject
regular expression MatchObject
Returns
-------
tfinal : float
half-life in seconds
tfinalerr : float
Uncertainty in half-life in seconds
"""
lm = re.match("[ ]*([A-Z]+)(?![A-Z0-9+])", p_rec.group(2))
spv = _specialval.match(p_rec.group(2).strip())
spv2 = _specialval2.match(p_rec.group(2).strip())
special = ' '
if lm is not None:
special = lm.group(1)
if "S" in special and len(special.strip()) > 1:
special = special.strip()[1]
e = 0.0
de = np.nan
elif spv is not None:
e, de = _get_val_err(spv.group(1), p_rec.group(3))
special = spv.group(2)
elif spv2 is not None:
e, de = _get_val_err(spv2.group(2), p_rec.group(3))
special = spv2.group(1)
if "S" in special and len(special.strip()) > 1:
special = special.strip()[1]
else:
e, de = _get_val_err(p_rec.group(2).strip('() '), p_rec.group(3))
j = p_rec.group(4)
tfinal, tfinalerr = _to_time(p_rec.group(5), p_rec.group(6))
return p_rec.group(1), tfinal, tfinalerr, e, de, special
def _parse_qvalue_record(q_rec):
"""
    This parses an ENSDF q-value record
Parameters
----------
q_rec : re.MatchObject
regular expression MatchObject
Returns
-------
qminus : float
total energy for B- decay (if qminus > 0 B- decay is possible)
dqminus : float
standard uncertainty in qminus
sn : float
neutron separation energy in keV
dsn : float
standard uncertainty in sn
sp : float
        proton separation energy in keV
dsp : float
standard uncertainty in sp
qa : float
total energy available for alpha decay of the ground state
dqa : float
standard uncertainty in qa
"""
qminus, dqminus = _get_val_err(q_rec.group(2), q_rec.group(3))
sn, dsn = _get_val_err(q_rec.group(4), q_rec.group(5))
    sp, dsp = _get_val_err(q_rec.group(6), q_rec.group(7))
qa, dqa = _get_val_err(q_rec.group(8), q_rec.group(9))
return qminus, dqminus, sn, dsn, sp, dsp, qa, dqa
def _parse_alpha_record(a_rec):
"""
    This parses an ENSDF alpha record
Parameters
----------
    a_rec : re.MatchObject
regular expression MatchObject
Returns
-------
e : float
energy of alpha particle
de : float
standard uncertainty in energy
ia : float
intensity of the decay branch in percent
dia : float
standard uncertainty in intensity
hf : float
hindrance factor
dhf : float
standard uncertainty in hindrance factor
"""
e, de = _get_val_err(a_rec.group(2), a_rec.group(3))
ia, dia = _get_val_err(a_rec.group(4), a_rec.group(5))
    hf, dhf = _get_val_err(a_rec.group(6), a_rec.group(7))
return e, de, ia, dia, hf, dhf
def _parse_delayed_particle_record(dp_rec):
"""
    This parses an ENSDF delayed particle record
Parameters
----------
dp_rec : re.MatchObject
regular expression MatchObject
Returns
-------
ptype : str
symbol for delayed particle
e : float
particle energy
de : float
standard uncertainty in energy
ip : float
intensity of delayed particle in percent
dip : float
standard uncertainty in intensity
ei : float
energy level of the intermediate
t : float
half-life of the transition (in seconds)
dt : float
standard uncertainty in half-life
"""
ptype = dp_rec.group(2)
e, de = _get_val_err(dp_rec.group(3), dp_rec.group(4))
ip, dip = _get_val_err(dp_rec.group(5), dp_rec.group(6))
ei = _getvalue(dp_rec.group(7))
t, dt = _to_time(dp_rec.group(8), dp_rec.group(9))
return ptype, e, de, ip, dip, ei, t, dt
def _parse_decay_dataset(lines, decay_s):
"""
    This parses a decay dataset. It returns a tuple of the parsed data.
Parameters
----------
lines : list of str
list containing lines from one dataset of an ensdf file
decay_s : str
string of the decay type
Returns
-------
Tuple of decay parameters which is described in detail in gamma_rays docs
"""
gammarays = []
betas = []
alphas = []
ecbp = []
ident = _ident.match(lines[0])
daughter = ident.group(1)
daughter_id = abs(_to_id(daughter))
parent = ident.group(2).split()[0]
parent = parent.split('(')[0]
parents = parent.split(',')
if len(parents) > 1:
pfinal = abs(_to_id(parents[0]))
else:
pfinal = abs(_to_id(parents[0][:5]))
tfinal = None
tfinalerr = None
nrbr = None
nbbr = None
nrbr_err = None
nbbr_err = None
nb_err = None
br_err = None
nb = None
br = None
level = None
special = " "
goodgray = False
parent2 = None
for line in lines:
level_l = _level_regex.match(line)
if level_l is not None:
level, half_lifev, from_nuc, \
state, special = _parse_level_record(level_l)
continue
b_rec = _beta.match(line)
if b_rec is not None:
dat = _parse_beta_record(b_rec)
if parent2 is None:
bparent = pfinal
else:
bparent = parent2
level = 0.0 if level is None else level
bdaughter = abs(data.id_from_level(_to_id(daughter), level))
betas.append([bparent, bdaughter, dat[0], 0.0, dat[2]])
bc_rec = _betac.match(line)
if bc_rec is not None:
bcdat = _parse_beta_continuation_record(bc_rec)
if bcdat[0] is not None:
if bc_rec.group(2) == 'B':
betas[-1][3] = bcdat[0]
else:
ecbp[-1][3] = bcdat[0]
bggc = _gc.match(line)
conv = _parse_gamma_continuation_record(bggc, dat[2],
dat[8])
if 'K' in conv:
ecbp[-1][-3] = conv['K'][0]
if 'L' in conv:
ecbp[-1][-2] = conv['L'][0]
if 'M' in conv:
ecbp[-1][-1] = conv['M'][0]
a_rec = _alpha.match(line)
if a_rec is not None:
dat = _parse_alpha_record(a_rec)
if parent2 is None:
aparent = pfinal
else:
aparent = parent2
level = 0.0 if level is None else level
adaughter = abs(data.id_from_level(_to_id(daughter), level))
alphas.append([aparent, adaughter, dat[0], dat[2]])
ec_rec = _ec.match(line)
if ec_rec is not None:
dat = _parse_ec_record(ec_rec)
if parent2 is None:
ecparent = pfinal
else:
ecparent = parent2
level = 0.0 if level is None else level
ecdaughter = abs(data.id_from_level(_to_id(daughter), level))
ecbp.append([ecparent, ecdaughter, dat[0], 0.0, dat[2], dat[4],
0, 0, 0])
continue
g_rec = _g.match(line)
if g_rec is not None:
dat = _parse_gamma_record(g_rec)
if dat[0] is not None:
gparent = 0
gdaughter = 0
if level is not None:
gparent = abs(data.id_from_level(_to_id(daughter), level,
special))
dlevel = level - dat[0]
gdaughter = abs(data.id_from_level(_to_id(daughter), dlevel,
special))
if parent2 is None:
gp2 = pfinal
else:
gp2 = parent2
dat.insert(0, daughter_id)
dat.insert(0, gp2)
dat.insert(0, gdaughter)
dat.insert(0, gparent)
for i in range(3):
dat.append(0)
gammarays.append(dat)
goodgray = True
else:
goodgray = False
continue
gc_rec = _gc.match(line)
if gc_rec is not None and goodgray is True:
conv = _parse_gamma_continuation_record(gc_rec, gammarays[-1][6],
gammarays[-1][10])
if 'K' in conv:
gammarays[-1][-3] = conv['K'][0]
if 'L' in conv:
gammarays[-1][-2] = conv['L'][0]
if 'M' in conv:
gammarays[-1][-1] = conv['M'][0]
continue
n_rec = _norm.match(line)
if n_rec is not None:
nr, nr_err, nt, nt_err, br, br_err, nb, nb_err, nrbr, nrbr_err = \
_parse_normalization_record(n_rec)
if nb is not None and br is not None:
nbbr = nb * br
if nb_err is not None and br_err is not None and nb_err != 0:
                nbbr_err = nbbr * ((br_err / br) ** 2 + (nb_err / nb) ** 2) ** 0.5
continue
np_rec = _normp.match(line)
if np_rec is not None:
nrbr2, nrbr_err2, ntbr, ntbr_err, nbbr2, nbbr_err2 = \
_parse_production_normalization_record(np_rec)
if nrbr2 is not None and nrbr is None:
nrbr = nrbr2
nrbr_err = nrbr_err2
if nbbr2 is not None and nbbr is None:
nbbr = nbbr2
nbbr_err = nbbr_err2
continue
p_rec = _p.match(line)
if p_rec is not None:
            # only two parents per dataset are supported, so keeping a single
            # previously seen parent here is sufficient
multi = False
if parent2 is not None:
multi = True
pfinal = [parent2,]
tfinal = [t,]
tfinalerr = [terr,]
parent2, t, terr, e, e_err, special = _parse_parent_record(p_rec)
parent2 = abs(data.id_from_level(_to_id(parent2), e, special))
if terr is not None and not isinstance(terr, float):
terr = (terr[0] + terr[1])/2.0
if multi:
tfinal.append(t)
tfinalerr.append(terr)
pfinal.append(parent2)
else:
tfinal = t
tfinalerr = terr
pfinal = parent2
continue
if len(gammarays) > 0 or len(alphas) > 0 or len(betas) > 0 or len(ecbp) > 0:
if len(parents) > 1 and parent2 is None:
pfinal = []
for item in parents:
pfinal.append(_to_id(item))
return pfinal, daughter_id, rxname.id(decay_s.strip().lower()), \
tfinal, tfinalerr, \
br, br_err, nrbr, nrbr_err, nbbr, nbbr_err, gammarays, alphas, \
betas, ecbp
return None
_BAD_RX = frozenset([
# Be-6 doesn't really alpha decay (leaving He-2), rather it emits 2p
(40060000, 1089),
# Li-8 -> He-4 + beta- + alpha is really a shortcut for
# Li-8 -> Be-8 + beta- -> He-4 + alpha
(30080000, 1355894000),
])
def _adjust_ge100_branches(levellist):
"""This adjust branches that are greater than or equal to 100% to be
100% - sum(other branches). This helps prevent unphysical errors
downstream.
"""
n = len(levellist)
brsum = defaultdict(float)
bridx = defaultdict(lambda: (-1, -1.0))
baddies = []
for i, (nuc, rx, hl, lvl, br, ms, sp) in enumerate(levellist):
if rx == 0:
continue
if br >= bridx[nuc][1]:
bridx[nuc] = (i, br)
brsum[nuc] += br
nucrx = (nuc, rx)
if nucrx in _BAD_RX:
baddies.append(i)
# adjust branch ratios
for nuc, (i, br) in bridx.items():
row = levellist[i]
# this line ensures that all branches sum to 100.0 within floating point
new_br = 100.0 - brsum[nuc] + br
new_row = row[:4] + (new_br,) + row[5:]
levellist[i] = new_row
# remove bad reaction rows
for i in baddies[::-1]:
del levellist[i]
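# Worked example (editorial): if a nuclide lists branch ratios 99.95 and 0.06
# (sum 100.01), the largest branch is reset to
#     100.0 - 100.01 + 99.95 = 99.94
# so that the branches for that nuclide again sum to 100.0 within floating
# point.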
# State Id, Bad Metastable Number, (Replacement State ID, optional) Replacement Metastable Number
_BAD_METASTABLES = {
# Rh-110 misreports its ground state as a first meta-stable and its first
# metastable as its second.
(451100000, 1): 0,
(451100001, 2): 1,
# Pm-154 misreports its ground state as a first metastable
(611540000, 1): 0,
# Ga-72M is not listed as metastable
(310720002, 0): 1,
# Rh-108M is not listed as metastable
(451080004, 0): 1,
# Pm-136 mislabels two states as both metastable or ground.
# Replacing with what KAERI and NNDC report
(611360001, 2): (611360000, 0),
(611360000, 1): (611360001, 1),
}
def _adjust_metastables(levellist):
"""Adjusts misreported metastable states in place."""
for i in range(len(levellist)):
key = (levellist[i][0], levellist[i][5])
if key in _BAD_METASTABLES:
row = list(levellist[i])
new_id = _BAD_METASTABLES[key]
if not isinstance(new_id, int):
row[0], new_id = new_id
row[5] = new_id
levellist[i] = tuple(row)
# State Id, Rx Id : New Half-lives
_BAD_HALF_LIVES = {
# Eu-151 lists a very long half-life (5.364792e+25) even though it
# lists no reaction, and thus no children, and no branch ratio.
# set to infinity for consistency.
(631510000, 0): float('inf'),
}
def _adjust_half_lives(levellist):
"""Resets misbehaving half-lives to new value."""
for i in range(len(levellist)):
key = levellist[i][:2]
if key in _BAD_HALF_LIVES:
row = list(levellist[i])
row[2] = _BAD_HALF_LIVES[key]
levellist[i] = tuple(row)
def levels(filename, levellist=None):
"""
This takes an ENSDF filename or file object and parses the ADOPTED LEVELS
records to assign level numbers by energy. It also parses the different
reported decay types and branching ratios.
Parameters
----------
filename : str or file
Name of ENSDF formatted file or a file-like object containing ENSDF
formatted data
levellist : list of tuples
This is a list object which all newly processed levels will be added
to. If it's None a new one will be created.
Returns
-------
levellist : list of tuples
This is a list of all the level data. Each level has base entry with a
reaction id of 0 and additional entries for any listed decays. The
format of each row is:
nuc_id : int
The state_id of the level
rx_id : int
The id of the decay "reaction" in PyNE reaction id form.
half_life : float
Half life of the state in s
level : float
energy of the level in keV
branch_ratio : float
if rx_id != 0 this is the percent of decays in that channel
metastable : int
metastable id number of the level (if given)
special : string
single character denoting levels with unknown relation to ground
state
"""
badlist = ["ecsf", "34si", "|b{+-}fission", "{+24}ne",
"{+22}ne", "24ne", "b-f", "{+20}o", "2|e", "b++ec",
"ecp+ec2p", "ecf", "mg", "ne", "{+20}ne", "{+25}ne",
"{+28}mg", "sf(+ec+b+)"]
special = ""
if levellist is None:
levellist = []
if isinstance(filename, str):
with open(filename, 'r') as f:
dat = f.read()
else:
dat = filename.read()
datasets = dat.split(80 * " " + "\n")[0:-1]
for dataset in datasets:
lines = dataset.splitlines()
ident = re.match(_ident, lines[0])
if ident is None:
continue
if 'ADOPTED LEVELS' in ident.group(2):
leveln = 0
brs = {}
level_found = False
for line in lines:
level_l = _level_regex.match(line)
if level_l is not None:
if len(brs) > 0:
for key, val in brs.items():
goodkey = True
keystrip = key.replace("%", "").lower()
for item in badlist:
if keystrip == item:
goodkey = False
if goodkey is True:
rx = rxname.id(keystrip)
branch_percent = float(val.split("(")[0])
levellist.append((nuc_id, rx, half_lifev,
level, branch_percent,
state, special))
if level_found is True:
levellist.append((nuc_id, 0, half_lifev, level, 0.0,
state, special))
brs = {}
level, half_lifev, from_nuc, state, special = \
_parse_level_record(level_l)
if from_nuc is not None:
nuc_id = from_nuc + leveln
leveln += 1
level_found = True
else:
level_found = False
continue
levelc = _level_cont_regex.match(line)
if levelc is not None:
brs.update(_parse_level_continuation_record(levelc))
continue
if len(brs) > 0:
for key, val in brs.items():
goodkey = True
keystrip = key.replace("%", "").lower()
for item in badlist:
if keystrip == item:
goodkey = False
if goodkey is True:
rx = rxname.id(keystrip)
branch_percent = float(val.split("(")[0])
levellist.append((nuc_id, rx, half_lifev, level,
branch_percent, state, special))
if level_found is True:
levellist.append((nuc_id, 0, half_lifev, level, 0.0, state,
special))
_adjust_ge100_branches(levellist)
_adjust_metastables(levellist)
_adjust_half_lives(levellist)
return levellist
def decays(filename, decaylist=None):
"""
This splits an ENSDF file into datasets. It then passes the dataset to the
appropriate parser. Currently only a subset of decay datasets are
supported. The output is a list of objects containing information
pertaining to a particular decay.
Parameters
----------
filename : str or file
Name of ENSDF formatted file or a file-like object containing ENSDF
formatted data
decaylist : list of tuples
This is a list object which all newly processed decays will be added
to. If it's None a new one will be created.
Returns
-------
decaylist : list of tuples
list of objects containing information pertaining to a particular
decay. This information is in the following format:
int
nuc_id of the parent
int
nuc_id of the daughter
int
PyNE reaction id
float
half-life in seconds
float
half-life error in seconds
float
branching ratio (percent)
float
Conversion factor for gamma intensity to photons per 100 decays of the
parent
float
Error in conversion factor for gamma intensity
float
Conversion factor for electron capture/beta intensity to electron
captures/betas per 100 decays of the parent
float
Error in conversion factor for electron capture/beta intensity
list
a list containing information about each gamma ray:
            * starting level of gamma transition in state_id form
* final level of gamma transition in state_id form
* original parent
* energy in keV
* uncertainty in energy
* intensity (multiply by conversion factor for percentage)
* uncertainty in intensity
* electron conversion intensity
* uncertainty in electron conversion intensity
* total transition intensity
* total transition intensity error
* k electron conversion intensity
* l electron conversion intensity
* m electron conversion intensity
list
a list containing information about each alpha:
* parent nuclide id in state_id form
* child nuclide id in state_id form
* alpha energy
* alpha intensity in percent of total alphas
list
a list containing information about each beta minus from the parent
decay:
* parent nuclide id in state_id form
* child nuclide id in state_id form
* beta endpoint energy
* beta average energy
* beta intensity (multiply by conversion factor for percentage)
list
a list containing information about each beta plus and electron capture
from the parent decay:
* parent nuclide id in state_id form
* child nuclide id in state_id form
* beta plus endpoint energy
* beta plus average energy
* beta intensity (multiply by conversion factor for percentage)
* electron capture intensity (multiply by conversion factor for
percentage)
* k electron conversion intensity
* l electron conversion intensity
* m electron conversion intensity
"""
if decaylist is None:
decaylist = []
if isinstance(filename, str):
with open(filename, 'r') as f:
dat = f.read()
else:
dat = filename.read()
datasets = dat.split(80 * " " + "\n")
for dataset in datasets:
lines = dataset.splitlines()
if len(lines) == 0:
continue
ident = re.match(_ident, lines[0])
if ident is None:
continue
if 'DECAY' in ident.group(2):
decay_s = ident.group(2).split()[1]
decay = _parse_decay_dataset(lines, decay_s)
if decay is not None:
if isinstance(decay[0], list):
if isinstance(decay[3], list):
for i, parent in enumerate(decay[0]):
dc = copy.deepcopy(list(decay))
dc[0] = parent
dc[3] = decay[3][i]
dc[4] = decay[4][i]
for gamma in dc[11]:
gamma[2] = parent
for alpha in dc[12]:
alpha[0] = parent
for beta in dc[13]:
beta[0] = parent
for ecbp in dc[14]:
ecbp[0] = parent
decaylist.append(tuple(dc))
else:
for parent in decay[0]:
dc = copy.deepcopy(list(decay))
dc[0] = parent
for gamma in dc[11]:
gamma[2] = parent
for alpha in dc[12]:
alpha[0] = parent
for beta in dc[13]:
beta[0] = parent
for ecbp in dc[14]:
ecbp[0] = parent
decaylist.append(tuple(dc))
else:
decaylist.append(decay)
return decaylist
def _dlist_gen(f):
"""
This compiles a list of decay types in an ensdf file
Parameters
----------
    f : str or file
        Name of ENSDF formatted file or a file-like object containing ENSDF
        formatted data
Returns
-------
decaylist : list
list of decay types in the ENSDF file eg. ['B+','B-','A']
"""
if isinstance(f, str):
with open(f, 'r') as f:
dat = f.read()
else:
dat = f.read()
decaylist = []
datasets = dat.split(80 * " " + "\n")[0:-1]
for dataset in datasets:
lines = dataset.splitlines()
ident = re.match(_ident, lines[0])
if ident is not None:
if 'DECAY' in ident.group(2):
fin = ident.group(2).split()[1]
if fin not in decaylist:
decaylist.append(fin)
return decaylist
def _level_dlist_gen(f, keys):
"""
    This compiles a list of decay-channel keys (branching-ratio labels such as
    '%B-' or '%A') found in the ADOPTED LEVELS continuation records of an
    ENSDF file.
    Parameters
    ----------
    f : str or file
        Name of ENSDF formatted file or a file-like object containing ENSDF
        formatted data
    keys : list
        list of already known decay-channel keys; newly found keys are appended
    Returns
    -------
    keys : list
        updated list of decay-channel keys found in the ENSDF file
"""
if isinstance(f, str):
with open(f, 'r') as f:
dat = f.read()
else:
dat = f.read()
datasets = dat.split(80 * " " + "\n")[0:-1]
for dataset in datasets:
lines = dataset.splitlines()
ident = re.match(_ident, lines[0])
if ident is not None:
if 'ADOPTED LEVELS' in ident.group(2):
for line in lines:
levelc = _level_cont_regex.match(line)
if levelc is None:
continue
ddict = _parse_level_continuation_record(levelc)
for item in ddict.keys():
if item in keys:
continue
keys.append(item)
return keys
| 33.334668
| 97
| 0.536544
|
794cc524f6a9e71f0b9adc1779178e963fd32a1d
| 13,992
|
py
|
Python
|
eZmaxApi/model/list_save_listpresentation_v1_response.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
eZmaxApi/model/list_save_listpresentation_v1_response.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
eZmaxApi/model/list_save_listpresentation_v1_response.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition
    This API exposes all the functionalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.3
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from eZmaxApi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from eZmaxApi.exceptions import ApiAttributeError
def lazy_import():
from eZmaxApi.model.common_response import CommonResponse
from eZmaxApi.model.common_response_obj_debug import CommonResponseObjDebug
from eZmaxApi.model.common_response_obj_debug_payload import CommonResponseObjDebugPayload
globals()['CommonResponse'] = CommonResponse
globals()['CommonResponseObjDebug'] = CommonResponseObjDebug
globals()['CommonResponseObjDebugPayload'] = CommonResponseObjDebugPayload
class ListSaveListpresentationV1Response(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'obj_debug_payload': (CommonResponseObjDebugPayload,), # noqa: E501
'obj_debug': (CommonResponseObjDebug,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'obj_debug_payload': 'objDebugPayload', # noqa: E501
'obj_debug': 'objDebug', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ListSaveListpresentationV1Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
                                traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
obj_debug_payload (CommonResponseObjDebugPayload): [optional] # noqa: E501
obj_debug (CommonResponseObjDebug): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ListSaveListpresentationV1Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
                                traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
obj_debug_payload (CommonResponseObjDebugPayload): [optional] # noqa: E501
obj_debug (CommonResponseObjDebug): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
CommonResponse,
],
'oneOf': [
],
}
| 43.588785
| 121
| 0.593911
|
794cc55c6b5a7916d790f34a3ca7a661fd00268f
| 212
|
py
|
Python
|
examples/go_to_pose.py
|
nalbion/pycozmo
|
35ee1ea741ecf7a39affc38d4ff5ad17865fea16
|
[
"MIT"
] | 123
|
2019-08-25T21:28:23.000Z
|
2022-03-12T13:54:59.000Z
|
examples/go_to_pose.py
|
nalbion/pycozmo
|
35ee1ea741ecf7a39affc38d4ff5ad17865fea16
|
[
"MIT"
] | 41
|
2019-08-25T21:21:37.000Z
|
2022-02-09T14:20:54.000Z
|
examples/go_to_pose.py
|
nalbion/pycozmo
|
35ee1ea741ecf7a39affc38d4ff5ad17865fea16
|
[
"MIT"
] | 51
|
2019-09-04T13:30:02.000Z
|
2022-01-09T01:20:24.000Z
|
#!/usr/bin/env python
import pycozmo
with pycozmo.connect() as cli:
target = pycozmo.util.Pose(200, 100.0, 0.0, angle_z=pycozmo.util.Angle(degrees=0.0))
cli.go_to_pose(target, relative_to_robot=True)
| 21.2
| 88
| 0.721698
|
794cc57dbfb9a758719c1ec1a7b7c7ee8c48429d
| 4,683
|
py
|
Python
|
Utils/tdms_to_video_converter.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
Utils/tdms_to_video_converter.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
Utils/tdms_to_video_converter.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from tempfile import mkdtemp
from nptdms import TdmsFile
import psutil
import gc
import time
from multiprocessing.dummy import Pool as ThreadPool
from tqdm import tqdm
import cv2
class TDMs_to_Video():
""" current implementation: takes one .tdms video and saves it into as a number of .mp4 videos in a temp foldre"""
    # TODO extract video parameters from .tdms
# TODO deal with batch processing
# TODO Stitch .mp4s together
# TODO convert mp4 to avi
# TODO easier handling of saving destination
def __init__(self):
self.start_time = time.clock()
# Specify path to TDMS file and temp folder where to store data
# self.tempdir = mkdtemp(dir='D:\\')
self.tempdir = 'D:\\temp'
filefld = 'Z:\\rig_bigrig\\cameratest'
filename = 'Prot18-24-default-119418055-video.tdms'
self.filepath = os.path.join(self.tempdir, filename)
# HARDCODED variables about the video recorded
skip_data_points = 4094
self.real_width = 1936
self.width = self.real_width + 48
self.height = 1216
frame_size = self.width * self.height
self.real_frame_size = self.real_width * self.height
self.f_size = os.path.getsize(self.filepath) # size in bytes
self.tot_frames = int((self.f_size - skip_data_points) / frame_size) # num frames
self.iscolor = False # is the video RGB or greyscale
print('Total number of frames {}'.format(self.tot_frames))
# Number of parallel processes for faster writing to video
self.num_processes = 3
# load TDMS data
self.get_data()
# write to video
self.write_to_video()
# Print how long it took
print('It took {}s to process a file of {} bytes'.format(time.clock() - self.start_time, self.f_size))
####################################################################################################
def get_data(self):
""" loads data from the .tdms file """
        print('Opening binary')  # necessary, otherwise TdmsFile breaks; doesn't slow down the process
bfile = open(self.filepath, 'rb')
self.show_mem_stats()
print('Opening mmap tdms')
tdms = TdmsFile(bfile, memmap_dir=self.tempdir) # open tdms binary file as a memmapped object
self.show_mem_stats()
# show data
# plt.figure()
# plt.plot(tdms.__dict__['objects']["/'cam0'/'data'"].data[0:10000])
print('Extracting data')
tdms = tdms.__dict__['objects']["/'cam0'/'data'"].data.reshape((self.tot_frames, self.height, self.width),
order='C')
self.show_mem_stats()
print('Got data, cleaning up cached memory')
gc.collect()
self.show_mem_stats()
# reshape data
self.tdms = tdms[:, :, :self.real_width]
def write_to_video(self):
""" writes frames data from self.tdms to .mp4 videos. Pooled for faster execution"""
if self.num_processes == 1:
self.write_clip([0, self.tot_frames])
else:
# Get frames range for each video writer that will run in parallel
steps = np.linspace(0, self.tot_frames, self.num_processes + 1).astype(int)
step = steps[1]
steps2 = np.asarray([x + step for x in steps])
limits = [s for s in zip(steps, steps2)][:-1]
# start writing
pool = ThreadPool(self.num_processes)
_ = pool.map(self.write_clip, limits)
@staticmethod
def show_mem_stats():
""" shows memory usage """
giga = 1073741824
stats = psutil.virtual_memory()
print("""Total memory: {} GB
available: {} GB
free: {} GB
used: {} GB
percent: {}%
""".format(round(stats.total/giga, 2), round(stats.available/giga, 2),
round(stats.free/giga, 2), round(stats.used/giga, 2), stats.percent))
return stats.available
def write_clip(self, limits):
""" create a .cv2 videowriter and start writing """
vidname = 'output_{}.mp4'.format(limits[0])
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
videowriter = cv2.VideoWriter(os.path.join(self.tempdir, vidname), fourcc,
120, (self.real_width, self.height), self.iscolor)
for framen in tqdm(range(limits[0], limits[1])):
videowriter.write(self.tdms[framen])
videowriter.release()
if __name__=="__main__":
converter = TDMs_to_Video()
| 37.766129
| 119
| 0.589366
|
794cc61879074a04e59f6b4c366d3e0b2e63547e
| 15,092
|
py
|
Python
|
src/util/util.py
|
Shoooooon/TensorOrder
|
6a390c34f5f05a4c28bcdf5429da0582f34d749a
|
[
"MIT"
] | 14
|
2020-01-31T23:02:39.000Z
|
2021-12-26T06:00:13.000Z
|
src/util/util.py
|
Shoooooon/TensorOrder
|
6a390c34f5f05a4c28bcdf5429da0582f34d749a
|
[
"MIT"
] | 3
|
2020-06-27T21:11:46.000Z
|
2020-06-27T21:11:47.000Z
|
src/util/util.py
|
Shoooooon/TensorOrder
|
6a390c34f5f05a4c28bcdf5429da0582f34d749a
|
[
"MIT"
] | 1
|
2021-05-28T05:12:43.000Z
|
2021-05-28T05:12:43.000Z
|
import click
import ctypes
import enum
import itertools
import os
import queue
import signal
import sys
import threading
import time
class TimeoutTimer:
"""
A convenient wrapper for triggering a TimeoutError after a given time.
There should only be a single TimeoutTimer object at a given time.
"""
def __init__(self, initial_timeout):
self._initial_timeout = initial_timeout
self._start_time = 0
self._end_time = 0
self._enabled = False
def __enter__(self):
"""
Start the timer.
:return: This timer
"""
def handler(signum, frame):
raise TimeoutError()
try:
signal.signal(signal.SIGALRM, handler)
if self._initial_timeout > 0:
signal.setitimer(signal.ITIMER_REAL, self._initial_timeout)
self._enabled = True
except AttributeError:
log(
"Unable to use signals; timeout will be less effective",
Verbosity.always,
)
self._start_time = time.time()
self._end_time = self._start_time + self._initial_timeout
return self
def recap_timeout(self, new_timeout):
"""
Set the new timeout of this Timer, measured from the start of the timer,
if the new timeout would trigger sooner.
:param new_timeout: The new timeout to set (0 indicates cancel)
:return: None
"""
if new_timeout == 0:
self.cancel()
return
new_time_remaining = self._start_time + new_timeout - time.time()
if new_time_remaining < 0:
self.cancel()
self._end_time = self._start_time + new_timeout
raise TimeoutError()
else:
try:
if signal.getitimer(signal.ITIMER_REAL)[0] > new_time_remaining:
signal.setitimer(signal.ITIMER_REAL, new_time_remaining)
self._enabled = True
except AttributeError:
pass
self._end_time = self._start_time + new_timeout
def reset_timeout(self, new_timeout):
"""
Set the new timeout of this Timer, measured from the start of the timer.
:param new_timeout: The new timeout to set (0 indicates cancel)
:return: None
"""
if new_timeout == 0:
self.cancel()
self._end_time = self._start_time + new_timeout
return
new_time_remaining = self._start_time + new_timeout - time.time()
if new_time_remaining < 0:
self.cancel()
self._end_time = self._start_time + new_timeout
raise TimeoutError()
else:
try:
signal.setitimer(signal.ITIMER_REAL, new_time_remaining)
self._enabled = True
except AttributeError:
pass
self._end_time = self._start_time + new_timeout
def __exit__(self, exit_type, value, traceback):
"""
Cancel the timer.
:return: None
"""
self.cancel()
def cancel(self):
"""
Cancel the timer.
:return: None
"""
try:
signal.setitimer(signal.ITIMER_REAL, 0)
self._enabled = False
except AttributeError:
pass
self._end_time = self._start_time
def expired(self):
return (time.time() > self._end_time) and self._enabled
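# Example usage (illustrative sketch; the 5 second limit and the workload are hypothetical):
#   try:
#       with TimeoutTimer(5) as timer:
#           run_long_computation()
#   except TimeoutError:
#       log("Computation timed out", Verbosity.always)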
class Stopwatch:
"""
A stopwatch for easy measurement of elapsed time, optionally split into intervals.
"""
def __init__(self):
self.__start = time.time()
self.__interval_start = self.__start
self.__records = {}
def record_interval(self, name):
"""
Record the time elapsed since the end of the last interval.
:param name: The name of the record to make
:return: None
"""
interval_end = time.time()
self.__records[name] = interval_end - self.__interval_start
self.__interval_start = interval_end
def record_total(self, name):
"""
Record the time elapsed since the creation of this stopwatch.
:param name: The name of the record to make
:return: None
"""
self.__records[name] = time.time() - self.__start
def elapsed_time(self):
"""
Return the time elapsed since the creation of this stopwatch.
:return: The time elapsed, in seconds
"""
return time.time() - self.__start
@property
def records(self):
return dict(self.__records)
def report_times(self):
for name, record in self.records.items():
if name == "Total":
output("-", Verbosity.plan_info)
output_pair(
name + " Time",
record,
Verbosity.always if name == "Total" else Verbosity.plan_info,
)
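# Example usage (illustrative sketch; the interval names and workloads are hypothetical):
#   watch = Stopwatch()
#   load_input()
#   watch.record_interval("Parse")
#   run_solver()
#   watch.record_interval("Solve")
#   watch.record_total("Total")
#   watch.report_times()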
class TypedChoice(click.Choice):
"""
A modified version of click.Choice that allows the choice options to be arbitrary objects.
The argument is compared against the string representation of each object; if it matches,
then the object is returned.
As with click.Choice, you should only pass a list or tuple of choices. Other iterables
(like generators) may lead to surprising results.
:param case_sensitive: Set to false to make choices case insensitive. Defaults to true.
"""
name = "typedchoice"
def __init__(self, choices, case_sensitive=True):
self.object_choices = choices
click.Choice.__init__(
self, list(map(str, choices)), case_sensitive=case_sensitive
)
def convert(self, value, param, ctx):
# Exact match
if value in self.choices:
return self.object_choices[self.choices.index(value)]
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
# preserve original `value` to produce an accurate message in
# `self.fail`
normed_value = value
normed_choices = self.choices
if ctx is not None and ctx.token_normalize_func is not None:
normed_value = ctx.token_normalize_func(value)
normed_choices = [
ctx.token_normalize_func(choice) for choice in self.choices
]
if not self.case_sensitive:
normed_value = normed_value.lower()
normed_choices = [choice.lower() for choice in normed_choices]
if normed_value in normed_choices:
return self.object_choices[normed_choices.index(normed_value)]
self.fail(
"invalid choice: %s. (choose from %s)" % (value, ", ".join(self.choices)),
param,
ctx,
)
def __repr__(self):
return "TypedChoice(%r)" % list(self.choices)
class TaggedChoice(click.Choice):
"""
A modified version of click.Choice that allows the choice options to be provided as a
dictionary. The argument is compared against the keys of the dictionary; if it matches,
then the corresponding value is returned.
:param case_sensitive: Set to false to make choices case insensitive. Defaults to true.
"""
name = "taggedchoice"
def __init__(self, options, case_sensitive=True):
self.options = options
click.Choice.__init__(self, list(options.keys()), case_sensitive=case_sensitive)
def convert(self, value, param, ctx):
# Exact match
if value in self.options:
return self.options[value]
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
# preserve original `value` to produce an accurate message in
# `self.fail`
def normalize(val):
if ctx is not None and ctx.token_normalize_func is not None:
val = ctx.token_normalize_func(val)
if not self.case_sensitive:
val = val.lower()
return val
normalized_value = normalize(value)
for key in self.options:
if normalize(key) == normalized_value:
return self.options[key]
self.fail(
"invalid choice: %s. (choose from %s)" % (value, ", ".join(self.options)),
param,
ctx,
)
def __repr__(self):
return "TypedChoice(%r)" % list(self.choices)
class FileLocator:
"""
A class to aid in the lookup of files that may or may not be in a Singularity image.
Files in a Singularity image are relative to root.
Other files are relative to the local directory.
"""
def __getitem__(self, location):
if os.path.exists(location):
return location
elif os.path.exists("/" + location):
return "/" + location
elif os.path.exists("../" + location):
return "../" + location
else:
raise EnvironmentError("Unable to locate " + location)
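# Example usage (illustrative sketch; the relative path is hypothetical):
#   locator = FileLocator()
#   solver_path = locator["solvers/cachet/cachet"]  # resolved at ./, /, or ../ in that order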
class DimacsStream:
"""
A class to aid in parsing of a DIMACS-style filestream.
"""
def __init__(
self,
stream,
comment_prefixes=frozenset({"c", "O"}),
process_comment=lambda x: None,
):
"""
:param stream: Input stream to parse.
:param comment_prefixes: A set of characters of prefixes indicating a comment line.
:param process_comment: A method to call on all comments discovered during the parse.
"""
self.__stream = stream
self.__comment_prefixes = comment_prefixes
self.__process_comment = process_comment
def parse_line(self, allowed_prefixes=None):
"""
Locate and parse the next line of a DIMACS-style stream, ignoring comments.
Raises a RuntimeError if this line has an unexpected prefix.
:param allowed_prefixes: A set of characters of prefixes to allow.
:return: A list of space-separated elements of the next line, or None if EOF.
"""
for line in self.__stream:
if len(line) == 0:
continue
elif line[0] in self.__comment_prefixes:
self.__process_comment(line.rstrip())
continue
elif allowed_prefixes is None or line[0] in allowed_prefixes:
return line.split()
else:
raise RuntimeError("Unexpected line prefix in: {0}".format(line))
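# Example usage (illustrative sketch; the file name is hypothetical):
#   with open("formula.cnf") as f:
#       stream = DimacsStream(f)
#       header = stream.parse_line(allowed_prefixes={"p"})
#       # header would be e.g. ['p', 'cnf', '3', '2'] for a standard DIMACS CNF header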
class Verbosity(enum.IntEnum):
always = 0
stages = 1
plan_info = 2
progress = 3
solver_output = 4
debug = 5
output_verbosity = Verbosity.debug
def set_verbosity(verbosity):
"""
Set the level of information to output, globally
:param verbosity: 0 (minimal), 1, 2, 3, 4, 5 (everything)
:return: None
"""
global output_verbosity
output_verbosity = verbosity
def output(arg, verbosity=Verbosity.debug):
"""
Output the text to stdout according to the global log level
:param arg: Text to output
:param verbosity: 0 (always), 1, 2, 3, 4, 5 (debug only)
:return: None
"""
global output_verbosity
if verbosity <= output_verbosity:
print(arg)
def output_pair(key, value, verbosity=Verbosity.debug, flush=True):
"""
Output the key/value pair to the global log level like "Key: Value"
:param key: Key to output
:param value: Value to output
:param verbosity: 0 (always), 1, 2, 3, 4, 5 (debug only)
:param flush: If true, flush stdout afterwards
:return: None
"""
global output_verbosity
if verbosity <= output_verbosity:
print(str(key) + ": " + str(value), flush=flush)
def log(arg, verbosity=Verbosity.debug, flush=True, **kwargs):
"""
Output the text to stderr according to the global log level
:param arg: Text to output
:param verbosity: 0 (always), 1, 2, 3, 4, 5 (debug only)
:param flush: If true, flush stderr afterwards
:param kwargs: Other arguments, passed to stderr
:return:
"""
if verbosity <= output_verbosity:
print(arg, flush=flush, file=sys.stderr, **kwargs)
def kill_on_crash(sig=None):
"""
Ensure that the child process is killed if the parent exits (e.g. from a cython segfault).
From https://stackoverflow.com/questions/320232/ensuring-subprocesses-are-dead-on-exiting-python-program
"""
if sig is None:
sig = signal.SIGKILL
def do():
libc = ctypes.CDLL("libc.so.6")
return libc.prctl(1, sig)
return do
class BufferedStream:
"""
Buffer the output of the stream through a queue on a separate thread.
An unbuffered process.stdout stream does not behave well with timeouts.
"""
def __init__(self, stream, timer=None):
self.__stream = stream
self.__timer = timer
self.__queue = queue.Queue()
self.__finished = False
def enqueue_output():
for line in self.__stream:
self.__queue.put(line)
self.__stream.close()
self.__finished = True
self.__thread = threading.Thread(target=enqueue_output)
self.__thread.daemon = True
self.__thread.start()
def __iter__(self):
return self
def __next__(self):
while True:
try:
if self.__timer is not None and self.__timer.expired():
# If the timer does not successfully go off (i.e., Windows), trigger it here
raise TimeoutError()
return self.__queue.get(block=True, timeout=1)
except queue.Empty:
if self.__finished:
raise StopIteration
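# Example usage (illustrative sketch; the subprocess command is hypothetical):
#   import subprocess
#   proc = subprocess.Popen(["solver", "input.cnf"], stdout=subprocess.PIPE, text=True)
#   with TimeoutTimer(60) as timer:
#       for line in BufferedStream(proc.stdout, timer):
#           output(line, Verbosity.solver_output)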
class GroupedHelp(click.Command):
"""
Add high-level grouping to help command output
"""
def __init__(self, groups, **kwargs):
click.Command.__init__(self, **kwargs)
self.__groups = groups
def get_help(self, ctx):
help_text = click.Command.get_help(self, ctx)
for indicator, group_name in self.__groups.items():
argument = " --" + indicator
help_text = help_text.replace(
argument, "\n" + group_name + ":\n" + argument
)
return help_text
def split_every(iterable, n):
"""
Split the iterable into lists of size n.
Note that the final iterable may be < n, if n does not evenly divide the number of elements.
:param iterable: The iterable to split
:param n: Size of groups
:return: An iterator that yields lists of size <= n.
"""
i = iter(iterable)
piece = list(itertools.islice(i, n))
while piece:
yield piece
piece = list(itertools.islice(i, n))
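# Example (illustrative sketch):
#   >>> list(split_every(range(7), 3))
#   [[0, 1, 2], [3, 4, 5], [6]]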
def normalize_TPU_addr(addr):
"""
Ensure that a TPU addr always has the form grpc://.*:8470
:param addr:
:return:
"""
if not addr.startswith("grpc://"):
addr = "grpc://" + addr
if not addr.endswith(":8470"):
addr = addr + ":8470"
return addr
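# Example (illustrative sketch; the address is hypothetical):
#   >>> normalize_TPU_addr("10.0.0.2")
#   'grpc://10.0.0.2:8470'
#   >>> normalize_TPU_addr("grpc://10.0.0.2:8470")
#   'grpc://10.0.0.2:8470'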
| 29.944444
| 108
| 0.606281
|
794cc6867141386e74c194e2df1578c20276c526
| 7,641
|
py
|
Python
|
vgio/duke3d/tests/test_map.py
|
joshuaskelly/game-tools
|
e71bcf4ef6553adf0b51f4379f72bc5a82a60176
|
[
"MIT"
] | 22
|
2017-11-30T22:13:50.000Z
|
2019-12-19T17:56:40.000Z
|
vgio/duke3d/tests/test_map.py
|
joshuaskelly/vgio
|
e71bcf4ef6553adf0b51f4379f72bc5a82a60176
|
[
"MIT"
] | 22
|
2019-08-11T05:07:26.000Z
|
2020-12-30T16:07:04.000Z
|
vgio/duke3d/tests/test_map.py
|
joshuaskelly/game-tools
|
e71bcf4ef6553adf0b51f4379f72bc5a82a60176
|
[
"MIT"
] | 4
|
2018-06-24T14:04:36.000Z
|
2019-05-14T06:01:51.000Z
|
import unittest
from vgio.duke3d.tests.basecase import TestCase
from vgio.duke3d import map
class TestMapReadWrite(TestCase):
def test_check_file_type(self):
self.assertFalse(map.is_mapfile('./test_data/test.art'))
def test_sector(self):
s0 = map.Sector(
wall_pointer=1,
wall_number=2,
ceiling_z=3,
floor_z=4,
ceiling_stat=5,
floor_stat=6,
ceiling_picnum=7,
ceiling_heinum=8,
ceiling_shade=9,
ceiling_palette=10,
ceiling_x_panning=11,
ceiling_y_panning=12,
floor_picnum=13,
floor_heinum=14,
floor_shade=15,
floor_palette=16,
floor_x_panning=17,
floor_y_panning=18,
visibility=19,
lotag=20,
hitag=21,
extra=22
)
map.Sector.write(self.buff, s0)
self.buff.seek(0)
s1 = map.Sector.read(self.buff)
self.assertEqual(s0.wall_pointer, s1.wall_pointer, 'Wall_pointer values should be equal')
self.assertEqual(s0.wall_number, s1.wall_number, 'Wall_number values should be equal')
self.assertEqual(s0.ceiling_z, s1.ceiling_z, 'Ceiling_z values should be equal')
self.assertEqual(s0.floor_z, s1.floor_z, 'Floor_z values should be equal')
self.assertEqual(s0.ceiling_stat, s1.ceiling_stat, 'Ceiling_stat values should be equal')
self.assertEqual(s0.floor_stat, s1.floor_stat, 'Floor_stat values should be equal')
self.assertEqual(s0.ceiling_picnum, s1.ceiling_picnum, 'Ceiling_picnum values should be equal')
self.assertEqual(s0.ceiling_heinum, s1.ceiling_heinum, 'Ceiling_heinum values should be equal')
self.assertEqual(s0.ceiling_shade, s1.ceiling_shade, 'Ceiling_shade values should be equal')
self.assertEqual(s0.ceiling_palette, s1.ceiling_palette, 'Ceiling_palette values should be equal')
self.assertEqual(s0.ceiling_x_panning, s1.ceiling_x_panning, 'Ceiling_x_panning values should be equal')
self.assertEqual(s0.ceiling_y_panning, s1.ceiling_y_panning, 'Ceiling_y_panning values should be equal')
self.assertEqual(s0.floor_picnum, s1.floor_picnum, 'Floor_picnum values should be equal')
self.assertEqual(s0.floor_heinum, s1.floor_heinum, 'Floor_heinum values should be equal')
self.assertEqual(s0.floor_shade, s1.floor_shade, 'Floor_shade values should be equal')
self.assertEqual(s0.floor_palette, s1.floor_palette, 'Floor_palette values should be equal')
self.assertEqual(s0.floor_x_panning, s1.floor_x_panning, 'Floor_x_panning values should be equal')
self.assertEqual(s0.floor_y_panning, s1.floor_y_panning, 'Floor_y_panning values should be equal')
self.assertEqual(s0.visibility, s1.visibility, 'Visibility values should be equal')
self.assertEqual(s0.lotag, s1.lotag, 'Lotag values should be equal')
self.assertEqual(s0.hitag, s1.hitag, 'Hitag values should be equal')
self.assertEqual(s0.extra, s1.extra, 'Extra values should be equal')
def test_wall(self):
w0 = map.Wall(
x=0,
y=1,
point2=2,
next_wall=-1,
next_sector=4,
cstat=5,
picnum=6,
over_picnum=7,
shade=8,
palette=9,
x_repeat=10,
y_repeat=11,
x_panning=12,
y_panning=13,
lotag=14,
hitag=15,
extra=16
)
map.Wall.write(self.buff, w0)
self.buff.seek(0)
w1 = map.Wall.read(self.buff)
self.assertEqual(w0.x, w1.x, 'X values should be equal')
self.assertEqual(w0.y, w1.y, 'Y values should be equal')
self.assertEqual(w0.point2, w1.point2, 'Point2 values should be equal')
self.assertEqual(w0.next_wall, w1.next_wall, 'Next_wall values should be equal')
self.assertEqual(w0.next_sector, w1.next_sector, 'Next_sector values should be equal')
self.assertEqual(w0.cstat, w1.cstat, 'Cstat values should be equal')
self.assertEqual(w0.picnum, w1.picnum, 'Picnum values should be equal')
self.assertEqual(w0.over_picnum, w1.over_picnum, 'Over_picnum values should be equal')
self.assertEqual(w0.shade, w1.shade, 'Shade values should be equal')
self.assertEqual(w0.palette, w1.palette, 'Palette values should be equal')
self.assertEqual(w0.x_repeat, w1.x_repeat, 'X_repeat values should be equal')
self.assertEqual(w0.y_repeat, w1.y_repeat, 'Y_repeat values should be equal')
self.assertEqual(w0.x_panning, w1.x_panning, 'X_panning values should be equal')
self.assertEqual(w0.y_panning, w1.y_panning, 'Y_panning values should be equal')
self.assertEqual(w0.lotag, w1.lotag, 'Lotag values should be equal')
self.assertEqual(w0.hitag, w1.hitag, 'Hitag values should be equal')
self.assertEqual(w0.extra, w1.extra, 'Extra values should be equal')
def test_sprite(self):
s0 = map.Sprite(
x=0,
y=1,
z=2,
cstat=3,
picnum=4,
shade=5,
palette=6,
clip_distance=7,
x_repeat=8,
y_repeat=9,
x_offset=10,
y_offset=11,
sector_number=12,
status_number=13,
angle=14,
owner=15,
x_velocity=16,
y_velocity=17,
z_velocity=18,
lotag=19,
hitag=20,
extra=21
)
map.Sprite.write(self.buff, s0)
self.buff.seek(0)
s1 = map.Sprite.read(self.buff)
self.assertEqual(s0.x, s1.x, 'X values should be equal')
self.assertEqual(s0.y, s1.y, 'Y values should be equal')
self.assertEqual(s0.z, s1.z, 'Z values should be equal')
self.assertEqual(s0.cstat, s1.cstat, 'Cstat values should be equal')
self.assertEqual(s0.picnum, s1.picnum, 'Picnum values should be equal')
self.assertEqual(s0.shade, s1.shade, 'Shade values should be equal')
self.assertEqual(s0.palette, s1.palette, 'Palette values should be equal')
self.assertEqual(s0.clip_distance, s1.clip_distance, 'Clip_distance values should be equal')
self.assertEqual(s0.x_repeat, s1.x_repeat, 'X_repeat values should be equal')
self.assertEqual(s0.y_repeat, s1.y_repeat, 'Y_repeat values should be equal')
self.assertEqual(s0.x_offset, s1.x_offset, 'X_offset values should be equal')
self.assertEqual(s0.y_offset, s1.y_offset, 'Y_offset values should be equal')
self.assertEqual(s0.sector_number, s1.sector_number, 'Sector_number values should be equal')
self.assertEqual(s0.status_number, s1.status_number, 'Status_number values should be equal')
self.assertEqual(s0.angle, s1.angle, 'Angle values should be equal')
self.assertEqual(s0.owner, s1.owner, 'Owner values should be equal')
self.assertEqual(s0.x_velocity, s1.x_velocity, 'X_velocity values should be equal')
self.assertEqual(s0.y_velocity, s1.y_velocity, 'Y_velocity values should be equal')
self.assertEqual(s0.z_velocity, s1.z_velocity, 'Z_velocity values should be equal')
self.assertEqual(s0.lotag, s1.lotag, 'Lotag values should be equal')
self.assertEqual(s0.hitag, s1.hitag, 'Hitag values should be equal')
self.assertEqual(s0.extra, s1.extra, 'Extra values should be equal')
if __name__ == '__main__':
unittest.main()
| 46.591463
| 112
| 0.647297
|
794cc6867b96c425546f7a5ec84cbdf35de7c533
| 3,554
|
py
|
Python
|
homeassistant/components/tplink/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 4
|
2019-07-03T22:36:57.000Z
|
2019-08-10T15:33:25.000Z
|
homeassistant/components/tplink/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 7
|
2019-08-23T05:26:02.000Z
|
2022-03-11T23:57:18.000Z
|
homeassistant/components/tplink/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 2
|
2018-08-15T03:59:35.000Z
|
2018-10-18T12:20:05.000Z
|
"""Component to embed TP-Link smart home devices."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_HOST
from homeassistant import config_entries
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .common import (
async_discover_devices,
get_static_devices,
ATTR_CONFIG,
CONF_DIMMER,
CONF_DISCOVERY,
CONF_LIGHT,
CONF_SWITCH,
SmartDevices
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'tplink'
TPLINK_HOST_SCHEMA = vol.Schema({
vol.Required(CONF_HOST): cv.string
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_LIGHT, default=[]): vol.All(
cv.ensure_list,
[TPLINK_HOST_SCHEMA]
),
vol.Optional(CONF_SWITCH, default=[]): vol.All(
cv.ensure_list,
[TPLINK_HOST_SCHEMA]
),
vol.Optional(CONF_DIMMER, default=[]): vol.All(
cv.ensure_list,
[TPLINK_HOST_SCHEMA]
),
vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
}),
}, extra=vol.ALLOW_EXTRA)
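# Illustrative configuration.yaml entry accepted by the schema above (a sketch:
# it assumes the usual key names 'light', 'switch', 'dimmer' and 'discovery'
# from .common, and the host addresses are hypothetical):
#
#   tplink:
#     discovery: true
#     light:
#       - host: 192.168.1.10
#     switch:
#       - host: 192.168.1.11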
async def async_setup(hass, config):
"""Set up the TP-Link component."""
conf = config.get(DOMAIN)
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_CONFIG] = conf
if conf is not None:
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT}))
return True
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigType):
"""Set up TPLink from a config entry."""
config_data = hass.data[DOMAIN].get(ATTR_CONFIG)
# These will contain the initialized devices
lights = hass.data[DOMAIN][CONF_LIGHT] = []
switches = hass.data[DOMAIN][CONF_SWITCH] = []
# Add static devices
static_devices = SmartDevices()
if config_data is not None:
static_devices = get_static_devices(
config_data,
)
lights.extend(static_devices.lights)
switches.extend(static_devices.switches)
# Add discovered devices
if config_data is None or config_data[CONF_DISCOVERY]:
discovered_devices = await async_discover_devices(hass, static_devices)
lights.extend(discovered_devices.lights)
switches.extend(discovered_devices.switches)
forward_setup = hass.config_entries.async_forward_entry_setup
if lights:
_LOGGER.debug(
"Got %s lights: %s",
len(lights),
", ".join([d.host for d in lights])
)
hass.async_create_task(forward_setup(config_entry, 'light'))
if switches:
_LOGGER.debug(
"Got %s switches: %s",
len(switches),
", ".join([d.host for d in switches])
)
hass.async_create_task(forward_setup(config_entry, 'switch'))
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
forward_unload = hass.config_entries.async_forward_entry_unload
remove_lights = remove_switches = False
if hass.data[DOMAIN][CONF_LIGHT]:
remove_lights = await forward_unload(entry, 'light')
if hass.data[DOMAIN][CONF_SWITCH]:
remove_switches = await forward_unload(entry, 'switch')
if remove_lights or remove_switches:
hass.data[DOMAIN].clear()
return True
# We were not able to unload the platforms, either because there
# were none or one of the forward_unloads failed.
return False
| 28.66129
| 79
| 0.66798
|
794cc6ec1d3975fbc59464027d957f40d9223528
| 5,229
|
py
|
Python
|
instagram/views.py
|
xamaan585/InstaClone
|
4c1b41c2c77cfc04808d339db7ed7e337c36cea3
|
[
"Unlicense"
] | null | null | null |
instagram/views.py
|
xamaan585/InstaClone
|
4c1b41c2c77cfc04808d339db7ed7e337c36cea3
|
[
"Unlicense"
] | null | null | null |
instagram/views.py
|
xamaan585/InstaClone
|
4c1b41c2c77cfc04808d339db7ed7e337c36cea3
|
[
"Unlicense"
] | null | null | null |
from django.http import HttpResponse,Http404,HttpResponseRedirect
import datetime as dt
from django.shortcuts import render,redirect,get_object_or_404
from .models import Follow, Image,Profile,Comments
from django.contrib.auth.models import User
from .forms import NewsLetterForm, UpdateUserForm, UpdateUserProfileForm, UserRegisterForm,PostForm,CommentForm
# from .email import send_welcome_email
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.urls import reverse
@login_required(login_url='/accounts/login/')
def index(request):
posts= Image.objects.all()
comments = Comments.objects.all()
all_users = User.objects.exclude(id=request.user.id)
current_user = request.user
if request.method == 'POST':
post_form = PostForm(request.POST, request.FILES)
if post_form.is_valid():
post = post_form.save(commit=False)
post.user = request.user
post.save()
return HttpResponseRedirect(reverse("home"))
else:
post_form = PostForm()
return render(request, 'all-instagram/home.html',{'posts': posts,'post_form': post_form,'all_users': all_users,'comments':comments,'current_user':current_user} )
def register(request):
if request.user.is_authenticated:
#redirect user to the profile page
return redirect('home')
if request.method=="POST":
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request,f'Account created for {username}!')
return redirect('login')
else:
form = UserRegisterForm()
return render(request,"registration/register.html",{'form':form})
@login_required(login_url='login')
def profile(request, username):
images = request.user.images.all()
if request.method == 'POST':
user_form = UpdateUserForm(request.POST, instance=request.user.profile)
profile_form = UpdateUserProfileForm(request.POST, request.FILES, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
return HttpResponseRedirect(request.path_info)
else:
user_form = UpdateUserForm(instance=request.user)
profile_form = UpdateUserProfileForm()
return render(request, 'all-instagram/profile.html', {'user_form':user_form,'profile_form':profile_form,'images':images})
@login_required(login_url='login')
def comment(request, id):
image = Image.objects.get(id=id)
comments = Comments.objects.all()
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
new_comment = form.save(commit=False)
new_comment.image = image
new_comment.user = request.user.profile
new_comment.save()
return HttpResponseRedirect(request.path_info)
else:
form = CommentForm()
return render(request, 'all-instagram/post.html', {'post': image,'form': form,'comments':comments})
@login_required(login_url='login')
def unfollow(request, to_unfollow):
if request.method == 'GET':
unfollow_profile = Profile.objects.get(pk=to_unfollow)
new_unfollowed = Follow.objects.filter(follower=request.user.profile, followed=unfollow_profile)
new_unfollowed.delete()
return redirect('user_profile', unfollow_profile.user.username)
@login_required(login_url='login')
def follow(request, to_follow):
if request.method == 'GET':
follow_profile = Profile.objects.get(pk=to_follow)
new_following = Follow(follower=request.user.profile, followed=follow_profile)
new_following.save()
return redirect('user_profile', follow_profile.user.username)
@login_required(login_url='login')
def user_profile(request, username):
user_poster = get_object_or_404(User, username=username)
if request.user == user_poster:
return redirect('profile', username=request.user.username)
user_posts = user_poster.images.all()
followers = Follow.objects.filter(followed=user_poster.profile)
if_follow = None
for follower in followers:
if request.user.profile == follower.follower:
if_follow = True
else:
if_follow = False
print(followers)
return render(request, 'all-instagram/poster.html', {'user_poster': user_poster,'followers': followers, 'if_follow': if_follow,'user_posts':user_posts})
@login_required(login_url='login')
def like(request, id):
post = Image.objects.get(id = id)
post.likes += 1
post.save()
return HttpResponseRedirect(reverse("home"))
@login_required(login_url='login')
def search(request):
profiles = User.objects.all()
if 'username' in request.GET and request.GET['username']:
search_term = request.GET.get('username')
results = User.objects.filter(username__icontains=search_term)
print(results)
return render(request, 'all-instagram/users.html',locals())
return redirect(index)
| 36.566434
| 165
| 0.689807
|
794cc77f613223557804595389a16d28c8a63cb6
| 4,950
|
py
|
Python
|
trax/layers/__init__.py
|
pkozakowski/trax
|
31215c378017347e0b66ba51c37cd3cbedf60b17
|
[
"Apache-2.0"
] | 1
|
2021-03-09T10:47:00.000Z
|
2021-03-09T10:47:00.000Z
|
trax/layers/__init__.py
|
pkozakowski/trax
|
31215c378017347e0b66ba51c37cd3cbedf60b17
|
[
"Apache-2.0"
] | null | null | null |
trax/layers/__init__.py
|
pkozakowski/trax
|
31215c378017347e0b66ba51c37cd3cbedf60b17
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Layers: trainable functions as neural network building blocks."""
import gin
# We create a flat layers.* namespace for uniform calling conventions as we
# upstream changes.
# pylint: disable=wildcard-import
from trax.layers.acceleration import *
from trax.layers.activation_fns import *
from trax.layers.assert_shape import *
from trax.layers.attention import *
from trax.layers.base import *
from trax.layers.combinators import *
from trax.layers.convolution import *
from trax.layers.core import *
from trax.layers.deconvolution import *
from trax.layers.initializers import *
from trax.layers.metrics import *
from trax.layers.normalization import *
from trax.layers.pooling import *
from trax.layers.research.efficient_attention import *
from trax.layers.research.position_encodings import *
from trax.layers.research.rel_attention import *
from trax.layers.research.sparsity import *
from trax.layers.reversible import *
from trax.layers.rnn import *
# Ginify
def layer_configure(*args, **kwargs):
kwargs['module'] = 'trax.layers'
return gin.external_configurable(*args, **kwargs)
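# Illustrative sketch: after ginification, these layers can be referenced from a
# gin config file under the 'trax.layers' module. The parameter name below is an
# assumption, not taken from this file:
#
#   # inside a .gin config file
#   trax.layers.LayerNorm.epsilon = 1e-6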
# pylint: disable=used-before-assignment
# pylint: disable=invalid-name
Relu = layer_configure(Relu)
Gelu = layer_configure(Gelu)
FastGelu = layer_configure(FastGelu)
Sigmoid = layer_configure(Sigmoid)
Tanh = layer_configure(Tanh)
HardSigmoid = layer_configure(HardSigmoid)
HardTanh = layer_configure(HardTanh)
Exp = layer_configure(Exp)
LogSoftmax = layer_configure(LogSoftmax)
Softmax = layer_configure(Softmax)
Softplus = layer_configure(Softplus)
L2Loss = layer_configure(L2Loss)
LSTMCell = layer_configure(LSTMCell)
GRUCell = layer_configure(GRUCell)
BatchNorm = layer_configure(BatchNorm)
LayerNorm = layer_configure(LayerNorm)
FilterResponseNorm = layer_configure(FilterResponseNorm)
ThresholdedLinearUnit = layer_configure(ThresholdedLinearUnit)
Attention = layer_configure(Attention, denylist=['mode'])
CausalAttention = layer_configure(CausalAttention, denylist=['mode'])
FavorAttention = layer_configure(FavorAttention, denylist=['mode'])
Favor = layer_configure(Favor, denylist=['mode'])
CausalFavor = layer_configure(CausalFavor, denylist=['mode'])
CausalFavorAttention = layer_configure(CausalFavorAttention, denylist=['mode'])
DotProductCausalAttention = layer_configure(
DotProductCausalAttention, denylist=['mode'])
SelfAttention = layer_configure(SelfAttention, denylist=['mode'])
ModularCausalAttention = layer_configure(ModularCausalAttention,
denylist=['mode'])
LowRankCausalAttention = layer_configure(LowRankCausalAttention,
denylist=['mode'])
MultiplicativeCausalAttention = layer_configure(MultiplicativeCausalAttention,
denylist=['mode'])
MultiplicativeModularCausalAttention = layer_configure(
MultiplicativeModularCausalAttention, denylist=['mode'])
ConvCausalAttention = layer_configure(ConvCausalAttention, denylist=['mode'])
MultiplicativeConvCausalAttention = layer_configure(
MultiplicativeConvCausalAttention, denylist=['mode'])
ConvTranspose = layer_configure(ConvTranspose)
LSHSelfAttention = layer_configure(LSHSelfAttention, denylist=['mode'])
PureLSHSelfAttention = layer_configure(PureLSHSelfAttention, denylist=['mode'])
MixedLSHSelfAttention = layer_configure(
MixedLSHSelfAttention, denylist=['mode'])
PureLSHSelfAttentionWrapper = layer_configure(
PureLSHSelfAttentionWrapper, denylist=['mode'])
EncDecAttention = layer_configure(EncDecAttention, denylist=['mode'])
InfinitePositionalEncoding = layer_configure(
InfinitePositionalEncoding, denylist=['mode'])
TimeBinPositionalEncoding = layer_configure(
TimeBinPositionalEncoding, denylist=['mode'])
AtariConvInit = layer_configure(AtariConvInit)
CrossEntropyLossWithLogSoftmax = layer_configure(CrossEntropyLossWithLogSoftmax)
WeightedCategoryAccuracy = layer_configure(WeightedCategoryAccuracy)
SequenceAccuracy = layer_configure(SequenceAccuracy)
CategoryCrossEntropy = layer_configure(CategoryCrossEntropy)
WeightedCategoryCrossEntropy = layer_configure(WeightedCategoryCrossEntropy)
MacroAveragedFScore = layer_configure(MacroAveragedFScore)
RelativeAttentionLayer = layer_configure(RelativeAttentionLayer)
RelativeAttentionLMLayer = layer_configure(RelativeAttentionLMLayer)
| 43.421053
| 80
| 0.79798
|
794cc8222efc78e63ae98bc7e69eca2e83345867
| 2,137
|
py
|
Python
|
19-monster-messages/test_solution19.py
|
johntelforduk/advent-of-code-2020
|
138df3a7b12e418f371f641fed02e57a98a7392e
|
[
"MIT"
] | 1
|
2020-12-03T13:20:49.000Z
|
2020-12-03T13:20:49.000Z
|
19-monster-messages/test_solution19.py
|
johntelforduk/advent-of-code-2020
|
138df3a7b12e418f371f641fed02e57a98a7392e
|
[
"MIT"
] | null | null | null |
19-monster-messages/test_solution19.py
|
johntelforduk/advent-of-code-2020
|
138df3a7b12e418f371f641fed02e57a98a7392e
|
[
"MIT"
] | null | null | null |
# Unit tests for day 19 of AOC 2020, Monster Messages.
from solution19 import rule_to_regex, message_match_regexp
import unittest
class TestFunctions(unittest.TestCase):
def test_functions(self):
test_regex = rule_to_regex(rules={'0': ['"a"']}, rule_number='0')
self.assertEqual(test_regex, 'a')
self.assertTrue(message_match_regexp(message='a', regexp=test_regex))
self.assertFalse(message_match_regexp(message='b', regexp=test_regex))
self.assertFalse(message_match_regexp(message='ab', regexp=test_regex))
self.assertFalse(message_match_regexp(message='ba', regexp=test_regex))
test_regex = rule_to_regex(rules={'0': ['1 2 3'], '1': ['4 5'], '4': ['"a"'],
'5': ['"b"'], '2': ['"c"'], '3': ['"d"']},
rule_number='0')
self.assertTrue(message_match_regexp(message='abcd', regexp=test_regex))
test_regex = rule_to_regex(rules={'0': ['1 2'], '1': ['"a"'], '2': ['1 3', '3 1'], '3': ['"b"']},
rule_number='0')
self.assertTrue(message_match_regexp(message='aab', regexp=test_regex))
self.assertTrue(message_match_regexp(message='aba', regexp=test_regex))
self.assertFalse(message_match_regexp(message='baa', regexp=test_regex))
self.assertFalse(message_match_regexp(message='abb', regexp=test_regex))
test_regex = rule_to_regex(rules={'0': ['4 1 5'], '1': ['2 3', '3 2'], '2': ['4 4', '5 5'],
'3': ['4 5', '5 4'], '4': ['"a"'], '5': ['"b"']},
rule_number='0')
self.assertTrue(message_match_regexp(message='ababbb', regexp=test_regex))
self.assertTrue(message_match_regexp(message='abbbab', regexp=test_regex))
self.assertFalse(message_match_regexp(message='bababa', regexp=test_regex))
self.assertFalse(message_match_regexp(message='aaabbb', regexp=test_regex))
self.assertFalse(message_match_regexp(message='aaaabbb', regexp=test_regex))
if __name__ == '__main__':
unittest.main()
| 50.880952
| 105
| 0.60365
|
794cc901eb3e1683819050817ea3bde13d4933c2
| 1,629
|
py
|
Python
|
learning/setup.py
|
dibakch/differential-privacy
|
ae9c6b6d5b7e772837ae336d1b3092683481ec16
|
[
"Apache-2.0"
] | 2,550
|
2019-09-04T13:13:24.000Z
|
2022-03-31T16:05:50.000Z
|
learning/setup.py
|
fbalicchia/differential-privacy
|
099080e49c4c047802d785bc818898c0caf84d45
|
[
"Apache-2.0"
] | 90
|
2019-09-10T15:37:10.000Z
|
2022-03-28T12:55:03.000Z
|
learning/setup.py
|
fbalicchia/differential-privacy
|
099080e49c4c047802d785bc818898c0caf84d45
|
[
"Apache-2.0"
] | 324
|
2019-09-05T11:52:06.000Z
|
2022-03-31T03:30:26.000Z
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for DP Learning package."""
import os
import setuptools
here = os.path.dirname(os.path.abspath(__file__))
def _parse_requirements(path):
"""Parses requirements from file."""
with open(os.path.join(here, path)) as f:
return [line.rstrip() for line in f] + ["dp-accounting"]
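# Illustrative sketch: a hypothetical requirements.txt containing the lines
# "numpy" and "scipy" would make _parse_requirements return
# ['numpy', 'scipy', 'dp-accounting'].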
setuptools.setup(
name="dp-learning",
author="Google Differential Privacy Team",
author_email="dp-open-source@google.com",
description="Differential privacy learning algorithms",
long_description_content_type="text/markdown",
url="https://github.com/google/differential-privacy/",
packages=setuptools.find_packages(),
install_requires=_parse_requirements("requirements.txt"),
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
],
python_requires=">=3.7",
license="Apache 2.0",
keywords="differential-privacy clustering",
)
| 34.659574
| 74
| 0.712093
|
794cc97c324a12c8dfed190e7a372f4af1d97c3c
| 2,731
|
py
|
Python
|
laboratorios/models.py
|
tacianosilva/gestorlab
|
ca18b2b442ea1ae814f87cb4b4624ec9331fe062
|
[
"MIT"
] | null | null | null |
laboratorios/models.py
|
tacianosilva/gestorlab
|
ca18b2b442ea1ae814f87cb4b4624ec9331fe062
|
[
"MIT"
] | 1
|
2020-07-23T13:39:26.000Z
|
2020-07-23T13:39:26.000Z
|
laboratorios/models.py
|
tacianosilva/gestorlab
|
ca18b2b442ea1ae814f87cb4b4624ec9331fe062
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from django.conf import settings
from django.db import models
from django.utils.text import slugify
class Departamento(models.Model):
"""
    A department has an identifier, code, name, acronym, address, and website.
"""
id_unidade = models.IntegerField(unique=True)
codigo = models.IntegerField(unique=True)
nome = models.CharField(max_length=200, unique=True)
sigla = models.CharField(max_length=15, unique=True)
endereco = models.CharField(max_length=250, blank=True, null=True)
site = models.CharField(max_length=250, blank=True, null=True)
centro = models.CharField(max_length=200)
centro_sigla = models.CharField(max_length=25)
def get_absolute_url(self):
return reverse('depart_detail', kwargs={'pk': self.pk})
def __str__(self):
return self.nome + ' - ' + self.sigla + '/' + self.centro_sigla
class Docente(models.Model):
siape = models.IntegerField(unique=True)
nome = models.CharField(max_length=200)
formacao = models.CharField(max_length=50)
departamento = models.ForeignKey(Departamento, on_delete=models.PROTECT, null=True)
usuario = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
null=True
)
@property
def primeiro_nome(self):
split_nome = self.nome.split(' ')
return split_nome[0]
def siglas_str(self):
siglas = ''
if self.departamento:
siglas = ' - ' + self.departamento.sigla
if self.departamento.centro_sigla:
siglas = siglas + '/' + self.departamento.centro_sigla
return siglas
def __str__(self):
return self.nome + ' (' + str(self.siape) + ')' + self.siglas_str()
class Laboratorio(models.Model):
nome = models.CharField(max_length=150)
sigla = models.CharField(max_length=25, unique=True)
slug = models.SlugField(max_length=100, unique=True, blank=True)
descricao = models.TextField()
departamento = models.ForeignKey(Departamento, on_delete=models.PROTECT, null=True, blank=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.sigla)
super(Laboratorio, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('laboratorio_detail', args=[self.slug])
def __str__(self):
return self.sigla
class LinhaPesquisa(models.Model):
nome = models.CharField(max_length=150)
descricao = models.TextField()
areaCNPQ = models.CharField(max_length=150)
subAreaCNPQ = models.CharField(max_length=150)
laboratorio = models.ForeignKey(Laboratorio, on_delete=models.CASCADE)
def __str__(self):
return self.nome
| 32.903614
| 99
| 0.683266
|
794cca7257a339d3947189ff1e6fd8a746d43e43
| 9,216
|
py
|
Python
|
setup.py
|
ASDen/horovod
|
7b5346e233395449f0d1132a789d7eeffcce1776
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ASDen/horovod
|
7b5346e233395449f0d1132a789d7eeffcce1776
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ASDen/horovod
|
7b5346e233395449f0d1132a789d7eeffcce1776
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright Microsoft
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import subprocess
import sys
import textwrap
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from horovod import __version__
_FRAMEWORK_METADATA_FILE = 'horovod/metadata.json'
class CMakeExtension(Extension):
def __init__(self, name, cmake_lists_dir='.', sources=None, **kwa):
if sources is None:
sources = []
Extension.__init__(self, name, sources=sources, **kwa)
self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)
tensorflow_mpi_lib = CMakeExtension('horovod.tensorflow.mpi_lib',
cmake_lists_dir='.', sources=[])
torch_mpi_lib_v2 = CMakeExtension('horovod.torch.mpi_lib_v2',
cmake_lists_dir='.', sources=[])
mxnet_mpi_lib = CMakeExtension('horovod.mxnet.mpi_lib',
cmake_lists_dir='.', sources=[])
def is_build_action():
    if len(sys.argv) <= 1:
        return False
    if sys.argv[1].startswith('build'):
        return True
    if sys.argv[1].startswith('bdist'):
        return True
    if sys.argv[1].startswith('install'):
        return True
    if sys.argv[1].startswith('develop'):
        return True
    return False
def get_cmake_bin():
return os.environ.get('HOROVOD_CMAKE', 'cmake')
class custom_build_ext(build_ext):
def build_extensions(self):
if os.getenv('HOROVOD_SKIP_COMPILE') == '1':
# Skip building extensions using CMake
print("Horovod is being installed without native libraries")
return
cmake_bin = get_cmake_bin()
config = 'Debug' if self.debug or os.environ.get('HOROVOD_DEBUG') == "1" else 'RelWithDebInfo'
ext_name = self.extensions[0].name
build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')
build_dir = os.path.abspath(build_dir)
cmake_args = ['-DCMAKE_BUILD_TYPE=' + config,
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), build_dir),
'-DPYTHON_EXECUTABLE:FILEPATH=' + sys.executable]
make_args = ['-j8'] if not os.environ.get('MAKEFLAGS') else []
if self.verbose:
make_args.append('VERBOSE=1')
cmake_build_args = ['--config', config]
if make_args:
# -- specifies that these args are going to the native build tool: make
cmake_build_args += ['--'] + make_args
cmake_build_dir = os.path.join(self.build_temp, config)
if not os.path.exists(cmake_build_dir):
os.makedirs(cmake_build_dir)
config_and_build_commands = [
[cmake_bin, self.extensions[0].cmake_lists_dir] + cmake_args,
[cmake_bin, '--build', '.'] + cmake_build_args
]
if self.verbose:
print(f"Running CMake in {cmake_build_dir}:")
for command in config_and_build_commands:
print(" ".join(command))
sys.stdout.flush()
# Config and build the extension
try:
for command in config_and_build_commands:
subprocess.check_call(command, cwd=cmake_build_dir)
except OSError as e:
raise RuntimeError('CMake failed: {}'.format(str(e)))
if sys.argv[1].startswith('develop'):
# Copy over metadata.json file from build directory
shutil.copyfile(os.path.join(build_dir, _FRAMEWORK_METADATA_FILE),
os.path.join(self.extensions[0].cmake_lists_dir, _FRAMEWORK_METADATA_FILE))
# Remove unfound frameworks, otherwise develop mode will fail the install
self.extensions = [x for x in self.extensions if os.path.exists(self.get_ext_fullpath(x.name))]
# python packages required to use horovod in general
require_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<"3.7"']
# framework dependencies
tensorflow_require_list = ['tensorflow']
tensorflow_cpu_require_list = ['tensorflow-cpu']
tensorflow_gpu_require_list = ['tensorflow-gpu']
keras_require_list = ['keras>=2.0.8,!=2.0.9,!=2.1.0,!=2.1.1']
# pytorch-lightning 1.3.8 is a stable version to work with horovod
pytorch_require_list = ['torch', 'pytorch_lightning==1.3.8']
mxnet_require_list = ['mxnet>=1.4.1']
pyspark_require_list = ['pyspark>=2.3.2;python_version<"3.8"',
'pyspark>=3.0.0;python_version>="3.8"']
spark_require_list = ['numpy', 'petastorm>=0.11.0', 'pyarrow>=0.15.0', 'fsspec']
# https://github.com/ray-project/ray/pull/17465
ray_require_list = ['ray', 'aioredis<2']
pytorch_spark_require_list = pytorch_require_list + \
spark_require_list + \
pyspark_require_list
# all frameworks' dependencies
all_frameworks_require_list = tensorflow_require_list + \
keras_require_list + \
pytorch_require_list + \
mxnet_require_list + \
spark_require_list + \
pyspark_require_list
# python packages required / recommended to develop horovod
# these are the earliest versions to work with Python 3.8
# keep in sync with Dockerfile.test.cpu
# NOTE: do not use versions with +cpu or +gpu here as users would need to add --find-links to pip
dev_require_list = ['tensorflow-cpu==2.2.0',
'keras==2.3.1',
'torch==1.4.0',
'torchvision==0.5.0',
'pytorch_lightning>=1.3.8',
'mxnet==1.5.0',
'pyspark==3.0.1'] + spark_require_list
# torchvision 0.5.0 depends on torch==1.4.0
# python packages required only to run tests
test_require_list = ['mock', 'pytest', 'pytest-forked', 'parameterized']
# Skip cffi if pytorch extension explicitly disabled
if not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):
require_list.append('cffi>=1.4.0')
def get_package_version():
return __version__ + "+" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__
setup(name='horovod',
version=get_package_version(),
packages=find_packages(),
description='Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',
author='The Horovod Authors',
license='Apache 2.0',
long_description=textwrap.dedent('''\
Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.
The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),
url='https://github.com/horovod/horovod',
keywords=['deep learning', 'tensorflow', 'keras', 'pytorch', 'mxnet', 'spark', 'AI'],
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
ext_modules=[tensorflow_mpi_lib, torch_mpi_lib_v2, mxnet_mpi_lib],
cmdclass={'build_ext': custom_build_ext},
# cffi is required for PyTorch
# If cffi is specified in setup_requires, it will need libffi to be installed on the machine,
# which is undesirable. Luckily, `install` action will install cffi before executing build,
# so it's only necessary for `build*` or `bdist*` actions.
setup_requires=require_list if is_build_action() else [],
install_requires=require_list,
tests_require=test_require_list,
extras_require={
'all-frameworks': all_frameworks_require_list,
'tensorflow': tensorflow_require_list,
'tensorflow-cpu': tensorflow_cpu_require_list,
'tensorflow-gpu': tensorflow_gpu_require_list,
'keras': keras_require_list,
'pytorch': pytorch_require_list,
'mxnet': mxnet_require_list,
'spark': spark_require_list + pyspark_require_list,
'pytorch-spark': pytorch_spark_require_list,
'ray': ray_require_list,
'dev': dev_require_list,
'test': test_require_list,
},
python_requires='>=3.6',
zip_safe=False,
entry_points={
'console_scripts': [
'horovodrun = horovod.runner.launch:run_commandline'
]
})
| 41.142857
| 124
| 0.642253
|
794ccad0e8457c89068acb39cc7c0c8355457cf8
| 8,759
|
py
|
Python
|
tests/test_client_payment.py
|
captn3m0/razorpay
|
0352f2d81696984c96e51c55a81178c663be320f
|
[
"MIT"
] | 3
|
2015-11-18T10:28:07.000Z
|
2015-11-21T01:17:35.000Z
|
tests/test_client_payment.py
|
captn3m0/razorpay
|
0352f2d81696984c96e51c55a81178c663be320f
|
[
"MIT"
] | null | null | null |
tests/test_client_payment.py
|
captn3m0/razorpay
|
0352f2d81696984c96e51c55a81178c663be320f
|
[
"MIT"
] | null | null | null |
import responses
import json
from .helpers import mock_file, ClientTestCase
class TestClientPayment(ClientTestCase):
def setUp(self):
super(TestClientPayment, self).setUp()
self.base_url = '{}/payments'.format(self.base_url)
@responses.activate
def test_payment_all(self):
result = mock_file('payment_collection')
url = self.base_url
responses.add(responses.GET, url, status=200,
body=json.dumps(result), match_querystring=True)
self.assertEqual(self.client.payment.all(), result)
@responses.activate
def test_payment_all_with_options(self):
count = 1
result = mock_file('payment_collection_with_one_payment')
url = '{}?count={}'.format(self.base_url, count)
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.all({'count': count}), result)
@responses.activate
def test_payment_fetch(self):
result = mock_file('fake_payment')
url = '{}/{}'.format(self.base_url, self.payment_id)
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.fetch('fake_payment_id'), result)
@responses.activate
def test_payment_capture(self):
result = mock_file('fake_captured_payment')
url = '{}/{}/capture'.format(self.base_url, self.payment_id)
responses.add(responses.POST, url, status=200,
body=json.dumps(result), match_querystring=True)
self.assertEqual(self.client.payment.capture(self.payment_id,
amount=5100), result)
@responses.activate
def test_refund_create(self):
result = mock_file('fake_refund')
url = '{}/{}/refund'.format(self.base_url, self.payment_id)
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.refund(self.payment_id, 2000),
result)
@responses.activate
def test_transfer(self):
param = {
'transfers': {
'currency': {
'amount': 100,
'currency': 'INR',
'account': 'dummy_acc'
}
}
}
result = mock_file('transfers_collection_with_payment_id')
url = '{}/{}/transfers'.format(self.base_url, self.payment_id)
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.transfer(self.payment_id, param),
result)
@responses.activate
def test_transfer_fetch(self):
result = mock_file('transfers_collection_with_payment_id')
url = '{}/{}/transfers'.format(self.base_url, self.payment_id)
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.transfers(self.payment_id), result)
@responses.activate
def test_bank_transfer_fetch(self):
result = mock_file('fake_bank_transfer')
url = '{}/{}/bank_transfer'.format(self.base_url, self.payment_id)
responses.add(responses.GET,
url,
status=200,
body=result,
match_querystring=True)
response = self.client.payment.bank_transfer(self.payment_id)
self.assertEqual(response['virtual_account_id'], 'va_8J2ny4Naokqbpe')
self.assertEqual(response['payment_id'], self.payment_id)
@responses.activate
def test_upi_transfer_fetch(self):
result = mock_file('fake_upi_transfer')
url = '{}/{}/upi_transfer'.format(self.base_url, self.payment_id)
responses.add(responses.GET,
url,
status=200,
body=result,
match_querystring=True)
response = self.client.payment.upi_transfer(self.payment_id)
self.assertEqual(response['virtual_account_id'], 'va_8J2ny4Naokqbpf')
self.assertEqual(response['payment_id'], self.payment_id)
@responses.activate
def test_payment_refund(self):
init = {
"amount": "100"
}
result = mock_file('fake_refund')
url = '{}/{}/refund'.format(self.base_url, 'fake_refund_id')
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
        self.assertEqual(self.client.payment.refund('fake_refund_id', init), result)
@responses.activate
def test_payment_fetch_multiple_refund(self):
result = mock_file('refund_collection')
url = "{}/{}/refunds".format(self.base_url, 'fake_payment_id')
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.fetch_multiple_refund(self.payment_id), result)
@responses.activate
def test_payment_fetch_refund_id(self):
result = mock_file('refund_collection')
url = "{}/{}/refunds/{}".format(self.base_url, 'fake_payment_id', 'fake_refund_id')
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.fetch_refund_id('fake_payment_id', 'fake_refund_id'), result)
@responses.activate
def test_payment_edit(self):
param = {
"notes": {
"key1": "value3",
"key2": "value2"
}
}
result = mock_file('edit_payment')
url = '{}/{}'.format(self.base_url, 'dummy_id')
responses.add(responses.PATCH, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.edit('dummy_id', param), result)
@responses.activate
def test_fetch_card_detail(self):
result = mock_file('fake_card_detail_payment')
url = '{}/{}/card'.format(self.base_url, 'dummy_id')
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.fetchCardDetails('dummy_id'), result)
@responses.activate
def test_fetch_downtimes(self):
result = mock_file('fake_card_detail_payment')
url = '{}/{}'.format(self.base_url, 'downtimes')
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.fetchDownTime(), result)
@responses.activate
def test_fetch_downtime_by_id(self):
result = mock_file('fake_card_detail_payment')
url = '{}/downtimes/{}'.format(self.base_url, 'dummy_id')
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.fetchDownTimeById('dummy_id'), result)
@responses.activate
def test_payment_json(self):
param = {
"amount": "500",
"currency": "INR",
"email": "gaurav.kumar@example.com",
"contact": "9123456789",
"order_id": "order_IfCjbAb066hM9i",
"method": "upi",
"card": {
"number": "4854980604708430",
"cvv": "123",
"expiry_month": "12",
"expiry_year": "21",
"name": "Gaurav Kumar"
}
}
result = mock_file('fake_payment_json')
url = "{}/create/{}".format(self.base_url, 'json')
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.createPaymentJson(param), result)
    @responses.activate
    def test_create_recurring(self):
        init = mock_file('init_create_recurring')
        result = mock_file('fake_create_recurring')
        url = "{}/{}/recurring".format(self.base_url, 'create')
        responses.add(responses.POST,
                      url,
                      status=200,
                      body=json.dumps(result),
                      match_querystring=True)
        self.assertEqual(self.client.payment.createRecurring(init), result)
| 41.511848
| 110
| 0.600868
|
794ccb71011ae689ebbbb75ad382b542646615a3
| 11,423
|
py
|
Python
|
verticapy/learn/linear_model.py
|
afard/VerticaPy
|
ecbee0027a208ba53b31438e5b2f4577af95a07e
|
[
"Apache-2.0"
] | 52
|
2020-06-29T12:31:14.000Z
|
2022-03-31T20:24:23.000Z
|
verticapy/learn/linear_model.py
|
afard/VerticaPy
|
ecbee0027a208ba53b31438e5b2f4577af95a07e
|
[
"Apache-2.0"
] | 175
|
2020-07-13T18:16:28.000Z
|
2022-03-31T14:01:45.000Z
|
verticapy/learn/linear_model.py
|
afard/VerticaPy
|
ecbee0027a208ba53b31438e5b2f4577af95a07e
|
[
"Apache-2.0"
] | 21
|
2020-07-07T22:53:10.000Z
|
2022-03-04T11:30:48.000Z
|
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality to use to conduct
# data science projects on data stored in Vertica, taking advantage of Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# VerticaPy Modules
from verticapy import vDataFrame
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
from verticapy.learn.vmodel import *
# ---#
class ElasticNet(Regressor):
"""
---------------------------------------------------------------------------
    Creates an ElasticNet object using the Vertica Linear Regression algorithm
on the data. The Elastic Net is a regularized regression method that linearly
combines the L1 and L2 penalties of the Lasso and Ridge methods.
Parameters
----------
name: str
    Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
C: float, optional
The regularization parameter value. The value must be zero or non-negative.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
CGD : Coordinate Gradient Descent
l1_ratio: float, optional
ENet mixture parameter that defines how much L1 versus L2 regularization
to provide.
"""
def __init__(
self,
name: str,
cursor=None,
tol: float = 1e-6,
C: float = 1.0,
max_iter: int = 100,
solver: str = "CGD",
l1_ratio: float = 0.5,
):
check_types([("name", name, [str],)])
self.type, self.name = "LinearRegression", name
self.set_params(
{
"penalty": "enet",
"tol": tol,
"C": C,
"max_iter": max_iter,
"solver": str(solver).lower(),
"l1_ratio": l1_ratio,
}
)
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
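# Hedged usage sketch (not part of the original module): assuming a relation
# "public.train_data" with predictor columns ["x1", "x2"] and response "y",
# an ElasticNet model could be created and trained roughly like this:
#
#     model = ElasticNet("public.enet_model", tol=1e-6, C=1.0, l1_ratio=0.5)
#     model.fit("public.train_data", ["x1", "x2"], "y")
#
# The fit() call comes from the shared Regressor base class in
# verticapy.learn.vmodel, which is not shown in this file.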
# ---#
class Lasso(Regressor):
"""
---------------------------------------------------------------------------
Creates a Lasso object using the Vertica Linear Regression algorithm on the
data. The Lasso is a regularized regression method which uses an L1 penalty.
Parameters
----------
name: str
    Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
C: float, optional
The regularization parameter value. The value must be zero or non-negative.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
CGD : Coordinate Gradient Descent
"""
def __init__(
self,
name: str,
cursor=None,
tol: float = 1e-6,
C: float = 1.0,
max_iter: int = 100,
solver: str = "CGD",
):
check_types([("name", name, [str],)])
self.type, self.name = "LinearRegression", name
self.set_params(
{
"penalty": "l1",
"tol": tol,
"C": C,
"max_iter": max_iter,
"solver": str(solver).lower(),
}
)
for elem in ["l1_ratio"]:
if elem in self.parameters:
del self.parameters[elem]
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
# ---#
class LinearRegression(Regressor):
"""
---------------------------------------------------------------------------
Creates a LinearRegression object using the Vertica Linear Regression algorithm
on the data.
Parameters
----------
name: str
    Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
"""
def __init__(
self,
name: str,
cursor=None,
tol: float = 1e-6,
max_iter: int = 100,
solver: str = "Newton",
):
check_types(
[("name", name, [str],), ("solver", solver.lower(), ["newton", "bfgs"],),]
)
self.type, self.name = "LinearRegression", name
self.set_params(
{
"penalty": "none",
"tol": tol,
"max_iter": max_iter,
"solver": str(solver).lower(),
}
)
for elem in ["l1_ratio", "C"]:
if elem in self.parameters:
del self.parameters[elem]
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
# ---#
class LogisticRegression(BinaryClassifier):
"""
---------------------------------------------------------------------------
Creates a LogisticRegression object using the Vertica Logistic Regression
algorithm on the data.
Parameters
----------
name: str
    Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
penalty: str, optional
Determines the method of regularization.
None : No Regularization
L1 : L1 Regularization
L2 : L2 Regularization
ENet : Combination between L1 and L2
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
C: float, optional
The regularization parameter value. The value must be zero or non-negative.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
CGD : Coordinate Gradient Descent
l1_ratio: float, optional
ENet mixture parameter that defines how much L1 versus L2 regularization
to provide.
"""
def __init__(
self,
name: str,
cursor=None,
penalty: str = "None",
tol: float = 1e-6,
        C: float = 1.0,
max_iter: int = 100,
solver: str = "Newton",
l1_ratio: float = 0.5,
):
check_types([("name", name, [str],)])
self.type, self.name = "LogisticRegression", name
self.set_params(
{
"penalty": str(penalty).lower(),
"tol": tol,
"C": C,
"max_iter": max_iter,
"solver": str(solver).lower(),
"l1_ratio": l1_ratio,
}
)
if penalty.lower() == "none":
for elem in ["l1_ratio", "C"]:
if elem in self.parameters:
del self.parameters[elem]
check_types([("solver", solver.lower(), ["bfgs", "newton"],)])
elif penalty.lower() in ("l1", "l2"):
for elem in ["l1_ratio",]:
if elem in self.parameters:
del self.parameters[elem]
check_types([("solver", solver.lower(), ["bfgs", "newton", "cgd"],)])
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
# ---#
class Ridge(Regressor):
"""
---------------------------------------------------------------------------
Creates a Ridge object using the Vertica Linear Regression algorithm on the
data. The Ridge is a regularized regression method which uses an L2 penalty.
Parameters
----------
name: str
    Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
C: float, optional
The regularization parameter value. The value must be zero or non-negative.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
"""
def __init__(
self,
name: str,
cursor=None,
tol: float = 1e-6,
C: float = 1.0,
max_iter: int = 100,
solver: str = "Newton",
):
        check_types(
            [("name", name, [str],), ("solver", solver.lower(), ["newton", "bfgs"],),]
        )
self.type, self.name = "LinearRegression", name
self.set_params(
{
"penalty": "l2",
"tol": tol,
"C": C,
"max_iter": max_iter,
"solver": str(solver).lower(),
}
)
for elem in ["l1_ratio"]:
if elem in self.parameters:
del self.parameters[elem]
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
| 32.54416
| 86
| 0.57428
|
794ccb9909087ce7b84c1ef162955cb8d22331f1
| 734
|
py
|
Python
|
manage.py
|
FGacheru/blog_app
|
3c32e7f38a39c4f77c95e7645bd58abf9083916b
|
[
"MIT"
] | null | null | null |
manage.py
|
FGacheru/blog_app
|
3c32e7f38a39c4f77c95e7645bd58abf9083916b
|
[
"MIT"
] | null | null | null |
manage.py
|
FGacheru/blog_app
|
3c32e7f38a39c4f77c95e7645bd58abf9083916b
|
[
"MIT"
] | null | null | null |
from app import create_app,db
from app.models import *
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Server
# Creating app instance
# app = create_app('test')  # switch to the 'test' configuration when testing locally
app = create_app('production')
manager = Manager(app)
manager.add_command('server',Server)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
return dict(app = app,db = db,User = User, Role = Role, Views = Views, Comments = Comments)
if __name__ == '__main__':
manager.run()
| 24.466667
| 95
| 0.723433
|
794cccc6af2e8734483a8786519b223529726026
| 9,823
|
py
|
Python
|
src/examples/MapReduceModel/main.py
|
MarkoRimac/YAFS
|
5ea354439e4acb4ca83714b01eb427b508718836
|
[
"MIT"
] | 58
|
2018-09-19T12:00:01.000Z
|
2022-03-28T12:14:32.000Z
|
src/examples/MapReduceModel/main.py
|
MarkoRimac/YAFS
|
5ea354439e4acb4ca83714b01eb427b508718836
|
[
"MIT"
] | 55
|
2018-03-18T09:58:27.000Z
|
2022-02-19T16:40:02.000Z
|
src/examples/MapReduceModel/main.py
|
MarkoRimac/YAFS
|
5ea354439e4acb4ca83714b01eb427b508718836
|
[
"MIT"
] | 51
|
2018-05-30T11:33:10.000Z
|
2022-03-14T15:37:01.000Z
|
"""
This example...
@author: Isaac Lera & Carlos Guerrero
"""
import json
import argparse
from yafs.core import Sim
from yafs.application import Application,Message
from yafs.topology import Topology
from yafs.placement import JSONPlacement,JSONPlacementOnCloud
from yafs.distribution import *
import numpy as np
import logging.config
import os
from yafs.utils import fractional_selectivity
from selection_multipleDeploys import DeviceSpeedAwareRouting
from jsonPopulation import JSONPopulation
import time
import networkx as nx
RANDOM_SEED = 1
def create_applications_from_json(data):
applications = {}
for app in data:
a = Application(name=app["name"])
modules = [{"None":{"Type":Application.TYPE_SOURCE}}]
for module in app["module"]:
if "RAM" in module.keys():
modules.append({module["name"]: {"RAM": module["RAM"], "Type": Application.TYPE_MODULE}})
else:
modules.append({module["name"]: {"RAM": 1, "Type": Application.TYPE_MODULE}})
a.set_modules(modules)
ms = {}
for message in app["message"]:
#print "Creando mensaje: %s" %message["name"]
ms[message["name"]] = Message(message["name"],message["s"],message["d"],instructions=message["instructions"],bytes=message["bytes"])
if message["s"] == "None":
a.add_source_messages(ms[message["name"]])
#print "Total mensajes creados %i" %len(ms.keys())
for idx, message in enumerate(app["transmission"]):
if "message_out" in message.keys():
value_treshld = 1.0
if "fractional" in message.keys():
value_treshld = message["fractional"]
a.add_service_module(message["module"],ms[message["message_in"]], ms[message["message_out"]], fractional_selectivity, threshold=value_treshld)
else:
a.add_service_module(message["module"], ms[message["message_in"]])
applications[app["name"]]=a
#a.add_service_module("Client", m_egg, m_sensor, fractional_selectivity, threshold=0.9)
return applications
###
# This function lets the user control the elimination of nodes according to the modules deployed on them (see also the DynamicFailuresOnNodes example)
###
"""
It returns the software modules (a list of identifiers of DES process) deployed on this node
"""
def getProcessFromThatNode(sim, node_to_remove):
if node_to_remove in sim.alloc_DES.values():
DES = []
# This node can have multiples DES processes on itself
for k, v in sim.alloc_DES.items():
if v == node_to_remove:
DES.append(k)
return DES,True
else:
return [],False
"""
It controls the elimination of a node
"""
idxFControl = 0
def failureControl(sim,filelog,ids):
global idxFControl
nodes = list(sim.topology.G.nodes())
if len(nodes)>1:
node_to_remove = ids[idxFControl]
idxFControl +=1
keys_DES,someModuleDeployed = getProcessFromThatNode(sim, node_to_remove)
print "\n\nRemoving node: %i, Total nodes: %i" % (node_to_remove, len(nodes))
print "\tStopping some DES processes: %s\n\n"%keys_DES
filelog.write("%i,%s,%d\n"%(node_to_remove, someModuleDeployed,sim.env.now))
##Print some information:
for des in keys_DES:
if des in sim.alloc_source.keys():
print "Removing a Gtw/User entity\t"*4
sim.remove_node(node_to_remove)
for key in keys_DES:
sim.stop_process(key)
else:
sim.stop = True ## We stop the simulation
def main(simulated_time,experimento,file,study,it):
random.seed(it)
np.random.seed(it)
"""
TOPOLOGY from a json
"""
t = Topology()
dataNetwork = json.load(open(experimento+file+'-network.json'))
t.load(dataNetwork)
attNodes = {}
for k in t.G.nodes():
attNodes[k] = {"IPT": 1}
nx.set_node_attributes(t.G, values=attNodes)
# t.write("network.gexf")
"""
APPLICATION
"""
studyApp = study
if study=="FstrRep":
studyApp="Replica"
elif study == "Cloud":
studyApp="Single"
dataApp = json.load(open(experimento+file+'-app%s.json'%studyApp))
apps = create_applications_from_json(dataApp)
#for app in apps:
# print apps[app]
"""
PLACEMENT algorithm
"""
placementJson = json.load(open(experimento+file+'-alloc%s.json'%study))
placement = JSONPlacement(name="Placement",json=placementJson)
### Placement histogram
# listDevices =[]
# for item in placementJson["initialAllocation"]:
# listDevices.append(item["id_resource"])
# import matplotlib.pyplot as plt
# print listDevices
# print np.histogram(listDevices,bins=range(101))
# plt.hist(listDevices, bins=100) # arguments are passed to np.histogram
# plt.title("Placement Histogram")
# plt.show()
## exit()
"""
POPULATION algorithm
"""
studyUser = study
if study == "FstrRep":
studyUser = "Replica"
elif study == "Cloud":
studyUser = "Single"
dataPopulation = json.load(open(experimento+file+'-users%s.json'%studyUser))
pop = JSONPopulation(name="Statical",json=dataPopulation,it=it)
"""
SELECTOR algorithm
"""
selectorPath = DeviceSpeedAwareRouting()
"""
SIMULATION ENGINE
"""
stop_time = simulated_time
s = Sim(t, default_results_path=experimento + "Results_%i_%s_%s_%i" % (it,file,study,stop_time))
"""
Failure process
"""
# time_shift = 10000
# distribution = deterministicDistributionStartPoint(name="Deterministic", time=time_shift,start=10000)
# failurefilelog = open(experimento+"Failure_%s_%i.csv" % (ilpPath,stop_time),"w")
# failurefilelog.write("node, module, time\n")
# idCloud = t.find_IDs({"type": "CLOUD"})[0] #[0] -> In this study there is only one CLOUD DEVICE
# centrality = np.load(pathExperimento+"centrality.npy")
# randomValues = np.load(pathExperimento+"random.npy")
# # s.deploy_monitor("Failure Generation", failureControl, distribution,sim=s,filelog=failurefilelog,ids=centrality)
# s.deploy_monitor("Failure Generation", failureControl, distribution,sim=s,filelog=failurefilelog,ids=randomValues)
#For each deployment the user - population have to contain only its specific sources
for aName in apps.keys():
#print "Deploying app: ",aName
pop_app = JSONPopulation(name="Statical_%s"%aName,json={},it=it)
data = []
for element in pop.data["sources"]:
if element['app'] == aName:
data.append(element)
pop_app.data["sources"]=data
s.deploy_app(apps[aName], placement, pop_app, selectorPath)
s.run(stop_time, test_initial_deploy=False, show_progress_monitor=False) #TEST to TRUE
## Enrouting information
# print "Values"
# print selectorPath.cache.values()
# failurefilelog.close()
# #CHECKS
#print s.topology.G.nodes
# s.print_debug_assignaments()
if __name__ == '__main__':
"""Main function"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--work-dir',
type=str,
default="",
help='Working directory')
parser.add_argument(
'--simulations',
type=int,
default=1,
help='Number of simulations')
parser.add_argument(
'--duration',
type=int,
default=100000,
help='Simulation time')
args, pipeline_args = parser.parse_known_args()
nSimulations = args.simulations
pathExperimento = args.work_dir
duration = args.duration
study = ""
#logging.config.fileConfig(os.getcwd()+'/logging.ini')
for i in range(nSimulations):
start_time = time.time()
# for f in xrange(10, 110, 10):
for f in xrange(100, 201, 10):
# file = "f%in50" % f
file = "f%in200" % f
print file
study = "Replica"
print "\tRunning %s" % study
main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
study = "Single"
print "\tRunning %s" % study
main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
study = "FstrRep"
print "\tRunning %s" % study
main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
# study = "Cloud"
# print "\tRunning %s" % study
# main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
print "SEGUNDA PARTE"
for n in xrange(100, 301, 20):
# for n in xrange(20, 220, 20):
file = "f100n%i" % n
# file = "f100n%i" % n
print file
study = "Replica"
print "\tRunning %s" % study
main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
study = "Single"
print "\tRunning %s" % study
main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
study = "FstrRep"
print "\tRunning %s" % study
main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
# study = "Cloud"
# print "\tRunning %s" % study
# main(simulated_time=duration, experimento=pathExperimento, file=file, study=study,it=i)
print "Simulation Done"
print("\n--- %s seconds ---" % (time.time() - start_time))
| 30.506211
| 160
| 0.628016
|
794ccd2d020eb2a95459d4108500063283259713
| 2,021
|
py
|
Python
|
AdminManageAPI/serializers.py
|
sammacorpy/House-keeping-management-only-api
|
01edaf85712c9f3a7daa809f544a299fe8f5fe39
|
[
"MIT"
] | 3
|
2019-07-17T15:36:43.000Z
|
2021-03-18T04:41:37.000Z
|
AdminManageAPI/serializers.py
|
sammacorpy/House-keeping-management-only-api
|
01edaf85712c9f3a7daa809f544a299fe8f5fe39
|
[
"MIT"
] | null | null | null |
AdminManageAPI/serializers.py
|
sammacorpy/House-keeping-management-only-api
|
01edaf85712c9f3a7daa809f544a299fe8f5fe39
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers, exceptions
from AdminManageAPI.models import *
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
class Meta:
model=User
fields='__all__'
class AssetSerializer(serializers.ModelSerializer):
class Meta:
model=Asset
fields=(
'id',
'name',
'tag',
'created_on',
'updated_on',
)
class ActivitySerializer(serializers.ModelSerializer):
class Meta:
model=Activity
fields=(
'id',
'name',
'frequency',
'created_on',
'updated_on',
'asset',
)
class WorkerSerializer(serializers.ModelSerializer):
class Meta:
model=Worker
fields=(
'id',
'name',
'skills',
'phone',
'created_on',
'updated_on',
)
class TaskAssignSerializer(serializers.ModelSerializer):
class Meta:
model=TaskAssign
fields=(
'id',
'task',
'asset',
'worker',
'timeOfAllocation',
'timeToComplete',
)
class LoginSerializer(serializers.Serializer):
username=serializers.CharField()
password=serializers.CharField()
def validate(self,data):
username=data.get('username',"")
password=data.get('password',"")
if username and password:
user=authenticate(username=username,password=password)
if user:
data['user']=user
else:
m="unable to login, wrong credential"
raise exceptions.ValidationError(m)
else:
m="enter both username and password "
raise exceptions.ValidationError(m)
return data
| 22.208791
| 66
| 0.53142
|
794ccd5d6fea9c2604b6b589368bfc870b448270
| 1,582
|
py
|
Python
|
setup.py
|
cloudnull/pasted-client
|
66839ae234ae13d0a69f08b7206e55ece0838d98
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
cloudnull/pasted-client
|
66839ae234ae13d0a69f08b7206e55ece0838d98
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
cloudnull/pasted-client
|
66839ae234ae13d0a69f08b7206e55ece0838d98
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
with open('README.rst', 'r') as r_file:
README = r_file.read()
setuptools.setup(
name = 'pasted-client',
version = '0.1.1',
description = 'Pasted client. Paste files or STDIN to a raw object.',
long_description = README,
author = 'Kevin Carter',
author_email = 'kevin@cloudnull.com',
url = 'http://github.com/cloudnull/pasted-client',
install_requires = [
'requests'
],
packages = [
'pasted_client'
],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules'
],
entry_points = {
"console_scripts": [
"pasted = pasted_client.pasted:cli"
]
}
)
| 31.64
| 75
| 0.651707
|
794ccf31c9e0493751c0b34bdcf1a7c87d577e3f
| 1,685
|
py
|
Python
|
Lesson05/transmembrane.py
|
NatalieTehranchi/learning_python
|
ea24162ef5d4042f0e969e0ed6b1aa0765a8bb55
|
[
"MIT"
] | null | null | null |
Lesson05/transmembrane.py
|
NatalieTehranchi/learning_python
|
ea24162ef5d4042f0e969e0ed6b1aa0765a8bb55
|
[
"MIT"
] | null | null | null |
Lesson05/transmembrane.py
|
NatalieTehranchi/learning_python
|
ea24162ef5d4042f0e969e0ed6b1aa0765a8bb55
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import biotoolbox
# Write a program that predicts if a protein is trans-membrane
# Trans-membrane proteins have the following properties
# Signal peptide: https://en.wikipedia.org/wiki/Signal_peptide
# Hydrophobic regions(s): https://en.wikipedia.org/wiki/Transmembrane_protein
# No prolines (alpha helix)
# Hydrophobicity is measured via Kyte-Doolittle
# https://en.wikipedia.org/wiki/Hydrophilicity_plot
# For our purposes:
# Signal peptide is 8 aa long, KD > 2.5, first 30 aa
# Hydrophobic region is 11 aa long, KD > 2.0, after 30 aa
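# Illustrative sketch only (not part of the original script): biotoolbox is not
# shown here, so the helper below is an assumption about the kind of check
# biotoolbox.hasHydrophobicHelix performs -- slide a window over the sequence,
# average the Kyte-Doolittle values, and report a hit when the average exceeds
# the threshold and the window contains no proline (alpha helix constraint).
def kd_window_hit(seq, kd_threshold, window):
    kd = {'A': 1.8, 'R': -4.5, 'N': -3.5, 'D': -3.5, 'C': 2.5,
          'Q': -3.5, 'E': -3.5, 'G': -0.4, 'H': -3.2, 'I': 4.5,
          'L': 3.8, 'K': -3.9, 'M': 1.9, 'F': 2.8, 'P': -1.6,
          'S': -0.8, 'T': -0.7, 'W': -0.9, 'Y': -1.3, 'V': 4.2}
    for i in range(len(seq) - window + 1):
        win = seq[i:i + window]
        if 'P' in win:
            continue  # membrane-spanning alpha helices exclude proline
        if sum(kd.get(aa, 0.0) for aa in win) / window > kd_threshold:
            return True
    return False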
parser = argparse.ArgumentParser(
description='Predicts transmembrane proteins.')
parser.add_argument('--file', required=True, type=str,
metavar='<path>', help='protein file')
parser.add_argument('--win1', required=False, type=int, default=8,
metavar='<int>', help='length of signal peptide [%(default)i]' )
parser.add_argument('--win2', required=False, type=int, default=11,
metavar='<int>', help='length of hydrophobic region [%(default)i]')
parser.add_argument('--kd1', required=False, type=float, default=2.5,
metavar='<float>', help='kd value for signal peptide [%(default)f]')
parser.add_argument('--kd2', required=False, type=float, default=2.0,
metavar='<float>', help='kd value for hydrophobic region [%(default)f]')
arg = parser.parse_args()
for name, seq in biotoolbox.read_fasta(arg.file):
    if biotoolbox.hasHydrophobicHelix(seq[0:30], arg.kd1, arg.win1)\
    and biotoolbox.hasHydrophobicHelix(seq[30:len(seq)], arg.kd2, arg.win2):
print(name)
"""
18w
Dtg
Krn
Lac
Mcr
PRY
Pxt
Pzl
QC
Ror
S1P
S2P
Spt
apn
bai
bdl
bou
bug
cue
drd
ft
grk
knk
ksh
m
nac
ort
rk
smo
thw
tsg
waw
zye
"""
| 22.77027
| 77
| 0.735905
|
794cd1542015cd7016fe92a4a93089a9f9f71303
| 831
|
py
|
Python
|
qriter/noxfile.py
|
tonyfast/writers-workshop
|
fa6d24330bc24f1e2060b00de06ff26236d24f21
|
[
"BSD-3-Clause"
] | null | null | null |
qriter/noxfile.py
|
tonyfast/writers-workshop
|
fa6d24330bc24f1e2060b00de06ff26236d24f21
|
[
"BSD-3-Clause"
] | null | null | null |
qriter/noxfile.py
|
tonyfast/writers-workshop
|
fa6d24330bc24f1e2060b00de06ff26236d24f21
|
[
"BSD-3-Clause"
] | 1
|
2021-05-14T16:31:33.000Z
|
2021-05-14T16:31:33.000Z
|
"""sessions for running tasks to build docs and packages
nox -s docs
"""
import os
import nox
CI = "GITHUB_ACTION" in os.environ or "READTHEDOCS" in os.environ
@nox.session(reuse_venv=True, python=False if CI else "3.8")
def docs(session):
session.install(*"""-rworks/requirements-docs.txt --ignore-installed""".split())
session.run(*"""doit build_docs""".split())
@nox.session(reuse_venv=True, python=False if CI else "3.8", venv_backend="conda")
def pdf(session):
session.conda_install(
*"""jupyter-book[sphinx,pdflatex] texlive-core -cconda-forge""".split()
)
session.install("bindep")
session.run("bindep")
session.run(*"jb build . --toc qww/toc.yml --config qww/config.yml".split())
session.run(*"jb build . --toc qww/toc.yml --config qww/config.yml --builder pdflatex".split())
| 36.130435
| 99
| 0.684717
|
794cd184f19e6b57f5ff303b228c397303f308b8
| 1,737
|
py
|
Python
|
test/sagemaker_tests/pytorch/training/integration/__init__.py
|
Jarryd-rk/deep-learning-containers
|
6b98175bb70f1badd7e64843914e1c475c3128fa
|
[
"Apache-2.0"
] | 7
|
2021-12-18T05:49:22.000Z
|
2021-12-28T09:52:32.000Z
|
test/sagemaker_tests/pytorch/training/integration/__init__.py
|
Jarryd-rk/deep-learning-containers
|
6b98175bb70f1badd7e64843914e1c475c3128fa
|
[
"Apache-2.0"
] | 2
|
2022-03-28T12:39:09.000Z
|
2022-03-29T12:42:01.000Z
|
test/sagemaker_tests/pytorch/training/integration/__init__.py
|
Jarryd-rk/deep-learning-containers
|
6b98175bb70f1badd7e64843914e1c475c3128fa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
resources_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
mnist_path = os.path.join(resources_path, 'mnist')
mnist_script = os.path.join(mnist_path, 'mnist.py')
smdataparallel_mnist_script = os.path.join(mnist_path, 'smdataparallel_mnist_script_mode.sh')
fastai_path = os.path.join(resources_path, 'fastai')
fastai_cifar_script = os.path.join(fastai_path, 'train_cifar.py')
fastai_mnist_script = os.path.join(fastai_path, 'mnist.py')
data_dir = os.path.join(mnist_path, 'data')
training_dir = os.path.join(data_dir, 'training')
dist_operations_path = os.path.join(resources_path, 'distributed_operations.py')
smdebug_mnist_script = os.path.join(mnist_path, 'smdebug_mnist.py')
mnist_1d_script = os.path.join(mnist_path, 'mnist_1d.py')
model_cpu_dir = os.path.join(mnist_path, 'model_cpu')
model_cpu_1d_dir = os.path.join(model_cpu_dir, '1d')
model_gpu_dir = os.path.join(mnist_path, 'model_gpu')
model_gpu_1d_dir = os.path.join(model_gpu_dir, '1d')
call_model_fn_once_script = os.path.join(resources_path, 'call_model_fn_once.py')
ROLE = 'dummy/unused-role'
DEFAULT_TIMEOUT = 20
| 45.710526
| 93
| 0.776051
|
794cd1e0b7e241159a699f232ad898046566b58d
| 2,358
|
py
|
Python
|
3D_CNN/SequenceBatchGenerator.py
|
dahe-cvl/apa_paper
|
bec38e0270fda6f0fd092eacc6f10344b26a0f19
|
[
"MIT"
] | 1
|
2021-05-13T10:33:20.000Z
|
2021-05-13T10:33:20.000Z
|
3D_CNN/SequenceBatchGenerator.py
|
dahe-cvl/apa_paper
|
bec38e0270fda6f0fd092eacc6f10344b26a0f19
|
[
"MIT"
] | null | null | null |
3D_CNN/SequenceBatchGenerator.py
|
dahe-cvl/apa_paper
|
bec38e0270fda6f0fd092eacc6f10344b26a0f19
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from DataAugmentation import DataAugmentation
class SequenceBatchGenerator:
# Create minibatches of a given size from a dataset.
# Preserves the original sample order unless shuffle() is used.
    batchsize = 0
    dataset = None
    tform = None
    stat = None
    nBatches = 0
    b = []
    dataGenerator = None
    mode = 0
    shuffled_idx = []
    sequences = 6
    def __init__(self, dataset, split, sequences):
        # Constructor.
        # dataset is the dataset to wrap.
        # split selects the subset to iterate over ("train", "val" or "test").
        # sequences is the number of frames stacked into one sample.
        self.dataset = dataset
        self.sequences = sequences
        print(self.sequences)
        self.dataGenerator = DataAugmentation()
        if split == "train":
            self.mode = 0
        elif split == "val":
            self.mode = 1
        elif split == "test":
            self.mode = 2
    def SequenceGenerator(self):
        # NOTE: getAllSamplesOfID() is assumed to be provided by the surrounding
        # project (it is neither defined nor imported in this file); it should
        # return the ids, names, samples and labels for one video id.
        if self.mode == 0:  # train split (the original compared the int mode against 'train')
            while 1:
                for vid in range(1, 4801):
                    # get samples of VID
                    ids, names, samples, labels = getAllSamplesOfID(vid)
                    s = np.zeros((self.sequences, samples.shape[1], samples.shape[2], samples.shape[3]))
                    if self.sequences > samples.shape[0]:
                        s[:samples.shape[0], :, :, :] = samples[:samples.shape[0], :, :, :]
                    else:
                        s[:self.sequences, :, :, :] = samples[:self.sequences, :, :, :]
                    s = np.reshape(s, (1, self.sequences, samples.shape[1], samples.shape[2], samples.shape[3]))
                    l = labels[:1, :]
                    yield s, l
        elif self.mode == 1:  # validation split
            while 1:
                for vid in range(4800, 6001):
                    # get samples of VID
                    ids, names, samples, labels = getAllSamplesOfID(vid)
                    s = np.zeros((self.sequences, samples.shape[1], samples.shape[2], samples.shape[3]))
                    if self.sequences > samples.shape[0]:
                        s[:samples.shape[0], :, :, :] = samples[:samples.shape[0], :, :, :]
                    else:
                        s[:self.sequences, :, :, :] = samples[:self.sequences, :, :, :]
                    s = np.reshape(s, (1, self.sequences, samples.shape[1], samples.shape[2], samples.shape[3]))
                    l = labels[:1, :]
                    yield s, l
    def printSequenceImages(self, b):
        for i in range(0, int(b.shape[0]), 1):
            im = plt.imshow(b[i])
            plt.pause(0.6)
            plt.show()
| 28.409639
| 98
| 0.630195
|
794cd2b9a5aac8b13f7a34688d69809e8916091a
| 631
|
py
|
Python
|
manage.py
|
petmik2018/DRF_backend
|
d81cf75db1451bfd8f5ba9205c7353d7ea845dab
|
[
"MIT"
] | null | null | null |
manage.py
|
petmik2018/DRF_backend
|
d81cf75db1451bfd8f5ba9205c7353d7ea845dab
|
[
"MIT"
] | 7
|
2020-06-06T01:46:05.000Z
|
2022-02-10T10:29:31.000Z
|
manage.py
|
petmik2018/DRF_backend
|
d81cf75db1451bfd8f5ba9205c7353d7ea845dab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf_backend.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.681818
| 75
| 0.684628
|
794cd3fc01e8683cf80a54a7cc02eff9b79238c5
| 898
|
py
|
Python
|
actions/format.py
|
cognifloyd/stackstorm-csv
|
75af77cf905f41a0551b17b587d79859356e1c94
|
[
"Apache-2.0"
] | null | null | null |
actions/format.py
|
cognifloyd/stackstorm-csv
|
75af77cf905f41a0551b17b587d79859356e1c94
|
[
"Apache-2.0"
] | null | null | null |
actions/format.py
|
cognifloyd/stackstorm-csv
|
75af77cf905f41a0551b17b587d79859356e1c94
|
[
"Apache-2.0"
] | null | null | null |
import csv
from six.moves import StringIO
from st2common.runners.base_action import Action
from st2common.exceptions.action import InvalidActionParameterException
__all__ = [
'FormatCSVAction'
]
class FormatCSVAction(Action):
    def run(self, data, delimiter=',', quote_char='"'):
        if not isinstance(data, list):
            raise InvalidActionParameterException("data must be a list")
        if len(data) == 0:
            raise InvalidActionParameterException("data has no rows")
        if not isinstance(data[0], dict):
            raise InvalidActionParameterException("data must be a list of dict")
fieldnames = data[0].keys()
sh = StringIO()
        writer = csv.DictWriter(sh, fieldnames=fieldnames, delimiter=delimiter, quotechar=quote_char)
writer.writeheader()
for row in data:
writer.writerow(row)
out = sh.getvalue()
sh.close()
return out
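# Hedged usage sketch (illustrative, not part of the pack): for
#     data = [{"host": "web1", "state": "ok"}]
# run(data) would be expected to return 'host,state\r\nweb1,ok\r\n', since
# csv.DictWriter uses '\r\n' as its default line terminator and the field
# order follows the keys of the first row.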
| 27.212121
| 80
| 0.650334
|
794cd481bd185a8071cfdd33a47d73336666de69
| 222
|
py
|
Python
|
crash_course/ch04/dimensions.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
crash_course/ch04/dimensions.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
crash_course/ch04/dimensions.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
dimensions = (200, 50)
print(dimensions[0])
print(dimensions[1])
print('Original dimensions:')
for d in dimensions:
print(d)
dimensions = (400, 100)
print('\nModified dimensions:')
for d in dimensions:
print(d)
| 15.857143
| 31
| 0.693694
|
794cd5345fa0677178c67136261cf8856246f808
| 6,777
|
py
|
Python
|
bindings/python/cntk/ops/tests/block_test.py
|
MSXC/CNTK
|
d223d48b411bc994acd465ed333c9f6bed64dd7f
|
[
"RSA-MD"
] | null | null | null |
bindings/python/cntk/ops/tests/block_test.py
|
MSXC/CNTK
|
d223d48b411bc994acd465ed333c9f6bed64dd7f
|
[
"RSA-MD"
] | null | null | null |
bindings/python/cntk/ops/tests/block_test.py
|
MSXC/CNTK
|
d223d48b411bc994acd465ed333c9f6bed64dd7f
|
[
"RSA-MD"
] | null | null | null |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
"""
Unit tests for the as_block operation; only the forward pass is tested
"""
from __future__ import division
import numpy as np
import pytest
from .ops_test_utils import unittest_helper, _test_unary_op, _test_binary_op, AA, I, precision, PRECISION_TO_TYPE, cntk_device
import cntk as C
from cntk.axis import Axis
from cntk.internal import sanitize_dtype_cntk
from .. import constant
AS_BLOCK_TEST_CASES = [
#(input_shape, output_shape, expected_output_shape)
((2, 3), (3, 2), (3, 2)),
((2, 3), (6, 1), (6, 1)),
((6, 1), (2, 3), (2, 3)),
((2, 3, 5), (5, 6), (5, 6)),
((2, 3, 5), (C.InferredDimension, 6), (5, 6)),
((2, 3, 5), (5, C.InferredDimension), (5, 6)),
]
@pytest.mark.parametrize("input_shape, output_shape, expected_output_shape", AS_BLOCK_TEST_CASES)
def test_op_as_block(input_shape, output_shape, expected_output_shape, device_id, precision):
# We test using reshape as the operation that is encapsulated in a block
dev = cntk_device(device_id)
from cntk.internal import sanitize_dtype_cntk
from .. import reshape, element_times, as_block
num_tensor_elements = np.multiply.reduce(input_shape)
input_tensor = np.arange(
num_tensor_elements, dtype=PRECISION_TO_TYPE[precision]).reshape(input_shape)
input_reshaped = input_tensor.reshape(expected_output_shape)
    a_placeholder = C.placeholder_variable()
a_reshaped = reshape(a_placeholder, output_shape)
const_input_reshaped = constant(input_reshaped, device=dev)
block_composite = element_times(a_reshaped, const_input_reshaped, name='element_times_inside_block')
a = I(shape=input_tensor.shape,
dtype=sanitize_dtype_cntk(PRECISION_TO_TYPE[precision]),
needs_gradient=True,
name='a')
input_op = as_block(block_composite, [(a_placeholder, a)], 'reshape_test_op', block_instance_name='reshape_test_op')
# Test some basic methods related to blocks
assert input_op.is_composite
block_primitive = input_op.root_function.find_by_name('reshape_test_op')
assert block_primitive.name == 'reshape_test_op'
assert block_primitive.is_primitive
assert block_primitive.is_block
element_times_inside_block = block_primitive.block_root.find_by_name('element_times_inside_block')
assert element_times_inside_block.name == 'element_times_inside_block'
assert element_times_inside_block.is_primitive
block_arguments_map = block_primitive.block_arguments_mapping
assert len(block_arguments_map) == 1
expected_forward = [[input_reshaped**2]]
expected_backward = {a: input_tensor}
# create batch
input_tensor.shape = (1, 1) + input_tensor.shape
forward_input = {a: input_tensor}
unittest_helper(input_op,
forward_input, expected_forward, expected_backward,
device_id=device_id, precision=precision)
def test_combine_op_as_block():
# We test using combine as the operation that is encapsulated in a block
from .. import combine, placeholder_variable, as_block, input_variable
f = combine([placeholder_variable()])
f = as_block(f, [(f.placeholders[0], placeholder_variable())], 'id')
x = placeholder_variable()
y = placeholder_variable()
x = f.clone('share', {f.placeholders[0]: x})
z = x - y
# connect to inputs
z.replace_placeholders({z.placeholders[0]: input_variable(1), z.placeholders[1]: input_variable(1)})
# evaluate
res = z.eval({z.arguments[0]: [[5.0]], z.arguments[1]: [[3.0]]})
expected_forward = [[[2.]]]
assert np.array_equal(res, expected_forward)
def test_block_with_duplicate_inputs():
from .. import placeholder_variable, as_block, input_variable
x = input_variable((1,), name='input')
left_operand_placeholder = placeholder_variable(name='left_placeholder')
right_operand_placeholder = placeholder_variable()
plus_block = as_block(right_operand_placeholder + left_operand_placeholder, [(left_operand_placeholder, x), (right_operand_placeholder, x)], 'plus')
plus_block_clone = plus_block.clone('share')
def test_as_block_with_function_in_arguments_map():
from .. import placeholder_variable, as_block, input_variable
x = input_variable((1,), name='input')
x_plus_2 = x + 2
left_operand_placeholder = placeholder_variable(name='left_placeholder')
right_operand_placeholder = placeholder_variable()
plus_block = as_block(right_operand_placeholder + left_operand_placeholder, [(left_operand_placeholder, x_plus_2), (right_operand_placeholder, x)], 'plus')
# evaluate
res = plus_block.eval({plus_block.arguments[0]: [[1.0]]})
expected_forward = [[[4.]]]
assert np.array_equal(res, expected_forward)
def test_block_clone():
from .. import placeholder_variable, as_block, input_variable, parameter, times
x = input_variable((1,), name='input')
operand_placeholder = placeholder_variable(name='placeholder')
w = parameter(shape=(1,1), init=1)
b = parameter(shape=(1,), init=2)
block_composite = times(operand_placeholder, w) + b
dense_block = as_block(block_composite, [(operand_placeholder, x)], 'dense')
w_new = parameter(shape=(1,1), init=3)
dense_block_clone = dense_block.clone('share', {w : w_new})
assert dense_block_clone.parameters[0].uid == b.uid
assert dense_block_clone.inputs[1].uid == w_new.uid
result = dense_block_clone.eval({dense_block_clone.arguments[0] : [np.asarray([2.], dtype=np.float32)]})
assert np.array_equal(result, [[[8.]]])
def test_root_block_clone():
from .. import placeholder_variable, as_block, input_variable, parameter, times
x = input_variable((1,), name='input')
operand_placeholder = placeholder_variable(name='placeholder')
w = parameter(shape=(1,1), init=1)
b1 = parameter(shape=(1,), init=2)
block_composite = times(operand_placeholder, w) + b1
dense_block = as_block(block_composite, [(operand_placeholder, x)], 'dense')
b2 = parameter(shape=(1,), init=3)
replacement = dense_block + b2
dense_block_clone = dense_block.clone('share', {dense_block : replacement})
assert replacement.root_function.uid == dense_block_clone.root_function.uid
assert dense_block_clone.parameters[0].uid == w.uid
assert dense_block_clone.parameters[1].uid == b1.uid
assert dense_block_clone.parameters[2].uid == b2.uid
result = dense_block_clone.eval({x : [np.asarray([2.], dtype=np.float32)]})
assert np.array_equal(result, [[[7.]]])
| 39.17341
| 159
| 0.710491
|
794cd628894a14fc7b2c57b60f264fb45c3fe219
| 411
|
py
|
Python
|
fetch-data.py
|
gidoca/renormalize
|
45ef51677043239dc0cb6af71528a2401635d842
|
[
"MIT"
] | null | null | null |
fetch-data.py
|
gidoca/renormalize
|
45ef51677043239dc0cb6af71528a2401635d842
|
[
"MIT"
] | null | null | null |
fetch-data.py
|
gidoca/renormalize
|
45ef51677043239dc0cb6af71528a2401635d842
|
[
"MIT"
] | null | null | null |
import requests
import json
url_bs: str = 'https://data.bs.ch/api/records/1.0/search/?dataset=100111&q=&sort=-datum&facet=datum'
res_bs = requests.get(url_bs)
res_bs.raise_for_status()
data = [{'date': record['fields']['datum'], 'count': record['fields']['total_geimpfte_personen']} for record in res_bs.json()['records']]
with open('assets/generated/cumulative.json', 'w') as file:
json.dump(data, file)
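# Illustrative sketch (not part of the upload logic above): reading the file
# back, assuming the script has already written 'assets/generated/cumulative.json'.
#
# with open('assets/generated/cumulative.json') as generated:
#     records = json.load(generated)
# # The request above sorts by '-datum', so the first record is the newest.
# print(records[0]['date'], records[0]['count'])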
| 41.1
| 137
| 0.717762
|
794cd74c4ae2c6c86b1ec7863359e07f3d9a9e2d
| 2,499
|
py
|
Python
|
test/system/test_eapictl.py
|
arista-eosplus/eapictl
|
38917722dd61224f044f30daf21f8120cdf034fa
|
[
"BSD-3-Clause"
] | 2
|
2017-08-24T04:41:07.000Z
|
2020-02-27T00:14:11.000Z
|
test/system/test_eapictl.py
|
arista-eosplus/eapictl
|
38917722dd61224f044f30daf21f8120cdf034fa
|
[
"BSD-3-Clause"
] | 1
|
2015-04-14T18:06:19.000Z
|
2017-04-26T13:45:31.000Z
|
test/system/test_eapictl.py
|
arista-eosplus/eapictl
|
38917722dd61224f044f30daf21f8120cdf034fa
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import os
import json
import shlex
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from StringIO import StringIO
from systestlib import get_fixture
import eapictl.app
class TestStatus(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
sys.stdout = StringIO()
self.connection = open(get_fixture('dut')).readlines()[0]
self.config = get_fixture('eapi.conf')
def tearDown(self):
sys.stdout = self.stdout
def runcmd(self, cmdline):
cmdline = str(cmdline).format(connection=self.connection)
cmdline = shlex.split(cmdline)
cmdline.extend(['--config', self.config])
eapictl.app.main(cmdline)
def test_status_command(self):
""" status {connection}
"""
keys = ['http', 'http_port', 'enabled', 'https_port', 'https']
self.runcmd('status {connection}')
resp = json.loads(sys.stdout.getvalue())
self.assertEqual(sorted(resp.keys()), sorted(keys))
def test_start_command(self):
self.runcmd('stop {connection}')
self.runcmd('start {connection}')
output = sys.stdout.getvalue().split('\n')
resp = json.loads(output[1])
self.assertTrue(resp['enabled'])
def test_stop_command(self):
self.runcmd('start {connection}')
self.runcmd('stop {connection}')
output = sys.stdout.getvalue().split('\n')
resp = json.loads(output[1])
self.assertFalse(resp['enabled'])
def test_configure_transport_http(self):
""" restart {connection} --transport http
"""
self.runcmd('restart {connection} --transport http')
output = sys.stdout.getvalue()
resp = json.loads(output)
self.assertEqual(resp['http'], 'running')
def test_configure_transport_https(self):
""" restart {connection} --transport https
"""
self.runcmd('restart {connection} --transport https')
output = sys.stdout.getvalue()
resp = json.loads(output)
self.assertEqual(resp['https'], 'running')
def test_configure_server_port(self):
""" restart {connection} --transport http --eapi-port 8080
"""
self.runcmd('restart {connection} --transport http --eapi-port 8080')
output = sys.stdout.getvalue()
resp = json.loads(output)
self.assertEqual(resp['http_port'], '8080')
if __name__ == '__main__':
unittest.main()
| 28.724138
| 77
| 0.62425
|
794cd758a58be1c10db0ca0aed57cfef46717710
| 3,658
|
py
|
Python
|
Landmark, Triangulation & Face Morphing/Code/part3.py
|
cansuynk/ComputerVision
|
441e8621528ddae9d213d1633e4317d1ffb6abfa
|
[
"MIT"
] | null | null | null |
Landmark, Triangulation & Face Morphing/Code/part3.py
|
cansuynk/ComputerVision
|
441e8621528ddae9d213d1633e4317d1ffb6abfa
|
[
"MIT"
] | null | null | null |
Landmark, Triangulation & Face Morphing/Code/part3.py
|
cansuynk/ComputerVision
|
441e8621528ddae9d213d1633e4317d1ffb6abfa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import dlib
from part2 import catLandmarks
#Loads images
catImage = cv2.imread("./CAT_00/00000095_001.jpg")
rows, cols, ch = catImage.shape
firstImage = cv2.imread("./dennis_ritchie.jpg")
secondImage = cv2.imread("./yusuf.jpg")
#Since it is necessary for all three photos to be the same size, I resized the photos
firstImage = cv2.resize(firstImage,(cols,rows))
secondImage = cv2.resize(secondImage,(cols,rows))
#This function finds the face landmarks for images
def landmarks(image):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rectangles = detector(gray)
points = predictor(gray, rectangles[0])
return points
def subdivPoints (image, landmarks_x, landmarks_y):
#Performs Delaunay Triangulation
subdiv = cv2.Subdiv2D((0,0,image.shape[1]+1, image.shape[0]+1))
#Insert landmark points
for i in range(0, 68):
subdiv.insert((landmarks_x[i],landmarks_y[i]))
rows, cols, ch = image.shape
#Also insert corners and the midpoints of the edges
subdiv.insert((0,0))
subdiv.insert((0, rows/2))
subdiv.insert((cols/2, 0))
subdiv.insert((cols-1, 0))
subdiv.insert((cols-1, rows/2))
subdiv.insert((0, rows-1))
subdiv.insert((cols/2, rows-1))
subdiv.insert((cols-1, rows-1))
#Obtains full list of triangles
triangles = subdiv.getTriangleList()
return triangles
#Draw triangles
def drawLines (triangles, image):
    # Each triangle is given as (x1, y1, x2, y2, x3, y3)
    for triangle in triangles:
        sel_triangle = triangle.astype(int)
        point1 = (sel_triangle[0], sel_triangle[1])
        point2 = (sel_triangle[2], sel_triangle[3])
        point3 = (sel_triangle[4], sel_triangle[5])
        cv2.line(image, point1, point2, (0, 255, 0), 1)
        cv2.line(image, point2, point3, (0, 255, 0), 1)
        cv2.line(image, point1, point3, (0, 255, 0), 1)
################################################################################
landmarkPoints = landmarks(firstImage)
landmarks_x = []
landmarks_y = []
#I save the landmark points x and y coordinates separately
for i in range(0, 68):
landmarks_x.append(landmarkPoints.part(i).x)
landmarks_y.append(landmarkPoints.part(i).y)
#Find and draw triangles
triangles_1 = subdivPoints(firstImage, landmarks_x, landmarks_y)
drawLines(triangles_1, firstImage)
landmarkPoints = landmarks(secondImage)
landmarks_x = []
landmarks_y = []
for i in range(0, 68):
landmarks_x.append(landmarkPoints.part(i).x)
landmarks_y.append(landmarkPoints.part(i).y)
triangles_2 = subdivPoints(secondImage, landmarks_x, landmarks_y)
drawLines(triangles_2, secondImage)
#Calls function from part2 to take landmark points of cat
catLandmark_x, catLandmark_y = catLandmarks()
triangles_3 = subdivPoints(catImage, catLandmark_x, catLandmark_y)
drawLines(triangles_3, catImage)
#To display the images you can open the comments
"""
cv2.imshow("Output1", firstImage)
cv2.imshow("Output2", secondImage)
cv2.imshow("Cat", catImage)
cv2.waitKey(0)
"""
#To save the image you can open the comments
"""
cv2.imwrite("Part3_dennis.jpg", firstImage)
cv2.imwrite("Part3_yusuf.jpg", secondImage)
cv2.imwrite("Part3_cat.jpg", catImage)
"""
| 27.923664
| 82
| 0.6386
|
794cd8433f7956863e22a503a6ad9d953717dd98
| 1,296
|
py
|
Python
|
ch06/overfit_dropout.py
|
atocplusplus/test
|
471ff64c25d27eaad58d8b5a9e787249db974d44
|
[
"MIT"
] | null | null | null |
ch06/overfit_dropout.py
|
atocplusplus/test
|
471ff64c25d27eaad58d8b5a9e787249db974d44
|
[
"MIT"
] | null | null | null |
ch06/overfit_dropout.py
|
atocplusplus/test
|
471ff64c25d27eaad58d8b5a9e787249db974d44
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import os
import sys
sys.path.append(os.pardir)  # So that files in the parent directory can be imported
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.multi_layer_net_extend import MultiLayerNetExtend
from common.trainer import Trainer
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# Reduce the training data to reproduce overfitting
x_train = x_train[:300]
t_train = t_train[:300]
use_dropout = True  # Set to False to run without dropout
dropout_ratio = 0.15
network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100],
output_size=10, use_dropout=use_dropout, dropout_ration=dropout_ratio)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=301, mini_batch_size=100,
optimizer='sgd', optimizer_param={'lr': 0.01}, verbose=True)
trainer.train()
train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list
# Plot the graph ==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
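# Illustrative sketch: a minimal standalone dropout layer, added only to show
# the mechanism the experiment above relies on. It is NOT the implementation
# used by common.multi_layer_net_extend.
class SimpleDropout:
    def __init__(self, dropout_ratio=0.15):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if train_flg:
            # Randomly drop units during training.
            self.mask = np.random.rand(*x.shape) > self.dropout_ratio
            return x * self.mask
        # At inference time, scale activations instead of dropping them.
        return x * (1.0 - self.dropout_ratio)

    def backward(self, dout):
        # Gradients only flow through the units that were kept.
        return dout * self.mask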
| 35.027027
| 100
| 0.725309
|
794cd8856437f1fc981614237f747c9335a6968a
| 1,432
|
py
|
Python
|
fHDHR_web/pages/settings_html.py
|
CarloDiGi/fHDHR_Ceton
|
c8ab6405aeca6f32e6df37e1dabc9269642a4aeb
|
[
"WTFPL"
] | 1
|
2021-03-08T23:34:49.000Z
|
2021-03-08T23:34:49.000Z
|
fHDHR_web/pages/settings_html.py
|
CarloDiGi/fHDHR_Ceton
|
c8ab6405aeca6f32e6df37e1dabc9269642a4aeb
|
[
"WTFPL"
] | null | null | null |
fHDHR_web/pages/settings_html.py
|
CarloDiGi/fHDHR_Ceton
|
c8ab6405aeca6f32e6df37e1dabc9269642a4aeb
|
[
"WTFPL"
] | null | null | null |
from flask import request, render_template, session
class Settings_HTML():
endpoints = ["/settings", "/settings.html"]
endpoint_name = "page_settings_html"
endpoint_access_level = 1
endpoint_category = "tool_pages"
pretty_name = "Settings"
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
web_settings_dict = {}
for config_section in list(self.fhdhr.config.conf_default.keys()):
web_settings_dict[config_section] = {}
for config_item in list(self.fhdhr.config.conf_default[config_section].keys()):
if self.fhdhr.config.conf_default[config_section][config_item]["config_web"]:
web_settings_dict[config_section][config_item] = {
"value": self.fhdhr.config.dict[config_section][config_item],
"value_default": self.fhdhr.config.conf_default[config_section][config_item]["value"],
"hide": self.fhdhr.config.conf_default[config_section][config_item]["config_web_hidden"]
}
if not len(web_settings_dict[config_section].keys()):
del web_settings_dict[config_section]
return render_template('settings.html', request=request, session=session, fhdhr=self.fhdhr, web_settings_dict=web_settings_dict, list=list)
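# Illustrative note (keys and values are hypothetical): the dictionary handed
# to the template has the shape
#     {
#         "<section>": {
#             "<item>": {"value": ..., "value_default": ..., "hide": ...},
#         },
#     }
# and sections with no web-configurable items are dropped entirely.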
| 42.117647
| 147
| 0.651536
|
794cd8d43ab62723043908224e5d375785aff82c
| 467
|
py
|
Python
|
pvm/transition_state.py
|
gosion/pyPvm
|
d7326799c907b660db11b02fd16843fdb4733eb7
|
[
"MIT"
] | null | null | null |
pvm/transition_state.py
|
gosion/pyPvm
|
d7326799c907b660db11b02fd16843fdb4733eb7
|
[
"MIT"
] | null | null | null |
pvm/transition_state.py
|
gosion/pyPvm
|
d7326799c907b660db11b02fd16843fdb4733eb7
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
class TransitionState(IntEnum):
def __new__(cls, value, phrase, description=""):
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase
obj.description = description
return obj
Pending = 1, "Pending", "Pending to run."
Passed = 1 << 1, "Passed", "Passed."
Waiting = 1 << 2, "Waiting", "Waiting for interaction."
Blocked = 1 << 3, "Blocked", "Cannot run forward."
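if __name__ == "__main__":
    # Illustrative usage only (not part of the original module). Members carry
    # a phrase and a description besides their int value, and the power-of-two
    # values let states be combined as bit flags.
    state = TransitionState.Waiting
    print(state.value, state.phrase, state.description)
    mask = TransitionState.Pending | TransitionState.Blocked
    print(bool(mask & TransitionState.Pending))   # True
    print(bool(mask & TransitionState.Waiting))   # False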
| 27.470588
| 59
| 0.608137
|
794cd9454e55e94d3a51f4e63792c7ce9d0bd264
| 271
|
py
|
Python
|
app/__init__.py
|
AdamKobi/s3uploader
|
25266604ecfa4cced02f88a3cf063acfcf20ca93
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
AdamKobi/s3uploader
|
25266604ecfa4cced02f88a3cf063acfcf20ca93
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
AdamKobi/s3uploader
|
25266604ecfa4cced02f88a3cf063acfcf20ca93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2
# Author: Adam Kobi <adamkobi12@gmail.com>
from .utils import *
logger = init_logging('uploader')
cfg = init_config()
if cfg['debug']:
logger.setLevel(logging.DEBUG)
logger.debug("Configuration - '{}'".format(cfg))
logger.info("Initiated")
| 19.357143
| 52
| 0.693727
|
794cd949b0fa85d9181a3bf04fd4d9123918692b
| 8,619
|
py
|
Python
|
pub_data_visualization/global_tools/compute_delivery_period_index.py
|
cre-os/pub-data-visualization
|
e5ec45e6397258646290836fc1a3b39ad69bf266
|
[
"MIT"
] | 10
|
2020-10-08T11:35:49.000Z
|
2021-01-22T16:47:59.000Z
|
pub_data_visualization/global_tools/compute_delivery_period_index.py
|
cre-os/pub-data-visualization
|
e5ec45e6397258646290836fc1a3b39ad69bf266
|
[
"MIT"
] | 3
|
2021-03-15T14:26:43.000Z
|
2021-12-02T15:27:49.000Z
|
pub_data_visualization/global_tools/compute_delivery_period_index.py
|
cre-dev/pub-data-visualization
|
229bb7a543684be2cb06935299345ce3263da946
|
[
"MIT"
] | 1
|
2021-01-22T16:47:10.000Z
|
2021-01-22T16:47:10.000Z
|
import pandas as pd
import re
#
from .. import global_var
def compute_delivery_period_index(frequency = None,
delivery_begin_dt_local = None,
delivery_end_date_local = None,
tz_local = None,
profile = None,
):
"""
Computes the delivery period index of a given contract.
:param frequency: The type of delivery contract (year, month, etc.)
:param delivery_begin_dt_local: The beginning datetime of the delivery
:param delivery_end_date_local: The end date of the delivery
    :param tz_local: The local timezone
:param profile: The profile of the contract
:type frequency: string
:type delivery_begin_dt_local: pd.Timestamp
:type delivery_end_date_local: pd.Timestamp
    :type tz_local: pytz.tzfile
:type profile: string
:return: The delivery period index
:rtype: int
"""
if ( pd.isnull(delivery_begin_dt_local)
or frequency == global_var.contract_frequency_unknown
or frequency == global_var.contract_frequency_spread
):
return global_var.contract_delivery_period_index_unknown
assert tz_local
assert delivery_begin_dt_local.tz.zone == (tz_local
if type(tz_local) == str
else
tz_local.zone
), (delivery_begin_dt_local.tz.zone,
tz_local,
)
if frequency == global_var.contract_frequency_half_hour:
ans = int(global_var.contract_delivery_period_index_half_hour.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
hour = delivery_begin_dt_local.hour,
minute = delivery_begin_dt_local.minute,
))
elif frequency == global_var.contract_frequency_hour:
ans = int(global_var.contract_delivery_period_index_hour.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
hour = delivery_begin_dt_local.hour,
))
elif frequency == global_var.contract_frequency_bloc:
if profile == global_var.contract_profile_unknown:
ans = global_var.contract_delivery_period_index_unknown
else:
bloc_match = re.compile(global_var.contract_profile_bloc_pattern).match(profile)
hour_begin = int(bloc_match.group(1))
hour_end = int(bloc_match.group(2))
assert hour_begin < hour_end or hour_end == 0
ans = int(global_var.contract_delivery_period_index_bloc.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
hour_begin = hour_begin,
hour_end = hour_end,
))
elif frequency == global_var.contract_frequency_day:
ans = int(global_var.contract_delivery_period_index_day.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_days:
ans = int(global_var.contract_delivery_period_index_days.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
nb_days = int(( delivery_end_date_local
- delivery_begin_dt_local.replace(hour = 0, minute = 0)
).total_seconds()/(3600*24)),
))
elif frequency == global_var.contract_frequency_weekbgn:
ans = int(global_var.contract_delivery_period_index_weekbgn.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_weekend:
ans = int(global_var.contract_delivery_period_index_weekend.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_week:
ans = int(global_var.contract_delivery_period_index_week.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_bow:
ans = int(global_var.contract_delivery_period_index_bow.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_month:
ans = delivery_begin_dt_local.month
elif frequency == global_var.contract_frequency_months:
ans = int(global_var.contract_delivery_period_index_months.format(month = delivery_begin_dt_local.month,
nb_months = ( 12*(delivery_end_date_local.year - delivery_begin_dt_local.year)
+ delivery_end_date_local.month - delivery_begin_dt_local.month
),
))
elif frequency == global_var.contract_frequency_bom:
ans = int(global_var.contract_delivery_period_index_bom.format(month = delivery_begin_dt_local.month,
day = delivery_begin_dt_local.day,
))
elif frequency == global_var.contract_frequency_quarter:
ans = (delivery_begin_dt_local.month//3)+1
elif frequency == global_var.contract_frequency_season:
if delivery_begin_dt_local.month == 4:
ans = global_var.contract_delivery_period_index_summer
elif delivery_begin_dt_local.month == 10:
ans = global_var.contract_delivery_period_index_winter
else:
raise ValueError(frequency, delivery_begin_dt_local)
elif frequency == global_var.contract_frequency_year:
ans = global_var.contract_delivery_period_index_year
elif frequency == global_var.contract_frequency_years:
ans = int(global_var.contract_delivery_period_index_years.format(nb_years = delivery_end_date_local.year - delivery_begin_dt_local.year))
elif frequency == global_var.contract_frequency_gas_year:
ans = global_var.contract_delivery_period_index_gas_year
else:
raise NotImplementedError('frequency = {0} - delivery_begin_dt_local = {1}'.format(frequency, delivery_begin_dt_local))
return ans
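# Illustrative call, kept as a comment because this module is imported as part
# of a package. The constant comes from global_var as used above; the timestamp
# values are placeholders.
#
# import pytz
# dt = pd.Timestamp('2021-03-01 00:00', tz=pytz.timezone('Europe/Paris'))
# compute_delivery_period_index(frequency = global_var.contract_frequency_month,
#                               delivery_begin_dt_local = dt,
#                               tz_local = 'Europe/Paris',
#                               ) # -> 3 (March)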
| 59.854167
| 153
| 0.489964
|
794cd9a98751b3c205907a9b695240808464a1fb
| 1,981
|
py
|
Python
|
parser1.py
|
divyeshBhartiya/InvoiceReader.OCR
|
2fe21d51235114a694aaf95231f164508ba84c34
|
[
"MIT"
] | null | null | null |
parser1.py
|
divyeshBhartiya/InvoiceReader.OCR
|
2fe21d51235114a694aaf95231f164508ba84c34
|
[
"MIT"
] | null | null | null |
parser1.py
|
divyeshBhartiya/InvoiceReader.OCR
|
2fe21d51235114a694aaf95231f164508ba84c34
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import filedialog
from PIL import ImageTk, Image
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
root = Tk()
root.title('Text Extractor')
newline= Label(root)
uploaded_img=Label(root)
scrollbar = Scrollbar(root)
scrollbar.pack( side = RIGHT, fill = Y )
def extract(path):
Actual_image = cv2.imread(path)
Sample_img = cv2.resize(Actual_image,(400,350))
Image_ht,Image_wd,Image_thickness = Sample_img.shape
Sample_img = cv2.cvtColor(Sample_img,cv2.COLOR_BGR2RGB)
texts = pytesseract.image_to_data(Sample_img)
mytext=""
prevy=0
for cnt,text in enumerate(texts.splitlines()):
if cnt==0:
continue
text = text.split()
if len(text)==12:
x,y,w,h = int(text[6]),int(text[7]),int(text[8]),int(text[9])
if(len(mytext)==0):
                prevy = y
if(prevy-y>=10 or y-prevy>=10):
print(mytext)
Label(root,text=mytext,font=('Times',15,'bold')).pack()
mytext=""
mytext = mytext + text[11]+" "
prevy=y
Label(root,text=mytext,font=('Times',15,'bold')).pack()
def show_extract_button(path):
extractBtn= Button(root,text="Extract text",command=lambda: extract(path),bg="#2f2f77",fg="gray",pady=15,padx=15,font=('Times',15,'bold'))
extractBtn.pack()
def upload():
try:
path=filedialog.askopenfilename()
image=Image.open(path)
img=ImageTk.PhotoImage(image)
uploaded_img.configure(image=img)
uploaded_img.image=img
show_extract_button(path)
except:
pass
uploadbtn = Button(root,text="Upload an image",command=upload,bg="#2f2f77",fg="gray",height=2,width=20,font=('Times',15,'bold')).pack()
newline.configure(text='\n')
newline.pack()
uploaded_img.pack()
root.mainloop()
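# Illustrative alternative (not wired into the GUI above): pytesseract can
# return the same data as a dictionary, which avoids splitting the raw string.
# 'some_image' is a placeholder for an image loaded with cv2/PIL.
#
# data = pytesseract.image_to_data(some_image, output_type=pytesseract.Output.DICT)
# for word, conf in zip(data['text'], data['conf']):
#     if word.strip():
#         print(word, conf)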
| 31.951613
| 143
| 0.619384
|
794cdafb939a7093da5cad520aa79fa6ea900dc3
| 1,090
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/eCpriMsgType3_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/eCpriMsgType3_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/eCpriMsgType3_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class eCPRI_Msg_Type_Generic_Data_Transfer3(Base):
__slots__ = ()
_SDM_NAME = 'eCpriMsgType3'
_SDM_ATT_MAP = {
'pcid': 'eCpriMsgType3.header.pcid',
'seqid': 'eCpriMsgType3.header.seqid',
'userdata': 'eCpriMsgType3.header.header',
}
def __init__(self, parent):
super(eCPRI_Msg_Type_Generic_Data_Transfer3, self).__init__(parent)
@property
def pcid(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['pcid']))
@property
def seqid(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['seqid']))
@property
def userdata(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['userdata']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 32.058824
| 83
| 0.708257
|
794cdbde99cf3a80caf7a1045928185017eacc28
| 4,909
|
py
|
Python
|
dbt/adapters/athena/impl.py
|
JustasCe/dbt-athena
|
af61eff1b73f38815c10c15c0dc23b7339f3303f
|
[
"Apache-2.0"
] | null | null | null |
dbt/adapters/athena/impl.py
|
JustasCe/dbt-athena
|
af61eff1b73f38815c10c15c0dc23b7339f3303f
|
[
"Apache-2.0"
] | null | null | null |
dbt/adapters/athena/impl.py
|
JustasCe/dbt-athena
|
af61eff1b73f38815c10c15c0dc23b7339f3303f
|
[
"Apache-2.0"
] | null | null | null |
from uuid import uuid4
import agate
import re
import boto3
from botocore.exceptions import ClientError
from concurrent.futures import Future
from itertools import chain
from threading import Lock
from typing import Iterator, List, Optional, Tuple
from dbt.adapters.base import available
from dbt.adapters.base.impl import catch_as_completed
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.athena import AthenaConnectionManager
from dbt.adapters.athena.relation import AthenaRelation, AthenaSchemaSearchMap
from dbt.contracts.graph.manifest import Manifest
from dbt.events import AdapterLogger
from dbt.utils import executor
from dbt.contracts.graph.compiled import CompileResultNode
logger = AdapterLogger("Athena")
boto3_client_lock = Lock()
class AthenaAdapter(SQLAdapter):
ConnectionManager = AthenaConnectionManager
Relation = AthenaRelation
@classmethod
def date_function(cls) -> str:
return "now()"
@classmethod
def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
return "string"
@classmethod
def convert_number_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
return "double" if decimals else "integer"
@classmethod
def convert_datetime_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
return "timestamp"
@available
def s3_uuid_table_location(self):
conn = self.connections.get_thread_connection()
client = conn.handle
return f"{client.s3_staging_dir}tables/{str(uuid4())}/"
@available
def clean_up_partitions(
self, database_name: str, table_name: str, where_condition: str
):
# Look up Glue partitions & clean up
conn = self.connections.get_thread_connection()
client = conn.handle
with boto3_client_lock:
glue_client = boto3.client('glue', region_name=client.region_name)
s3_resource = boto3.resource('s3', region_name=client.region_name)
partitions = glue_client.get_partitions(
# CatalogId='123456789012', # Need to make this configurable if it is different from default AWS Account ID
DatabaseName=database_name,
TableName=table_name,
Expression=where_condition
)
p = re.compile('s3://([^/]*)/(.*)')
for partition in partitions["Partitions"]:
logger.debug("Deleting objects for partition '{}' at '{}'", partition["Values"], partition["StorageDescriptor"]["Location"])
m = p.match(partition["StorageDescriptor"]["Location"])
if m is not None:
bucket_name = m.group(1)
prefix = m.group(2)
s3_bucket = s3_resource.Bucket(bucket_name)
s3_bucket.objects.filter(Prefix=prefix).delete()
@available
def clean_up_table(
self, database_name: str, table_name: str
):
# Look up Glue partitions & clean up
conn = self.connections.get_thread_connection()
client = conn.handle
with boto3_client_lock:
glue_client = boto3.client('glue', region_name=client.region_name)
try:
table = glue_client.get_table(
DatabaseName=database_name,
Name=table_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'EntityNotFoundException':
logger.debug("Table '{}' does not exists - Ignoring", table_name)
return
if table is not None:
logger.debug("Deleting table data from'{}'", table["Table"]["StorageDescriptor"]["Location"])
p = re.compile('s3://([^/]*)/(.*)')
m = p.match(table["Table"]["StorageDescriptor"]["Location"])
if m is not None:
bucket_name = m.group(1)
prefix = m.group(2)
s3_resource = boto3.resource('s3', region_name=client.region_name)
s3_bucket = s3_resource.Bucket(bucket_name)
s3_bucket.objects.filter(Prefix=prefix).delete()
@available
def quote_seed_column(
self, column: str, quote_config: Optional[bool]
) -> str:
return super().quote_seed_column(column, False)
def _get_catalog_schemas(self, manifest: Manifest) -> AthenaSchemaSearchMap:
info_schema_name_map = AthenaSchemaSearchMap()
nodes: Iterator[CompileResultNode] = chain(
[node for node in manifest.nodes.values() if (
node.is_relational and not node.is_ephemeral_model
)],
manifest.sources.values(),
)
for node in nodes:
relation = self.Relation.create_from(self.config, node)
info_schema_name_map.add(relation)
return info_schema_name_map
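# Illustrative note: the 's3://([^/]*)/(.*)' pattern used by the clean-up
# methods splits an S3 URI into bucket and key prefix, e.g.
#
#   m = re.compile('s3://([^/]*)/(.*)').match('s3://my-bucket/path/to/table/')
#   m.group(1)  # 'my-bucket'
#   m.group(2)  # 'path/to/table/'
#
# (bucket and prefix names here are placeholders).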
| 37.189394
| 136
| 0.649012
|
794cdca05ff5119fa6e97271a066960c9773aa0b
| 669
|
py
|
Python
|
segregation/tests/test_multi_squared_coefficient_of_variation.py
|
sjsrey/segregation
|
bdf53f5423477f0c66975f994f48ce3a16000788
|
[
"BSD-3-Clause"
] | null | null | null |
segregation/tests/test_multi_squared_coefficient_of_variation.py
|
sjsrey/segregation
|
bdf53f5423477f0c66975f994f48ce3a16000788
|
[
"BSD-3-Clause"
] | null | null | null |
segregation/tests/test_multi_squared_coefficient_of_variation.py
|
sjsrey/segregation
|
bdf53f5423477f0c66975f994f48ce3a16000788
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from libpysal.examples import load_example
import geopandas as gpd
import numpy as np
from segregation.aspatial import MultiSquaredCoefficientVariation
class Multi_Squared_Coefficient_of_Variation_Tester(unittest.TestCase):
def test_Multi_Squared_Coefficient_of_Variation(self):
s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
groups_list = ['WHITE', 'BLACK', 'ASIAN','HISP']
df = s_map[groups_list]
index = MultiSquaredCoefficientVariation(df, groups_list)
np.testing.assert_almost_equal(index.statistic, 0.11875484641127525)
if __name__ == '__main__':
unittest.main()
| 37.166667
| 87
| 0.769806
|
794cddad771ab3679ebe1492ad2b656ca17837dd
| 92,765
|
py
|
Python
|
src/transformers/trainer.py
|
rmroczkowski/transformers
|
c988db5af2a5f1ccfcb5ad19bd735b6a77516637
|
[
"Apache-2.0"
] | 1
|
2021-12-27T04:48:40.000Z
|
2021-12-27T04:48:40.000Z
|
src/transformers/trainer.py
|
rmroczkowski/transformers
|
c988db5af2a5f1ccfcb5ad19bd735b6a77516637
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/trainer.py
|
rmroczkowski/transformers
|
c988db5af2a5f1ccfcb5ad19bd735b6a77516637
|
[
"Apache-2.0"
] | 1
|
2021-12-27T04:49:35.000Z
|
2021-12-27T04:49:35.000Z
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
import fairscale
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if version.parse(fairscale.__version__) >= version.parse("0.3"):
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap import auto_wrap
else:
FullyShardedDDP = None
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
        # Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or (args.deepspeed and args.do_train)
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
self._signature_columns = None
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
model_input_name=model_input_name,
)
else:
if self.args.world_size <= 1:
return RandomSampler(self.train_dataset)
elif self.args.parallel_mode == ParallelMode.TPU and not self.args.dataloader_drop_last:
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
)
else:
return DistributedSampler(
self.train_dataset, num_replicas=self.args.world_size, rank=self.args.process_index
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
        Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
# Keep track of whether we can do len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif self.args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
# to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
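# A minimal usage sketch for resuming training (trainer construction, dataset names and the
# checkpoint path are illustrative assumptions, not part of this file):
#
#     trainer = Trainer(model=model, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds)
#     trainer.train(resume_from_checkpoint=True)                      # last checkpoint in args.output_dir
#     trainer.train(resume_from_checkpoint="output/checkpoint-500")   # or an explicit checkpoint directory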
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
# There is no reliable way to check whether a saved DeepSpeed checkpoint exists; load_checkpoint simply returns None when it cannot find one, so this acts as a combined check-and-load.
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to optimize greater or lower objectives. Can be :obj:`"minimize"` or :obj:`"maximize"`; you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
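# A minimal sketch of a search with the optuna backend (`model_init`, `training_args` and the
# datasets are illustrative assumptions, not part of this file):
#
#     def model_init(trial):
#         return AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#
#     trainer = Trainer(args=training_args, model_init=model_init,
#                       train_dataset=train_ds, eval_dataset=eval_ds)
#     best_run = trainer.hyperparameter_search(n_trials=10, direction="minimize", backend="optuna")
#     print(best_run.hyperparameters)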
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
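# A minimal sketch of overriding compute_loss in a subclass (the class-weighting scheme and the
# `class_weights` attribute are illustrative assumptions, not part of this file):
#
#     class WeightedLossTrainer(Trainer):
#         def compute_loss(self, model, inputs, return_outputs=False):
#             labels = inputs.pop("labels")
#             outputs = model(**inputs)
#             logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
#             loss_fct = torch.nn.CrossEntropyLoss(weight=self.class_weights.to(logits.device))
#             loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
#             return (loss, outputs) if return_outputs else loss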
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
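# A minimal usage sketch (dataset names are illustrative assumptions):
#
#     metrics = trainer.evaluate()                                              # uses self.eval_dataset
#     metrics = trainer.evaluate(eval_dataset=val_ds, metric_key_prefix="val")  # keys become "val_loss", etc.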
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (default).
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
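# A minimal usage sketch (the test dataset and classification head are illustrative assumptions):
#
#     output = trainer.predict(test_ds, metric_key_prefix="test")
#     preds = output.predictions.argmax(axis=-1)
#     print(output.metrics)   # e.g. "test_loss", "test_runtime", ...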
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# no harm, but flagging to the user that deepspeed config is ignored for eval
# flagging only for when --do_train wasn't passed as only then it's redundant
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, half it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather the value of `tensors` (a tensor or a list/tuple of nested tensors) across processes and convert
the result to numpy before concatenation.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
| 47.304946 | 190 | 0.642042 |
794cdf410a99d064d1b13bf39145209ba8d5d837 | 1,230 | py | Python | autoio/projrot_io/reader.py | sjklipp/autoio | e2b471e9c9dec933319c98a30d4d519ca5d47645 | ["Apache-2.0"] | null | null | null | autoio/projrot_io/reader.py | sjklipp/autoio | e2b471e9c9dec933319c98a30d4d519ca5d47645 | ["Apache-2.0"] | null | null | null | autoio/projrot_io/reader.py | sjklipp/autoio | e2b471e9c9dec933319c98a30d4d519ca5d47645 | ["Apache-2.0"] | null | null | null |
"""
Functions to read in the projected frequencies generated by ProjRot
"""
def rpht_output(output_str):
""" Parses ProjRot frequency output file strings for the
projected vibrational frequencies, sorted in ascending order.
Works for the output of both
(1) rotation-translation projections and
(2) rotation-translation/hindered-rotor projections.
:param output_str: string of lines of ProjRot output file
:type output_str: str
:rtype: (list(float), list(float))
"""
# Read the file and read in the non-zero frequencies
freqs = []
for line in output_str.splitlines():
line = line.strip()
if line != '':
freqs.append(float(line))
# Build lists for the real and imaginary frequencies
real_freqs = []
imag_freqs = []
for freq in freqs:
# Ignore zeros and grab the negative vals from projrot out_str
if freq != 0.0:
if freq > 0.0:
real_freqs.append(freq)
else:
imag_freqs.append(-1.0*freq)
# Sort imaginary frequencies in descending order
imag_freqs.sort(reverse=True)
return sorted(real_freqs), sorted(imag_freqs)
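# A minimal sketch of the expected behaviour on a hand-written string (not a real ProjRot output file):
#
#     out_str = "0.0\n-1234.5\n500.0\n1500.0\n"
#     real, imag = rpht_output(out_str)
#     # real == [500.0, 1500.0]; imag == [1234.5]  (zeros dropped, negatives returned as positive values)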
| 30 | 70 | 0.630894 |
794cdfd862368bc2c77596b409ff7a4ae5d18d24 | 687 | py | Python | python/test/misc/test_to_json.py | takashiharano/util | 0f730475386a77415545de3f9763e5bdeaab0e94 | ["MIT"] | null | null | null | python/test/misc/test_to_json.py | takashiharano/util | 0f730475386a77415545de3f9763e5bdeaab0e94 | ["MIT"] | null | null | null | python/test/misc/test_to_json.py | takashiharano/util | 0f730475386a77415545de3f9763e5bdeaab0e94 | ["MIT"] | null | null | null |
#!python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import util
def test_to_json():
data = {
'key1': 'val1',
'key2': 'val2',
'key3': [1, 2, 3],
'key4': {
'key4-1': 1,
'key4-2': 2,
'key4-3': 3
}
}
s = util.to_json(data) + '\n'
s += util.to_json(data, indent=2) + '\n'
return s
def test():
ret = 'test_to_json() = ' + test_to_json() + '\n'
return ret
def main():
try:
ret = test()
except Exception as e:
ret = str(e)
#util.send_response('text', ret, encoding='utf-8')
#util.send_response('text', ret, encoding='shift_jis')
util.send_response('text', ret)
main()
| 16.756098 | 65 | 0.55313 |
794ce0573a07476015403be2341c8b297fad9c3a | 1,142 | py | Python | sinesum1.py | chapman-phys227-2016s/hw-1-seama107 | 52d942891c15a6e575f5c77e5378ed7cc17bdcc3 | ["MIT"] | null | null | null | sinesum1.py | chapman-phys227-2016s/hw-1-seama107 | 52d942891c15a6e575f5c77e5378ed7cc17bdcc3 | ["MIT"] | null | null | null | sinesum1.py | chapman-phys227-2016s/hw-1-seama107 | 52d942891c15a6e575f5c77e5378ed7cc17bdcc3 | ["MIT"] | null | null | null |
#!/usr/bin/python
import math
def f(t, T):
"""
Returns 1 for 0 < t < T/2, 0 at t == T/2, and -1 for T/2 < t < T.
Raises IndexError when t falls outside this domain.
"""
if(t > 0 and t < float(T/2)):
return 1
elif(t == float(T/2)):
return 0
elif(t > float(T/2) and t < T):
return -1
raise IndexError("Out of function domain")
def S(t, n, T):
"""
Sinusoidal (truncated Fourier series) approximation of f, with n terms in the sum.
"""
output = 0.0
for i in range(n):
output += math.sin(2 * math.pi * t * (2*i + 1) / T) / (2*i + 1)
return (4/math.pi) * output
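# S is the truncated Fourier series of the square wave f:
#     S(t; n, T) = (4/pi) * sum_{i=0}^{n-1} sin(2*pi*(2i+1)*t/T) / (2i+1)
# so the errors reported by print_error_results below generally shrink as n grows,
# converging slowly for alpha values close to the discontinuities (Gibbs phenomenon).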
def test_f():
assert(f(0,0) == 0)
assert(f(.5,2) == 1)
assert(f(1.5,2) == -1)
def calc_error(n,a,T):
apr = S(a*T, n, T)
exact = f(a*T,T)
return exact - apr
def print_error_results(list_of_n = [1,3,5,10,30,100], list_of_alpha = [.01,.25,.49]):
print "alphas:",
for a in list_of_alpha:
print " {0:.3g}".format(a),
print
for n in list_of_n:
print "n: {0} ".format(n),
for a in list_of_alpha:
print " {0:+.3g}".format(calc_error(n,a,2*math.pi)),
print
| 22.392157 | 86 | 0.52627 |
794ce26032d5715f901627df5781d455ed1df27a | 5,291 | py | Python | DPM/src/DPM.py | OSADP/PMA | 2dd81f55ee6e15eff4462506560faea44ea49a0e | ["Apache-2.0"] | null | null | null | DPM/src/DPM.py | OSADP/PMA | 2dd81f55ee6e15eff4462506560faea44ea49a0e | ["Apache-2.0"] | null | null | null | DPM/src/DPM.py | OSADP/PMA | 2dd81f55ee6e15eff4462506560faea44ea49a0e | ["Apache-2.0"] | 1 | 2020-02-02T18:10:59.000Z | 2020-02-02T18:10:59.000Z |
"""
The purpose of the prototype DMA Performance Measurement application is to
measure mode-independent, trip-based traveler mobility and system productivity
by taking trip- or trajectory-based vehicle input and aggregating that
information into system-wide performance measures. The DMA Performance
Measurement application uses trip-based system performance measure algorithms
developed under the Integrated Corridor Management (ICM) Program and adapts
them for use with observed data to measure travel time reliability, delay, and
throughput.
The program has four (4) files:
DPM.py - Main program
Files.py - Classes used to read in all of the input files
sqlload.py - Classes to control the program's interface with the SQLlite
database
timeslice.py - Class that is used for managing and determining the individual
time slices based on the trip starting time
To run the program type the following from a command prompt:
>>python DPM.py -file [your control file name]
"""
__author__ = 'Jim Larkin (Noblis)'
__date__ = 'February 2012'
__credits__ = ["Meenakshy Vasudevan (Noblis)","Karl Wunderlich (Noblis)"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Jim Larkin (Noblis)"
__email__ = "jlarkin@noblis.org"
__status__ = "Prototype"
from Files import ControlFile, ConditionFile, Free_Flow_File
from sqlload import Sqllite_DB
from timeslice import Timeslice
def run_stats(db):
"""
Core procedure of the DMA Performance Measurement application that runs
through all of the statistics for generating the performance measures and
loads them into the SQLlite database.
@param db - Sqllite_DB object that controls all calls to the database.
@return None
"""
print "Calculating Performance Measures"
#create and populate condition table
db.create_condition_results_table()
#get list count of each area
conditions = db.list_conditions()
ods = db.list_ods()
timeslices = db.list_timeperiod()
modes = db.list_mode()
print "Conditions {} ODs {} Timeperiods {} Modes {}".format(len(conditions),
len(ods), len(timeslices), len(modes))
db.create_weighted_table()
#Add Median, reliable trips by Mode and time period for all conditions,
#origins, destinations and time period
for condition in conditions:
ods = db.list_ods(condition)
print "ODs {} for condition {}".format(len(ods), str(condition))
for O, D in ods:
for ts in timeslices:
for mode in modes:
#Add median travel time and reliable trips to database by mode
db.update_condition_results(condition, O, D, ts, mode)
if condition == conditions[-1]:
db.create_weighted_results( O, D, ts, mode)
#Add median travel time and reliable trips to database by time period
db.update_condition_results(condition, O, D, ts)
if condition == conditions[-1]:
db.create_weighted_results(O, D, ts)
#Add Planning index for weighted results table
for O, D in ods:
for ts in timeslices:
for mode in modes:
db.add_planning_index_weighted_results(O,D,ts,mode)
db.commit()
db.add_planning_index_weighted_results(O,D,ts)
db.commit()
db.commit()
#create and populates the system table
db.create_system_table()
def main():
"""
Main procedure of the DMA Performance Measurement application that reads
in all input files, creates the database and runs the run_stats procedure.
Note this procedure uses Python's argparse library which is only available
in Python 2.7 or higher.
@param None
@return None
"""
import argparse
parser = argparse.ArgumentParser(description="""
DMA Performance Measurement application was designed by Noblis.""")
parser.add_argument('-file', help="""control file for program. if not given
then file defaults to master.in""", default='master.in')
args = parser.parse_args().__dict__
#read control file
if args['file'] =='master.in':
print "No control file given using default control file: master.in"
cf = ControlFile(args['file'])
cf.validate()
#create Time_slice Object
ts = Timeslice(cf.start_time, cf.end_time, cf.time_period_length)
files = cf.conditions_data
#Create Database Object.
db = Sqllite_DB()
#Create Database
db.create_db(cf.database_file)
#Create Database trip table
db.create_trips_table()
#Create Database condition table
db.create_condition_table(files)
#create Free Flow File
free_flow_file = Free_Flow_File(cf.free_flow_file)
free_flow_file.load_file()
#create and load Free_flow Table
db.create_free_flow_table(free_flow_file)
#load each condition file into trip table
for id, f, pro, file_type in files:
condition_file = ConditionFile(f, id, db, ts, file_type)
condition_file.load_file()
#Run Stats on Data
run_stats(db)
print "Program Complete"
#Runs main procedure when the file is ran
if __name__ == "__main__":
main()
| 32.262195 | 85 | 0.686449 |
794ce397331f451655b9c432b108708653db6223 | 980 | py | Python | Algorithms/0005_Longest_Palindromic_Substring.py | drjordy66/LeetCode | ba0c04ee5ddc8c9177dd2995be95dd6d0640bc38 | ["MIT"] | null | null | null | Algorithms/0005_Longest_Palindromic_Substring.py | drjordy66/LeetCode | ba0c04ee5ddc8c9177dd2995be95dd6d0640bc38 | ["MIT"] | null | null | null | Algorithms/0005_Longest_Palindromic_Substring.py | drjordy66/LeetCode | ba0c04ee5ddc8c9177dd2995be95dd6d0640bc38 | ["MIT"] | null | null | null |
class Solution:
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
x = [i for i in s]
if x != []:
longest = x[0]
else:
longest = ''
for i in range(len(x)):
indices = [index for index, value in enumerate(x[i + 1:]) if value == x[i]]
indices = [z + i + 1 for z in indices]
for j in sorted(indices, reverse=True):
if len(x[i:j + 1]) > len(longest):
forward = x[i:j + 1]
backward = x[i:j + 1]
backward.reverse()
if forward == backward:
longest = ''.join(forward)
break
else:
pass
else:
break
if len(longest) > len(x[i:]):
break
else:
pass
return longest
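# Usage sketch (inputs are illustrative):
#
#     Solution().longestPalindrome("babad")   # returns "bab" ("aba" would also be a valid palindrome)
#     Solution().longestPalindrome("cbbd")    # returns "bb"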
| 30.625 | 87 | 0.366327 |
794ce577037e302342b864852b6f22a93d7a05a9 | 7,143 | py | Python | setup.py | andsor/pydevs | b4e33f9d235d6ea0b694033b32fb201caac3acf7 | ["Apache-2.0"] | 3 | 2015-10-25T18:20:54.000Z | 2020-03-14T11:22:28.000Z | setup.py | andsor/pydevs | b4e33f9d235d6ea0b694033b32fb201caac3acf7 | ["Apache-2.0"] | 15 | 2015-02-20T19:46:52.000Z | 2019-02-15T09:44:40.000Z | setup.py | andsor/pydevs | b4e33f9d235d6ea0b694033b32fb201caac3acf7 | ["Apache-2.0"] | 4 | 2019-01-11T10:12:25.000Z | 2021-05-19T21:32:23.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for devs.
This file was generated with PyScaffold 1.2, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import inspect
import os
import sys
from distutils.cmd import Command
import setuptools
from setuptools import setup
from setuptools.command.test import test as TestCommand
from distutils.extension import Extension
from Cython.Build import cythonize
import versioneer
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# Change these settings according to your needs
MAIN_PACKAGE = "devs"
DESCRIPTION = (
"A Python wrapper of adevs, a C++ library implementing the Discrete Event "
"System Specification (DEVS)"
)
LICENSE = "apache"
URL = "http://github.com/andsor/pydevs"
AUTHOR = "Andreas Sorge"
EMAIL = "as@asorge.de"
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
]
# Add here console scripts like ['hello_world = devs.module:function']
CONSOLE_SCRIPTS = []
# Versioneer configuration
versioneer.VCS = 'git'
versioneer.versionfile_source = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.versionfile_build = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.tag_prefix = 'v' # tags are like 1.2.0
versioneer.parentdir_prefix = MAIN_PACKAGE + '-'
class Tox(TestCommand):
user_options = [
('tox-args=', 'a', "Arguments to pass to tox"),
]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
import shlex
errno = tox.cmdline(
args=shlex.split(self.tox_args) if self.tox_args else None
)
sys.exit(errno)
class ToxAutoDocs(Tox):
def finalize_options(self):
Tox.finalize_options(self)
if self.tox_args is None:
self.tox_args = ''
self.tox_args += ' -e autodocs '
def sphinx_builder():
try:
from sphinx.setup_command import BuildDoc
except ImportError:
class NoSphinx(Command):
user_options = []
def initialize_options(self):
raise RuntimeError("Sphinx documentation is not installed, "
"run: pip install sphinx")
return NoSphinx
class BuildSphinxDocs(BuildDoc):
def run(self):
if self.builder == "doctest":
import sphinx.ext.doctest as doctest
# Capture the DocTestBuilder class in order to return the total
# number of failures when exiting
ref = capture_objs(doctest.DocTestBuilder)
BuildDoc.run(self)
errno = ref[-1].total_failures
sys.exit(errno)
else:
BuildDoc.run(self)
return BuildSphinxDocs
class ObjKeeper(type):
instances = {}
def __init__(cls, name, bases, dct):
cls.instances[cls] = []
def __call__(cls, *args, **kwargs):
cls.instances[cls].append(super(ObjKeeper, cls).__call__(*args,
**kwargs))
return cls.instances[cls][-1]
def capture_objs(cls):
from six import add_metaclass
module = inspect.getmodule(cls)
name = cls.__name__
keeper_class = add_metaclass(ObjKeeper)(cls)
setattr(module, name, keeper_class)
cls = getattr(module, name)
return keeper_class.instances[cls]
def get_install_requirements(path):
content = open(os.path.join(__location__, path)).read()
    return [req for req in content.split("\n") if req != '']
def read(fname):
return open(os.path.join(__location__, fname)).read()
def setup_package():
# Assemble additional setup commands
cmdclass = versioneer.get_cmdclass()
cmdclass['docs'] = sphinx_builder()
cmdclass['doctest'] = sphinx_builder()
cmdclass['test'] = Tox
cmdclass['autodocs'] = ToxAutoDocs
# Some helper variables
version = versioneer.get_version()
docs_path = os.path.join(__location__, "docs")
docs_build_path = os.path.join(docs_path, "_build")
install_reqs = get_install_requirements("requirements.txt")
extra_doc_reqs = get_install_requirements("requirements-doc.txt")
command_options = {
'docs': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path)},
'doctest': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path),
'builder': ('setup.py', 'doctest')},
'test': {'test_suite': ('setup.py', 'tests')},
}
# extensions
devs_extension = Extension("devs.devs",
sources=['devs/devs.pyx'],
language='c++',
include_dirs=['vendor/adevs/include', ],
extra_compile_args=['--std=c++11', ])
setup(name=MAIN_PACKAGE,
version=version,
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
test_suite='tests',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=install_reqs,
setup_requires=['six', 'setuptools_git>=1.1'],
cmdclass=cmdclass,
tests_require=['tox'],
command_options=command_options,
entry_points={'console_scripts': CONSOLE_SCRIPTS},
extras_require={
'docs': extra_doc_reqs,
},
include_package_data=True, # include everything in source control
# but exclude these files
exclude_package_data={'': ['.gitignore']},
ext_modules=cythonize(devs_extension,
compiler_directives={'language_level': 3,
'unraisable_tracebacks': True}),
)
if __name__ == "__main__":
setup_package()
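# --- Editor's sketch (not part of the generated file) ---
# The cmdclass entries assembled in setup_package() are invoked through setup.py;
# typical command lines would look like the following (the tox environment name is
# only an illustration):
#   python setup.py test --tox-args="-e py34"   # run the test suite via tox
#   python setup.py autodocs                    # tox run that also builds the docs
#   python setup.py docs                        # build the Sphinx documentation
#   python setup.py doctest                     # run the Sphinx doctest builder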
| 31.888393
| 79
| 0.606888
|
794ce6c08a1e9f19ebc41053e7895ed0dac2dcd0
| 1,852
|
py
|
Python
|
backend/pyrogram/methods/users/update_username.py
|
appheap/social-media-analyzer
|
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
|
[
"Apache-2.0"
] | 5
|
2021-09-11T22:01:15.000Z
|
2022-03-16T21:33:42.000Z
|
backend/pyrogram/methods/users/update_username.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | null | null | null |
backend/pyrogram/methods/users/update_username.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | 3
|
2022-01-18T11:06:22.000Z
|
2022-02-26T13:39:28.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from typing import Optional
from pyrogram import raw
from pyrogram.scaffold import Scaffold
class UpdateUsername(Scaffold):
async def update_username(
self,
username: Optional[str]
) -> bool:
"""Update your own username.
This method only works for users, not bots. Bot usernames must be changed via Bot Support or by recreating
them from scratch using BotFather. To update a channel or supergroup username you can use
:meth:`~pyrogram.Client.update_chat_username`.
Parameters:
username (``str`` | ``None``):
Username to set. "" (empty string) or None to remove it.
Returns:
``bool``: True on success.
Example:
.. code-block:: python
app.update_username("new_username")
"""
return bool(
await self.send(
raw.functions.account.UpdateUsername(
username=username or ""
)
)
)
| 33.071429
| 114
| 0.649028
|
794ce702994b46c527488675696ac767656f22a9
| 1,266
|
py
|
Python
|
tensorflow/contrib/__init__.py
|
sylviawhoa/tensorflow
|
30f3cdfc420d831e2591cce62fa51164cf8a700a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/__init__.py
|
sylviawhoa/tensorflow
|
30f3cdfc420d831e2591cce62fa51164cf8a700a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/__init__.py
|
sylviawhoa/tensorflow
|
30f3cdfc420d831e2591cce62fa51164cf8a700a
|
[
"Apache-2.0"
] | 1
|
2020-10-02T16:06:39.000Z
|
2020-10-02T16:06:39.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import ctc
from tensorflow.contrib import distributions
from tensorflow.contrib import framework
from tensorflow.contrib import layers
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import testing
from tensorflow.contrib import util
| 40.83871
| 80
| 0.755924
|
794ce840020cde1aedd096f88bf83f924e9ce449
| 2,843
|
py
|
Python
|
examples/Mentor/09.5.GenSph.py
|
peterlama/pivy
|
ad7b50f9a3ce0b69d05184c059fd6de12b90839b
|
[
"0BSD"
] | null | null | null |
examples/Mentor/09.5.GenSph.py
|
peterlama/pivy
|
ad7b50f9a3ce0b69d05184c059fd6de12b90839b
|
[
"0BSD"
] | null | null | null |
examples/Mentor/09.5.GenSph.py
|
peterlama/pivy
|
ad7b50f9a3ce0b69d05184c059fd6de12b90839b
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
###
# Copyright (c) 2002-2007 Systems in Motion
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
###
# This is an example from The Inventor Mentor,
# chapter 9, example 5.
#
# Using a callback for generated primitives.
# A simple scene with a sphere is created.
# A callback is used to write out the triangles that
# form the sphere in the scene.
#
import sys
from pivy.coin import *
##############################################################
# CODE FOR The Inventor Mentor STARTS HERE
def printVertex(vertex):
point = vertex.getPoint()
print("\tCoords = (%g, %g, %g)" % (point[0], point[1], point[2]))
normal = vertex.getNormal()
print("\tNormal = (%g, %g, %g)" % (normal[0], normal[1], normal[2]))
def printHeaderCallback(void, callbackAction, node):
print("\n Sphere ")
# Print the node name (if it exists) and address
    if node.getName():
print('named "%s" ' % node.getName().getString())
print("at address %r\n" % node.this)
return SoCallbackAction.CONTINUE
def printTriangleCallback(void, callbackAction, vertex1, vertex2, vertex3):
print("Triangle:")
printVertex(vertex1)
printVertex(vertex2)
printVertex(vertex3)
def printSpheres(root):
myAction = SoCallbackAction()
myAction.addPreCallback(SoSphere.getClassTypeId(), printHeaderCallback, None)
myAction.addTriangleCallback(SoSphere.getClassTypeId(), printTriangleCallback, None)
myAction.apply(root)
# CODE FOR The Inventor Mentor ENDS HERE
##############################################################
def main():
# Initialize Inventor
# SoDB.init() invoked automatically upon coin module import
# Make a scene containing a red sphere
root = SoSeparator()
myCamera = SoPerspectiveCamera()
myMaterial = SoMaterial()
root.addChild(myCamera)
root.addChild(SoDirectionalLight())
myMaterial.diffuseColor = (1.0, 0.0, 0.0) # Red
root.addChild(myMaterial)
root.addChild(SoSphere())
# Write out the triangles that form the sphere in the scene
printSpheres(root)
return 0
if __name__ == "__main__":
sys.exit(main())
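# --- Editor's note (sketch, not part of the original example) ---
# Running this script prints one " Sphere " header from printHeaderCallback,
# followed by a "Triangle:" block from printTriangleCallback for every triangle
# generated for the sphere, each listing the "Coords = (...)" and "Normal = (...)"
# pairs of its three vertices.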
| 31.588889
| 88
| 0.680267
|
794ce891294131c9b9a2800d614d9b3f73f4cf94
| 1,519
|
py
|
Python
|
TeamProject/src/PollardRho.py
|
cboyer2016/CSE4081TeamProject
|
357b182d9cb1be62e03600211b75f36b88dbd964
|
[
"MIT"
] | 1
|
2021-09-25T20:42:03.000Z
|
2021-09-25T20:42:03.000Z
|
TeamProject/src/PollardRho.py
|
cboyer2016/CSE4081TeamProject
|
357b182d9cb1be62e03600211b75f36b88dbd964
|
[
"MIT"
] | null | null | null |
TeamProject/src/PollardRho.py
|
cboyer2016/CSE4081TeamProject
|
357b182d9cb1be62e03600211b75f36b88dbd964
|
[
"MIT"
] | null | null | null |
# extended Euclidean algorithm, found via Wikipedia and supporting online sources
def euclid_ext(a, b):
if b == 0:
return a, 1, 0
else:
d, xx, yy = euclid_ext(b, a % b)
x = yy
y = xx - (a / b) * yy
return d, x, y
def inverse(a, n):
return euclid_ext(a, n)[1]
def xab(x, a, b, base, value, prime, halfPrime):
sub = x % 3
if sub == 0:
x = x*base % prime
a = (a+1) % halfPrime
if sub == 1:
x = x * value % prime
b = (b + 1) % halfPrime
if sub == 2:
x = x*x % prime
a = a*2 % halfPrime
b = b*2 % halfPrime
return x, a, b
def rho(base, value,prime):
halfPrime = (prime - 1)/2
x = base*value
a = 1
b = 1
X = x
A = a
B = b
for i in xrange(1,prime):
x, a, b = xab(x, a, b, base, value, prime, halfPrime)
X, A, B = xab(X, A, B, base, value, prime, halfPrime)
X, A, B = xab(X, A, B, base, value, prime, halfPrime)
if x == X:
break
s = a-A
t = B-b
res = (inverse(t, halfPrime) * s) % halfPrime
if check(base, value, prime, res):
return res
return res + halfPrime
def check(base, value, prime, exponent):
return pow(base,exponent,prime) == value
if __name__ == "__main__":
base = 2
value = 10
prime = 1019
exponent = rho(base,value,prime)
print ("{}^{} = {} (mod {})".format(base,exponent,value,prime))
print "Status: ", check(base,value,prime, exponent)
| 21.394366
| 75
| 0.508887
|
794ce8d3cb9138e42b1c68e9be9f0d7cc328eedc
| 6,174
|
py
|
Python
|
corehq/motech/requests.py
|
akyogi/commcare-hq
|
44c34634e1b54f566ca200f828ea2aa112f33aa4
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/motech/requests.py
|
akyogi/commcare-hq
|
44c34634e1b54f566ca200f828ea2aa112f33aa4
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/motech/requests.py
|
akyogi/commcare-hq
|
44c34634e1b54f566ca200f828ea2aa112f33aa4
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from django.conf import settings
import requests
from dimagi.utils.logging import notify_exception
from corehq.apps.hqwebapp.tasks import send_mail_async
from corehq.motech.const import REQUEST_TIMEOUT
from corehq.motech.models import RequestLog
from corehq.motech.utils import pformat_json
def log_request(func):
def request_wrapper(self, *args, **kwargs):
log_level = logging.INFO
request_error = ''
response_status = None
response_body = ''
try:
response = func(self, *args, **kwargs)
response_status = response.status_code
response_body = response.content
except Exception as err:
log_level = logging.ERROR
request_error = str(err)
if getattr(err, 'response', None) is not None:
response_status = err.response.status_code
response_body = pformat_json(err.response.text)
raise
else:
return response
finally:
# args will be Requests method, url, and optionally params, data or json.
# kwargs may include Requests method kwargs and raise_for_status.
kwargs.pop('raise_for_status', None)
RequestLog.log(
log_level,
self.domain_name,
self.payload_id,
request_error,
response_status,
response_body,
*args,
**kwargs
)
return request_wrapper
class Requests(object):
"""
Wraps the requests library to simplify use with JSON REST APIs.
Sets auth headers automatically, and requests JSON responses by
default.
To maintain a session of authenticated non-API requests, use
Requests as a context manager.
"""
def __init__(self, domain_name, base_url, username, password,
verify=True, notify_addresses=None, payload_id=None):
"""
Initialise instance
:param domain_name: Domain to store logs under
:param base_url: Remote API base URL
:param username: Remote API username
:param password: Remote API plaintext password
:param verify: Verify SSL certificate?
:param notify_addresses: A list of email addresses to notify of
errors.
:param payload_id: The ID of the case or form submission
associated with this request
"""
self.domain_name = domain_name
self.base_url = base_url
self.username = username
self.password = password
self.verify = verify
self.notify_addresses = [] if notify_addresses is None else notify_addresses
self.payload_id = payload_id
self._session = None
def __enter__(self):
self._session = requests.Session()
return self
def __exit__(self, *args):
self._session.close()
self._session = None
@log_request
def send_request(self, method, *args, **kwargs):
raise_for_status = kwargs.pop('raise_for_status', False)
if not self.verify:
kwargs['verify'] = False
kwargs.setdefault('timeout', REQUEST_TIMEOUT)
if self._session:
response = self._session.request(method, *args, **kwargs)
else:
# Mimics the behaviour of requests.api.request()
with requests.Session() as session:
response = session.request(method, *args, **kwargs)
if raise_for_status:
response.raise_for_status()
return response
def get_url(self, uri):
return '/'.join((self.base_url.rstrip('/'), uri.lstrip('/')))
def delete(self, uri, **kwargs):
kwargs.setdefault('headers', {'Accept': 'application/json'})
return self.send_request('DELETE', self.get_url(uri),
auth=(self.username, self.password), **kwargs)
def get(self, uri, *args, **kwargs):
kwargs.setdefault('headers', {'Accept': 'application/json'})
kwargs.setdefault('allow_redirects', True)
return self.send_request('GET', self.get_url(uri), *args,
auth=(self.username, self.password), **kwargs)
def post(self, uri, data=None, json=None, *args, **kwargs):
kwargs.setdefault('headers', {
'Content-type': 'application/json',
'Accept': 'application/json'
})
return self.send_request('POST', self.get_url(uri), *args,
data=data, json=json,
auth=(self.username, self.password), **kwargs)
def notify_exception(self, message=None, details=None):
self.notify_error(message, details)
notify_exception(None, message, details)
def notify_error(self, message, details=None):
if not self.notify_addresses:
return
message_lines = [
message,
f'Project space: {self.domain_name}',
f'Remote API base URL: {self.base_url}',
f'Remote API username: {self.username}',
]
if self.payload_id:
message_lines.append(f'Payload ID: {self.payload_id}')
if details:
message_lines.extend(['', '', details])
send_mail_async.delay(
'MOTECH Error',
'\r\n'.join(message_lines),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=self.notify_addresses,
)
def parse_request_exception(err):
"""
Parses an instance of RequestException and returns a request
string and response string tuple
"""
err_request = '{method} {url}\n\n{body}'.format(
method=err.request.method,
url=err.request.url,
body=err.request.body
) if err.request.body else ' '.join((err.request.method, err.request.url))
if err.response:
err_content = pformat_json(err.response.content) # pformat_json returns non-JSON values unchanged
err_response = '\n\n'.join((str(err), err_content))
else:
err_response = str(err)
return err_request, err_response
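# --- Editor's sketch (not part of this module) ---
# The context-manager form mentioned in the Requests docstring keeps a single
# authenticated requests.Session across several calls. The domain, base URL,
# credentials and endpoints below are placeholders, not real CommCare HQ values.
#
# with Requests('demo-domain', 'https://remote.example.com/api/',
#               'api-user', 's3cret',
#               notify_addresses=['admin@example.com']) as remote:
#     response = remote.get('patients/', params={'page': 1})
#     response.raise_for_status()
#     remote.post('cases/', json={'case_id': 'abc123'})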
| 35.079545
| 106
| 0.607548
|
794ce907cdc266ffdce59ac09914d4289e868555
| 508
|
py
|
Python
|
wcics/server/routes/auth/users.py
|
CS-Center/CS-Center
|
3cd09f29d214406e6618fc67b9faf59a18f3f11b
|
[
"MIT"
] | null | null | null |
wcics/server/routes/auth/users.py
|
CS-Center/CS-Center
|
3cd09f29d214406e6618fc67b9faf59a18f3f11b
|
[
"MIT"
] | 6
|
2019-12-06T18:06:28.000Z
|
2021-12-01T20:19:05.000Z
|
wcics/server/routes/auth/users.py
|
CS-Center/CS-Center
|
3cd09f29d214406e6618fc67b9faf59a18f3f11b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from wcics import app
from wcics.auth.manage_user import user
from wcics.server.consts import USERS_PER_PAGE
from wcics.server.routes.utils import paged_data
from wcics.database.models import Users
from flask import render_template
@app.route("/users/")
def serve_user_home():
page, pages, users = paged_data(Users.query.order_by(Users.username).all(), USERS_PER_PAGE)
return render_template("account/users.html", active = "Users", page = page, pages = pages, users = users)
| 29.882353
| 107
| 0.76378
|
794ce90cd88276372c2b83d4bc628d29a6b93d2e
| 2,306
|
py
|
Python
|
aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ResetAccountRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ResetAccountRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ResetAccountRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ResetAccountRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'polardb', '2017-08-01', 'ResetAccount','polardb')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_AccountPassword(self):
return self.get_query_params().get('AccountPassword')
def set_AccountPassword(self,AccountPassword):
self.add_query_param('AccountPassword',AccountPassword)
def get_AccountName(self):
return self.get_query_params().get('AccountName')
def set_AccountName(self,AccountName):
self.add_query_param('AccountName',AccountName)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_DBClusterId(self):
return self.get_query_params().get('DBClusterId')
def set_DBClusterId(self,DBClusterId):
self.add_query_param('DBClusterId',DBClusterId)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
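# --- Editor's sketch (not part of the SDK) ---
# A request object like the one above is filled in through its setters and then
# executed with an aliyunsdkcore AcsClient. The credentials, region and cluster
# identifiers below are placeholders.
#
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = ResetAccountRequest()
# request.set_DBClusterId('pc-xxxxxxxxxxxxxxxx')
# request.set_AccountName('test_account')
# request.set_AccountPassword('NewPassw0rd!')
# response = client.do_action_with_exception(request)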
| 34.939394
| 79
| 0.775369
|
794ce9358c0ca450d09eb08800b010a91bd25452
| 1,302
|
py
|
Python
|
bandwidth/webrtc/exceptions/error_exception.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 5
|
2020-11-04T14:29:37.000Z
|
2022-02-23T20:33:07.000Z
|
bandwidth/webrtc/exceptions/error_exception.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 3
|
2021-07-23T18:48:48.000Z
|
2022-03-15T14:59:07.000Z
|
bandwidth/webrtc/exceptions/error_exception.py
|
roverdotcom/python-sdk
|
c6947fb3331b77f0064aeec2dcf0c4ff178de34c
|
[
"MIT"
] | 8
|
2020-04-14T09:22:53.000Z
|
2022-03-11T10:46:06.000Z
|
# -*- coding: utf-8 -*-
"""
bandwidth
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
from bandwidth.api_helper import APIHelper
import bandwidth.exceptions.api_exception
class ErrorException(bandwidth.exceptions.api_exception.APIException):
def __init__(self, reason, response):
"""Constructor for the ErrorException class
Args:
reason (string): The reason (or error message) for the Exception
to be raised.
response (HttpResponse): The HttpResponse of the API call.
"""
super(ErrorException, self).__init__(reason, response)
dictionary = APIHelper.json_deserialize(self.response.text)
if isinstance(dictionary, dict):
self.unbox(dictionary)
def unbox(self, dictionary):
"""Populates the properties of this object by extracting them from a dictionary.
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
"""
self.code = dictionary.get('code')
self.message = dictionary.get('message')
| 32.55
| 89
| 0.646697
|
794cea9b83d983f1b63f465f76cf560b6998c625
| 3,862
|
py
|
Python
|
heat/engine/clients/os/zun.py
|
odmanV2/heat
|
76c20f1fc94a06ce5a00730c50952efe19ed0e3e
|
[
"Apache-2.0"
] | 265
|
2015-01-02T09:33:22.000Z
|
2022-03-26T23:19:54.000Z
|
heat/engine/clients/os/zun.py
|
HyunJin-Jeong/heat
|
8353fddf9ebfb0eca67d6f2b2feb529031acff89
|
[
"Apache-2.0"
] | 8
|
2015-09-01T15:43:19.000Z
|
2021-12-14T05:18:23.000Z
|
heat/engine/clients/os/zun.py
|
HyunJin-Jeong/heat
|
8353fddf9ebfb0eca67d6f2b2feb529031acff89
|
[
"Apache-2.0"
] | 295
|
2015-01-06T07:00:40.000Z
|
2021-09-06T08:05:06.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import tenacity
from zunclient import client as zun_client
from zunclient import exceptions as zc_exc
from heat.engine.clients import client_plugin
CLIENT_NAME = 'zun'
class ZunClientPlugin(client_plugin.ClientPlugin):
service_types = [CONTAINER] = ['container']
default_version = '1.12'
supported_versions = [
V1_12, V1_18, V1_36,
] = [
'1.12', '1.18', '1.36',
]
def _create(self, version=None):
if not version:
version = self.default_version
interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
args = {
'interface': interface,
'service_type': self.CONTAINER,
'session': self.context.keystone_session,
'region_name': self._get_region_name()
}
client = zun_client.Client(version, **args)
return client
def update_container(self, container_id, **prop_diff):
if prop_diff:
self.client(version=self.V1_18).containers.update(
container_id, **prop_diff)
def network_detach(self, container_id, port_id):
with self.ignore_not_found:
self.client(version=self.V1_18).containers.network_detach(
container_id, port=port_id)
return True
def network_attach(self, container_id, port_id=None, net_id=None, fip=None,
security_groups=None):
with self.ignore_not_found:
kwargs = {}
if port_id:
kwargs['port'] = port_id
if net_id:
kwargs['network'] = net_id
if fip:
kwargs['fixed_ip'] = fip
self.client(version=self.V1_18).containers.network_attach(
container_id, **kwargs)
return True
@tenacity.retry(
stop=tenacity.stop_after_attempt(
cfg.CONF.max_interface_check_attempts),
wait=tenacity.wait_exponential(multiplier=0.5, max=12.0),
retry=tenacity.retry_if_result(client_plugin.retry_if_result_is_false))
def check_network_detach(self, container_id, port_id):
with self.ignore_not_found:
interfaces = self.client(
version=self.V1_18).containers.network_list(container_id)
for iface in interfaces:
if iface.port_id == port_id:
return False
return True
@tenacity.retry(
stop=tenacity.stop_after_attempt(
cfg.CONF.max_interface_check_attempts),
wait=tenacity.wait_exponential(multiplier=0.5, max=12.0),
retry=tenacity.retry_if_result(client_plugin.retry_if_result_is_false))
def check_network_attach(self, container_id, port_id):
if not port_id:
return True
interfaces = self.client(version=self.V1_18).containers.network_list(
container_id)
for iface in interfaces:
if iface.port_id == port_id:
return True
return False
def is_not_found(self, ex):
return isinstance(ex, zc_exc.NotFound)
def is_over_limit(self, ex):
return isinstance(ex, zc_exc.RequestEntityTooLarge)
def is_conflict(self, ex):
return isinstance(ex, zc_exc.Conflict)
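# --- Editor's sketch (standalone illustration, not part of Heat) ---
# check_network_detach() and check_network_attach() above share one tenacity
# pattern: keep calling a probe until it returns a truthy value, with exponential
# back-off between attempts. A minimal version of that pattern, with a made-up
# probe and attempt count, would look like:
#
# import tenacity
#
# @tenacity.retry(
#     stop=tenacity.stop_after_attempt(5),
#     wait=tenacity.wait_exponential(multiplier=0.5, max=12.0),
#     retry=tenacity.retry_if_result(lambda result: result is False),
# )
# def wait_until_detached(probe):
#     # probe() returns True once the port no longer appears in the interface list
#     return probe()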
| 33.877193
| 79
| 0.640342
|
794cebd14d80bae4c5a7743083b78e6cb8798fee
| 1,192
|
py
|
Python
|
crawlerSystem/sports1/sports1/pipelines.py
|
Nouldine/Cloud-based-Big-data-project
|
4649b7ac3964101d7a484d7d0c1481f23e70a7b1
|
[
"MIT"
] | null | null | null |
crawlerSystem/sports1/sports1/pipelines.py
|
Nouldine/Cloud-based-Big-data-project
|
4649b7ac3964101d7a484d7d0c1481f23e70a7b1
|
[
"MIT"
] | null | null | null |
crawlerSystem/sports1/sports1/pipelines.py
|
Nouldine/Cloud-based-Big-data-project
|
4649b7ac3964101d7a484d7d0c1481f23e70a7b1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
class Sports1Pipeline(object):
def process_item(self, item, spider):
return item
class MongoDBPipeline( object ):
def __init__(self):
connection = pymongo.MongoClient(
settings['MONGODB_SERVER'],
settings['MONGODB_PORT']
)
db = connection[ settings['MONGODB_DB'] ]
self.collection = db[ settings['MONGODB_COLLECTION'] ]
def process_item( self, item, spider ):
valid = True
for data in item:
if not data:
valid = False
raise DropItem("Missing {0}".format(data))
if valid:
self.collection.insert( dict(item) )
log.msg("Question added to MongoDB database!",
level=log.DEBUG, spider=spider)
return item
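# --- Editor's sketch (not part of this module) ---
# As the header comment says, the pipeline only runs once it is registered in the
# project's settings.py. The module path and connection values below are
# assumptions based on this project's layout, not taken from its real settings.
#
# ITEM_PIPELINES = {'sports1.pipelines.MongoDBPipeline': 300}
# MONGODB_SERVER = 'localhost'
# MONGODB_PORT = 27017
# MONGODB_DB = 'sports'
# MONGODB_COLLECTION = 'articles'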
| 17.529412
| 65
| 0.577181
|
794cebe02790ef00e5c5ca801467ec7c1d17a683
| 48,681
|
py
|
Python
|
python/ccxt/async_support/bigone.py
|
ngugcx/ccxt
|
57133bf1d129f962ed9aa861006257d55e43000c
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/bigone.py
|
ngugcx/ccxt
|
57133bf1d129f962ed9aa861006257d55e43000c
|
[
"MIT"
] | 1
|
2022-01-27T19:54:13.000Z
|
2022-01-27T19:54:13.000Z
|
python/ccxt/async_support/bigone.py
|
ngugcx/ccxt
|
57133bf1d129f962ed9aa861006257d55e43000c
|
[
"MIT"
] | 1
|
2022-03-15T22:51:08.000Z
|
2022-03-15T22:51:08.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
class bigone(Exchange):
def describe(self):
return self.deep_extend(super(bigone, self).describe(), {
'id': 'bigone',
'name': 'BigONE',
'countries': ['CN'],
'version': 'v3',
            'rateLimit': 1200,  # 500 requests per 10 minutes
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': 'min1',
'5m': 'min5',
'15m': 'min15',
'30m': 'min30',
'1h': 'hour1',
'3h': 'hour3',
'4h': 'hour4',
'6h': 'hour6',
'12h': 'hour12',
'1d': 'day1',
'1w': 'week1',
'1M': 'month1',
},
'hostname': 'big.one', # or 'bigone.com'
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/69354403-1d532180-0c91-11ea-88ed-44c06cefdf87.jpg',
'api': {
'public': 'https://{hostname}/api/v3',
'private': 'https://{hostname}/api/v3/viewer',
},
'www': 'https://big.one',
'doc': 'https://open.big.one/docs/api.html',
'fees': 'https://bigone.zendesk.com/hc/en-us/articles/115001933374-BigONE-Fee-Policy',
'referral': 'https://b1.run/users/new?code=D3LLBVFT',
},
'api': {
'public': {
'get': [
'ping',
'asset_pairs',
'asset_pairs/{asset_pair_name}/depth',
'asset_pairs/{asset_pair_name}/trades',
'asset_pairs/{asset_pair_name}/ticker',
'asset_pairs/{asset_pair_name}/candles',
'asset_pairs/tickers',
],
},
'private': {
'get': [
'accounts',
'fund/accounts',
'assets/{asset_symbol}/address',
'orders',
'orders/{id}',
'orders/multi',
'trades',
'withdrawals',
'deposits',
],
'post': [
'orders',
'orders/{id}/cancel',
'orders/cancel',
'withdrawals',
'transfer',
],
},
},
'fees': {
'trading': {
'maker': self.parse_number('0.001'),
'taker': self.parse_number('0.001'),
},
'funding': {
'withdraw': {},
},
},
'exceptions': {
'exact': {
'10001': BadRequest, # syntax error
'10005': ExchangeError, # internal error
"Amount's scale must greater than AssetPair's base scale": InvalidOrder,
"Price mulit with amount should larger than AssetPair's min_quote_value": InvalidOrder,
'10007': BadRequest, # parameter error, {"code":10007,"message":"Amount's scale must greater than AssetPair's base scale"}
'10011': ExchangeError, # system error
'10013': OrderNotFound, # {"code":10013,"message":"Resource not found"}
'10014': InsufficientFunds, # {"code":10014,"message":"Insufficient funds"}
'10403': PermissionDenied, # permission denied
'10429': RateLimitExceeded, # too many requests
'40004': AuthenticationError, # {"code":40004,"message":"invalid jwt"}
'40103': AuthenticationError, # invalid otp code
'40104': AuthenticationError, # invalid asset pin code
'40301': PermissionDenied, # {"code":40301,"message":"Permission denied withdrawal create"}
'40302': ExchangeError, # already requested
'40601': ExchangeError, # resource is locked
'40602': ExchangeError, # resource is depleted
'40603': InsufficientFunds, # insufficient resource
'40605': InvalidOrder, # {"code":40605,"message":"Price less than the minimum order price"}
'40120': InvalidOrder, # Order is in trading
'40121': InvalidOrder, # Order is already cancelled or filled
'60100': BadSymbol, # {"code":60100,"message":"Asset pair is suspended"}
},
'broad': {
},
},
'commonCurrencies': {
'CRE': 'Cybereits',
'FXT': 'FXTTOKEN',
'MBN': 'Mobilian Coin',
'ONE': 'BigONE Token',
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetAssetPairs(params)
#
# {
# "code":0,
# "data":[
# {
# "id":"01e48809-b42f-4a38-96b1-c4c547365db1",
# "name":"PCX-BTC",
# "quote_scale":7,
# "quote_asset":{
# "id":"0df9c3c3-255a-46d7-ab82-dedae169fba9",
# "symbol":"BTC",
# "name":"Bitcoin",
# },
# "base_asset":{
# "id":"405484f7-4b03-4378-a9c1-2bd718ecab51",
# "symbol":"PCX",
# "name":"ChainX",
# },
# "base_scale":3,
# "min_quote_value":"0.0001",
# },
# ]
# }
#
markets = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'name')
uuid = self.safe_string(market, 'id')
baseAsset = self.safe_value(market, 'base_asset', {})
quoteAsset = self.safe_value(market, 'quote_asset', {})
baseId = self.safe_string(baseAsset, 'symbol')
quoteId = self.safe_string(quoteAsset, 'symbol')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
amountPrecisionString = self.safe_string(market, 'base_scale')
pricePrecisionString = self.safe_string(market, 'quote_scale')
amountLimit = self.parse_precision(amountPrecisionString)
priceLimit = self.parse_precision(pricePrecisionString)
precision = {
'amount': int(amountPrecisionString),
'price': int(pricePrecisionString),
}
minCost = self.safe_number(market, 'min_quote_value')
entry = {
'id': id,
'uuid': uuid,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'type': 'spot',
'spot': True,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': self.parse_number(amountLimit),
'max': None,
},
'price': {
'min': self.parse_number(priceLimit),
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
}
result.append(entry)
return result
async def load_markets(self, reload=False, params={}):
markets = await super(bigone, self).load_markets(reload, params)
marketsByUuid = self.safe_value(self.options, 'marketsByUuid')
if (marketsByUuid is None) or reload:
marketsByUuid = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
market = self.markets[symbol]
uuid = self.safe_string(market, 'uuid')
marketsByUuid[uuid] = market
self.options['marketsByUuid'] = marketsByUuid
return markets
def parse_ticker(self, ticker, market=None):
#
# {
# "asset_pair_name":"ETH-BTC",
# "bid":{"price":"0.021593","order_count":1,"quantity":"0.20936"},
# "ask":{"price":"0.021613","order_count":1,"quantity":"2.87064"},
# "open":"0.021795",
# "high":"0.021795",
# "low":"0.021471",
# "close":"0.021613",
# "volume":"117078.90431",
# "daily_change":"-0.000182"
# }
#
marketId = self.safe_string(ticker, 'asset_pair_name')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = None
close = self.safe_number(ticker, 'close')
bid = self.safe_value(ticker, 'bid', {})
ask = self.safe_value(ticker, 'ask', {})
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(bid, 'price'),
'bidVolume': self.safe_number(bid, 'quantity'),
'ask': self.safe_number(ask, 'price'),
'askVolume': self.safe_number(ask, 'quantity'),
'vwap': None,
'open': self.safe_number(ticker, 'open'),
'close': close,
'last': close,
'previousClose': None,
'change': self.safe_number(ticker, 'daily_change'),
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}, market)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'asset_pair_name': market['id'],
}
response = await self.publicGetAssetPairsAssetPairNameTicker(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "asset_pair_name":"ETH-BTC",
# "bid":{"price":"0.021593","order_count":1,"quantity":"0.20936"},
# "ask":{"price":"0.021613","order_count":1,"quantity":"2.87064"},
# "open":"0.021795",
# "high":"0.021795",
# "low":"0.021471",
# "close":"0.021613",
# "volume":"117078.90431",
# "daily_change":"-0.000182"
# }
# }
#
ticker = self.safe_value(response, 'data', {})
return self.parse_ticker(ticker, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['pair_names'] = ','.join(ids)
response = await self.publicGetAssetPairsTickers(self.extend(request, params))
#
# {
# "code":0,
# "data":[
# {
# "asset_pair_name":"PCX-BTC",
# "bid":{"price":"0.000234","order_count":1,"quantity":"0.518"},
# "ask":{"price":"0.0002348","order_count":1,"quantity":"2.348"},
# "open":"0.0002343",
# "high":"0.0002348",
# "low":"0.0002162",
# "close":"0.0002348",
# "volume":"12887.016",
# "daily_change":"0.0000005"
# },
# {
# "asset_pair_name":"GXC-USDT",
# "bid":{"price":"0.5054","order_count":1,"quantity":"40.53"},
# "ask":{"price":"0.5055","order_count":1,"quantity":"38.53"},
# "open":"0.5262",
# "high":"0.5323",
# "low":"0.5055",
# "close":"0.5055",
# "volume":"603963.05",
# "daily_change":"-0.0207"
# }
# ]
# }
#
tickers = self.safe_value(response, 'data', [])
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_time(self, params={}):
response = await self.publicGetPing(params)
#
# {
# "data": {
# "timestamp": 1527665262168391000
# }
# }
#
data = self.safe_value(response, 'data', {})
timestamp = self.safe_integer(data, 'timestamp')
return int(timestamp / 1000000)
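    # Editor's note (worked example): the ping endpoint reports nanoseconds, so the
    # sample value shown above, 1527665262168391000, becomes
    # 1527665262168391000 // 1000000 == 1527665262168, i.e. the millisecond
    # timestamp the rest of the library expects.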
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'asset_pair_name': market['id'],
}
if limit is not None:
request['limit'] = limit # default 50, max 200
response = await self.publicGetAssetPairsAssetPairNameDepth(self.extend(request, params))
#
# {
# "code":0,
# "data": {
# "asset_pair_name": "EOS-BTC",
# "bids": [
# {"price": "42", "order_count": 4, "quantity": "23.33363711"}
# ],
# "asks": [
# {"price": "45", "order_count": 2, "quantity": "4193.3283464"}
# ]
# }
# }
#
orderbook = self.safe_value(response, 'data', {})
return self.parse_order_book(orderbook, symbol, None, 'bids', 'asks', 'price', 'quantity')
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id": 38199941,
# "price": "3378.67",
# "amount": "0.019812",
# "taker_side": "ASK",
# "created_at": "2019-01-29T06:05:56Z"
# }
#
# fetchMyTrades(private)
#
# {
# "id": 10854280,
# "asset_pair_name": "XIN-USDT",
# "price": "70",
# "amount": "1",
# "taker_side": "ASK",
# "maker_order_id": 58284908,
# "taker_order_id": 58284909,
# "maker_fee": "0.0008",
# "taker_fee": "0.07",
# "side": "SELF_TRADING",
# "inserted_at": "2019-04-16T12:00:01Z"
# },
#
# {
# "id": 10854263,
# "asset_pair_name": "XIN-USDT",
# "price": "75.7",
# "amount": "12.743149",
# "taker_side": "BID",
# "maker_order_id": null,
# "taker_order_id": 58284888,
# "maker_fee": null,
# "taker_fee": "0.0025486298",
# "side": "BID",
# "inserted_at": "2019-04-15T06:20:57Z"
# }
#
timestamp = self.parse8601(self.safe_string_2(trade, 'created_at', 'inserted_at'))
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
marketId = self.safe_string(trade, 'asset_pair_name')
symbol = self.safe_symbol(marketId, market, '-')
side = self.safe_string(trade, 'side')
takerSide = self.safe_string(trade, 'taker_side')
takerOrMaker = None
if (takerSide is not None) and (side is not None) and (side != 'SELF_TRADING'):
takerOrMaker = 'taker' if (takerSide == side) else 'maker'
if side is None:
# taker side is not related to buy/sell side
# the following code is probably a mistake
side = 'sell' if (takerSide == 'ASK') else 'buy'
else:
if side == 'BID':
side = 'buy'
elif side == 'ASK':
side = 'sell'
makerOrderId = self.safe_string(trade, 'maker_order_id')
takerOrderId = self.safe_string(trade, 'taker_order_id')
orderId = None
if makerOrderId is not None:
if takerOrderId is not None:
orderId = [makerOrderId, takerOrderId]
else:
orderId = makerOrderId
elif takerOrderId is not None:
orderId = takerOrderId
id = self.safe_string(trade, 'id')
result = {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': 'limit',
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'info': trade,
}
makerCurrencyCode = None
takerCurrencyCode = None
if (market is not None) and (takerOrMaker is not None):
if side == 'buy':
if takerOrMaker == 'maker':
makerCurrencyCode = market['base']
takerCurrencyCode = market['quote']
else:
makerCurrencyCode = market['quote']
takerCurrencyCode = market['base']
else:
if takerOrMaker == 'maker':
makerCurrencyCode = market['quote']
takerCurrencyCode = market['base']
else:
makerCurrencyCode = market['base']
takerCurrencyCode = market['quote']
elif side == 'SELF_TRADING':
if takerSide == 'BID':
makerCurrencyCode = market['quote']
takerCurrencyCode = market['base']
elif takerSide == 'ASK':
makerCurrencyCode = market['base']
takerCurrencyCode = market['quote']
makerFeeCost = self.safe_string(trade, 'maker_fee')
takerFeeCost = self.safe_string(trade, 'taker_fee')
if makerFeeCost is not None:
if takerFeeCost is not None:
result['fees'] = [
{'cost': makerFeeCost, 'currency': makerCurrencyCode},
{'cost': takerFeeCost, 'currency': takerCurrencyCode},
]
else:
result['fee'] = {'cost': makerFeeCost, 'currency': makerCurrencyCode}
elif takerFeeCost is not None:
result['fee'] = {'cost': takerFeeCost, 'currency': takerCurrencyCode}
else:
result['fee'] = None
return self.safe_trade(result, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'asset_pair_name': market['id'],
}
response = await self.publicGetAssetPairsAssetPairNameTrades(self.extend(request, params))
#
# {
# "code": 0,
# "data": [
# {
# "id": 38199941,
# "price": "3378.67",
# "amount": "0.019812",
# "taker_side": "ASK",
# "created_at": "2019-01-29T06:05:56Z"
# },
# {
# "id": 38199934,
# "price": "3376.14",
# "amount": "0.019384",
# "taker_side": "ASK",
# "created_at": "2019-01-29T06:05:40Z"
# }
# ]
# }
#
trades = self.safe_value(response, 'data', [])
return self.parse_trades(trades, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# close: '0.021562',
# high: '0.021563',
# low: '0.02156',
# open: '0.021563',
# time: '2019-11-21T07:54:00Z',
# volume: '59.84376'
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100 # default 100, max 500
request = {
'asset_pair_name': market['id'],
'period': self.timeframes[timeframe],
'limit': limit,
}
if since is not None:
# start = int(since / 1000)
duration = self.parse_timeframe(timeframe)
end = self.sum(since, limit * duration * 1000)
request['time'] = self.iso8601(end)
response = await self.publicGetAssetPairsAssetPairNameCandles(self.extend(request, params))
#
# {
# code: 0,
# data: [
# {
# close: '0.021656',
# high: '0.021658',
# low: '0.021652',
# open: '0.021652',
# time: '2019-11-21T09:30:00Z',
# volume: '53.08664'
# },
# {
# close: '0.021652',
# high: '0.021656',
# low: '0.021652',
# open: '0.021656',
# time: '2019-11-21T09:29:00Z',
# volume: '88.39861'
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
balances = self.safe_value(response, 'data', [])
for i in range(0, len(balances)):
balance = balances[i]
symbol = self.safe_string(balance, 'asset_symbol')
code = self.safe_currency_code(symbol)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'locked_balance')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
type = self.safe_string(params, 'type', '')
params = self.omit(params, 'type')
method = 'privateGet' + self.capitalize(type) + 'Accounts'
response = await getattr(self, method)(params)
#
# {
# "code":0,
# "data":[
# {"asset_symbol":"NKC","balance":"0","locked_balance":"0"},
# {"asset_symbol":"UBTC","balance":"0","locked_balance":"0"},
# {"asset_symbol":"READ","balance":"0","locked_balance":"0"},
# ],
# }
#
return self.parse_balance(response)
def parse_order(self, order, market=None):
#
# {
# "id": 10,
# "asset_pair_name": "EOS-BTC",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED",
# "created_at":"2019-01-29T06:05:56Z",
# "updated_at":"2019-01-29T06:05:56Z",
# }
#
id = self.safe_string(order, 'id')
marketId = self.safe_string(order, 'asset_pair_name')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
average = self.safe_string(order, 'avg_deal_price')
filled = self.safe_string(order, 'filled_amount')
status = self.parse_order_status(self.safe_string(order, 'state'))
side = self.safe_string(order, 'side')
if side == 'BID':
side = 'buy'
else:
side = 'sell'
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'updated_at'))
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': None,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': None,
'trades': None,
}, market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
side = 'BID' if (side == 'buy') else 'ASK'
uppercaseType = type.upper()
request = {
'asset_pair_name': market['id'], # asset pair name BTC-USDT, required
'side': side, # order side one of "ASK"/"BID", required
'amount': self.amount_to_precision(symbol, amount), # order amount, string, required
# 'price': self.price_to_precision(symbol, price), # order price, string, required
'type': uppercaseType,
# 'operator': 'GTE', # stop orders only, GTE greater than and equal, LTE less than and equal
# 'immediate_or_cancel': False, # limit orders only, must be False when post_only is True
# 'post_only': False, # limit orders only, must be False when immediate_or_cancel is True
}
if uppercaseType == 'LIMIT':
request['price'] = self.price_to_precision(symbol, price)
else:
isStopLimit = (uppercaseType == 'STOP_LIMIT')
isStopMarket = (uppercaseType == 'STOP_MARKET')
if isStopLimit or isStopMarket:
stopPrice = self.safe_number_2(params, 'stop_price', 'stopPrice')
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a stop_price parameter')
request['stop_price'] = self.price_to_precision(symbol, stopPrice)
params = self.omit(params, ['stop_price', 'stopPrice'])
if isStopLimit:
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostOrders(self.extend(request, params))
#
# {
# "id": 10,
# "asset_pair_name": "EOS-BTC",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED",
# "created_at":"2019-01-29T06:05:56Z",
# "updated_at":"2019-01-29T06:05:56Z"
# }
#
order = self.safe_value(response, 'data')
return self.parse_order(order, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {'id': id}
response = await self.privatePostOrdersIdCancel(self.extend(request, params))
# {
# "id": 10,
# "asset_pair_name": "EOS-BTC",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "CANCELLED",
# "created_at":"2019-01-29T06:05:56Z",
# "updated_at":"2019-01-29T06:05:56Z"
# }
order = self.safe_value(response, 'data')
return self.parse_order(order)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'asset_pair_name': market['id'],
}
response = await self.privatePostOrdersCancel(self.extend(request, params))
#
# {
# "code":0,
# "data": {
# "cancelled":[
# 58272370,
# 58272377
# ],
# "failed": []
# }
# }
#
return response
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {'id': id}
response = await self.privateGetOrdersId(self.extend(request, params))
order = self.safe_value(response, 'data', {})
return self.parse_order(order)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'asset_pair_name': market['id'],
# 'page_token': 'dxzef', # request page after self page token
# 'side': 'ASK', # 'ASK' or 'BID', optional
# 'state': 'FILLED', # 'CANCELLED', 'FILLED', 'PENDING'
# 'limit' 20, # default 20, max 200
}
if limit is not None:
request['limit'] = limit # default 20, max 200
response = await self.privateGetOrders(self.extend(request, params))
#
# {
# "code":0,
# "data": [
# {
# "id": 10,
# "asset_pair_name": "ETH-BTC",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED",
# "created_at":"2019-01-29T06:05:56Z",
# "updated_at":"2019-01-29T06:05:56Z",
# },
# ],
# "page_token":"dxzef",
# }
#
orders = self.safe_value(response, 'data', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
market = self.market(symbol)
request = {
'asset_pair_name': market['id'],
# 'page_token': 'dxzef', # request page after self page token
}
if limit is not None:
request['limit'] = limit # default 20, max 200
response = await self.privateGetTrades(self.extend(request, params))
#
# {
# "code": 0,
# "data": [
# {
# "id": 10854280,
# "asset_pair_name": "XIN-USDT",
# "price": "70",
# "amount": "1",
# "taker_side": "ASK",
# "maker_order_id": 58284908,
# "taker_order_id": 58284909,
# "maker_fee": "0.0008",
# "taker_fee": "0.07",
# "side": "SELF_TRADING",
# "inserted_at": "2019-04-16T12:00:01Z"
# },
# {
# "id": 10854263,
# "asset_pair_name": "XIN-USDT",
# "price": "75.7",
# "amount": "12.743149",
# "taker_side": "BID",
# "maker_order_id": null,
# "taker_order_id": 58284888,
# "maker_fee": null,
# "taker_fee": "0.0025486298",
# "side": "BID",
# "inserted_at": "2019-04-15T06:20:57Z"
# }
# ],
# "page_token":"dxfv"
# }
#
trades = self.safe_value(response, 'data', [])
return self.parse_trades(trades, market, since, limit)
def parse_order_status(self, status):
statuses = {
'PENDING': 'open',
'FILLED': 'closed',
'CANCELLED': 'canceled',
}
return self.safe_string(statuses, status)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'state': 'PENDING',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'state': 'FILLED',
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
def nonce(self):
return self.microseconds() * 1000
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
baseUrl = self.implode_hostname(self.urls['api'][api])
url = baseUrl + '/' + self.implode_params(path, params)
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
request = {
'type': 'OpenAPIV2',
'sub': self.apiKey,
'nonce': nonce,
# 'recv_window': '30', # default 30
}
jwt = self.jwt(request, self.encode(self.secret))
headers = {
'Authorization': 'Bearer ' + jwt,
}
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'asset_symbol': currency['id'],
}
response = await self.privateGetAssetsAssetSymbolAddress(self.extend(request, params))
#
# the actual response format is not the same as the documented one
# the data key contains an array in the actual response
#
# {
# "code":0,
# "message":"",
# "data":[
# {
# "id":5521878,
# "chain":"Bitcoin",
# "value":"1GbmyKoikhpiQVZ1C9sbF17mTyvBjeobVe",
# "memo":""
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
dataLength = len(data)
if dataLength < 1:
            raise ExchangeError(self.id + ' fetchDepositAddress() returned an empty address response')
firstElement = data[0]
address = self.safe_string(firstElement, 'value')
tag = self.safe_string(firstElement, 'memo')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
def parse_transaction_status(self, status):
statuses = {
# what are other statuses here?
'WITHHOLD': 'ok', # deposits
'UNCONFIRMED': 'pending',
'CONFIRMED': 'ok', # withdrawals
'COMPLETED': 'ok',
'PENDING': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# "amount": "25.0",
# "asset_symbol": "BTS"
# "confirms": 100,
# "id": 5,
# "inserted_at": "2018-02-16T11:39:58.000Z",
# "is_internal": False,
# "kind": "default",
# "memo": "",
# "state": "WITHHOLD",
# "txid": "72e03037d144dae3d32b68b5045462b1049a0755",
# "updated_at": "2018-11-09T10:20:09.000Z",
# }
#
# fetchWithdrawals
#
# {
# "amount": "5",
# "asset_symbol": "ETH",
# "completed_at": "2018-03-15T16:13:45.610463Z",
# "customer_id": "10",
# "id": 10,
# "inserted_at": "2018-03-15T16:13:45.610463Z",
# "is_internal": True,
# "note": "2018-03-15T16:13:45.610463Z",
# "state": "CONFIRMED",
# "target_address": "0x4643bb6b393ac20a6175c713175734a72517c63d6f7"
# "txid": "0x4643bb6b393ac20a6175c713175734a72517c63d6f73a3ca90a15356f2e967da0",
# }
#
# withdraw
#
# {
# "id":1077391,
# "customer_id":1082679,
# "amount":"21.9000000000000000",
# "txid":"",
# "is_internal":false,
# "kind":"on_chain",
# "state":"PENDING",
# "inserted_at":"2020-06-03T00:50:57+00:00",
# "updated_at":"2020-06-03T00:50:57+00:00",
# "memo":"",
# "target_address":"rDYtYT3dBeuw376rvHqoZBKW3UmvguoBAf",
# "fee":"0.1000000000000000",
# "asset_symbol":"XRP"
# }
#
currencyId = self.safe_string(transaction, 'asset_symbol')
code = self.safe_currency_code(currencyId)
id = self.safe_integer(transaction, 'id')
amount = self.safe_number(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
timestamp = self.parse8601(self.safe_string(transaction, 'inserted_at'))
updated = self.parse8601(self.safe_string_2(transaction, 'updated_at', 'completed_at'))
txid = self.safe_string(transaction, 'txid')
address = self.safe_string(transaction, 'target_address')
tag = self.safe_string(transaction, 'memo')
        type = 'withdrawal' if ('customer_id' in transaction) else 'deposit'  # the documented withdrawal responses above include a customer_id, the deposit ones do not
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'addressFrom': None,
'address': None,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': None,
}
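    # For illustration only: the documented deposit above (id 5, asset BTS, state WITHHOLD,
    # no customer_id) would parse to roughly {'id': 5, 'type': 'deposit', 'currency': 'BTS',
    # 'amount': 25.0, 'status': 'ok', 'txid': '72e03037d144dae3d32b68b5045462b1049a0755', ...}.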
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'page_token': 'dxzef', # request page after self page token
# 'limit': 50, # optional, default 50
# 'kind': 'string', # optional - air_drop, big_holder_dividend, default, eosc_to_eos, internal, equally_airdrop, referral_mining, one_holder_dividend, single_customer, snapshotted_airdrop, trade_mining
# 'asset_symbol': 'BTC', # optional
}
currency = None
if code is not None:
currency = self.currency(code)
request['asset_symbol'] = currency['id']
if limit is not None:
request['limit'] = limit # default 50
response = await self.privateGetDeposits(self.extend(request, params))
#
# {
# "code": 0,
# "page_token": "NQ==",
# "data": [
# {
# "id": 5,
# "amount": "25.0",
# "confirms": 100,
# "txid": "72e03037d144dae3d32b68b5045462b1049a0755",
# "is_internal": False,
# "inserted_at": "2018-02-16T11:39:58.000Z",
# "updated_at": "2018-11-09T10:20:09.000Z",
# "kind": "default",
# "memo": "",
# "state": "WITHHOLD",
# "asset_symbol": "BTS"
# }
# ]
# }
#
deposits = self.safe_value(response, 'data', [])
return self.parse_transactions(deposits, code, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'page_token': 'dxzef', # request page after self page token
# 'limit': 50, # optional, default 50
# 'kind': 'string', # optional - air_drop, big_holder_dividend, default, eosc_to_eos, internal, equally_airdrop, referral_mining, one_holder_dividend, single_customer, snapshotted_airdrop, trade_mining
# 'asset_symbol': 'BTC', # optional
}
currency = None
if code is not None:
currency = self.currency(code)
request['asset_symbol'] = currency['id']
if limit is not None:
request['limit'] = limit # default 50
response = await self.privateGetWithdrawals(self.extend(request, params))
#
# {
# "code": 0,
# "data": [
# {
# "id": 10,
# "customer_id": "10",
# "asset_symbol": "ETH",
# "amount": "5",
# "state": "CONFIRMED",
# "note": "2018-03-15T16:13:45.610463Z",
# "txid": "0x4643bb6b393ac20a6175c713175734a72517c63d6f73a3ca90a15356f2e967da0",
# "completed_at": "2018-03-15T16:13:45.610463Z",
# "inserted_at": "2018-03-15T16:13:45.610463Z",
# "is_internal": True,
# "target_address": "0x4643bb6b393ac20a6175c713175734a72517c63d6f7"
# }
# ],
# "page_token":"dxvf"
# }
#
withdrawals = self.safe_value(response, 'data', [])
return self.parse_transactions(withdrawals, code, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'target_address': address,
'amount': self.currency_to_precision(code, amount),
}
if tag is not None:
request['memo'] = tag
# requires write permission on the wallet
response = await self.privatePostWithdrawals(self.extend(request, params))
#
# {
# "code":0,
# "message":"",
# "data":{
# "id":1077391,
# "customer_id":1082679,
# "amount":"21.9000000000000000",
# "txid":"",
# "is_internal":false,
# "kind":"on_chain",
# "state":"PENDING",
# "inserted_at":"2020-06-03T00:50:57+00:00",
# "updated_at":"2020-06-03T00:50:57+00:00",
# "memo":"",
# "target_address":"rDYtYT3dBeuw376rvHqoZBKW3UmvguoBAf",
# "fee":"0.1000000000000000",
# "asset_symbol":"XRP"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transaction(data, currency)
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"code":10013,"message":"Resource not found"}
# {"code":40004,"message":"invalid jwt"}
#
code = self.safe_string(response, 'code')
message = self.safe_string(response, 'message')
if code != '0':
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
| 40.5
| 214
| 0.464411
|
794ceceae30087b7a1c134788793417e2c90281f
| 10,182
|
py
|
Python
|
python/setup.py
|
jmwdpk/SPARK-23674
|
029da00f0ad9d716cebcc2d523569e751b507c22
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 2
|
2018-12-21T21:08:43.000Z
|
2020-01-09T16:27:28.000Z
|
python/setup.py
|
jmwdpk/SPARK-23674
|
029da00f0ad9d716cebcc2d523569e751b507c22
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 1
|
2018-04-13T02:18:43.000Z
|
2018-04-13T02:18:43.000Z
|
python/setup.py
|
jmwdpk/SPARK-23674
|
029da00f0ad9d716cebcc2d523569e751b507c22
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 4
|
2015-11-24T07:04:38.000Z
|
2016-11-04T05:43:53.000Z
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
sys.exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
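# At this point JARS_PATH is (informally) one of the following: the single
# assembly/target/scala-*/jars/ directory when building from source, SPARK_HOME/jars when
# packaging from a binary release (RELEASE file present), or still an empty glob result when
# running from an sdist that already ships the "deps" symlink farm (the pip-install case
# handled further below).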
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check whether we are under the Spark source tree, in which case we need to build the symlink farm.
# This matters because we only want to build the symlink farm while under Spark; otherwise we
# want to use an existing one. And if the symlink farm already exists while we are under Spark
# (e.g. a partially built sdist), we should error out and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
# ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
_minimum_pandas_version = "0.19.2"
_minimum_pyarrow_version = "0.8.0"
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='dev@spark.apache.org',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.6'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| 41.901235
| 100
| 0.642997
|
794cf051ead9cd18c6781a5e58b461962fdf1b22
| 14,699
|
py
|
Python
|
services/common/tests/slots/test_merge.py
|
rtubio/server
|
3bb15f4d4dcd543d6f95d1fda2cb737de0bb9a9b
|
[
"Apache-2.0"
] | 4
|
2015-03-23T16:34:53.000Z
|
2017-12-12T11:41:54.000Z
|
services/common/tests/slots/test_merge.py
|
rtubio/server
|
3bb15f4d4dcd543d6f95d1fda2cb737de0bb9a9b
|
[
"Apache-2.0"
] | 42
|
2015-01-08T22:21:04.000Z
|
2021-12-13T19:48:44.000Z
|
services/common/tests/slots/test_merge.py
|
rtubio/server
|
3bb15f4d4dcd543d6f95d1fda2cb737de0bb9a9b
|
[
"Apache-2.0"
] | 2
|
2015-04-04T15:23:35.000Z
|
2017-07-23T23:14:06.000Z
|
"""
Copyright 2013, 2014 Ricardo Tubio-Pardavila
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'rtubiopa@calpoly.edu'
from datetime import timedelta
from django import test
from services.common import misc, slots
class MergeSlotsTest(test.TestCase):
def setUp(self):
self.__verbose_testing = False
def test_merge_none(self):
"""UNIT test: services.common.slots.merge_slots (robustness)
Nones and empties test.
"""
self.assertCountEqual(
[], slots.merge_slots(None, None),
'[] is the expected response to (None, None)'
)
self.assertCountEqual(
[], slots.merge_slots([], []),
'[] is the expected response to ([], [])'
)
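    # Informal sketch of the cases exercised below, where p is a (+) slot, m is a (-) slot
    # and times are offsets from "today":
    #   A: p=[0h,1h],     m=[1h,4h] -> [p]                 (m starts exactly where p ends)
    #   B: p=[0h,1h20m],  m=[1h,4h] -> [(p0, m0)]          (m clips the tail of p)
    #   C: p=[0h,5h],     m=[1h,4h] -> [(p0,m0), (m1,p1)]  (m splits p in two)
    #   D: p=[2h,5h],     m=[1h,4h] -> [(m1, p1)]          (m clips the head of p)
    #   E: p=[2h,3h],     m=[1h,4h] -> []                  (m covers p entirely)
    #   F: p=[2h,3h],     m=[0h,1h] -> [p]                 (disjoint, m before p)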
def test_merge_case_a(self):
"""UNIT test: services.common.slots.merge_slots (case A)
Case A for merging slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, CASE A')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc(),
misc.get_today_utc() + timedelta(hours=1))
m = (misc.get_today_utc() + timedelta(hours=1),
misc.get_today_utc() + timedelta(hours=4))
expected_s = [p]
actual_s = slots.merge_slots([p], [m])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list(m, name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(expected_s, actual_s, 'CASE A: Wrong result!')
def test_merge_case_b(self):
"""UNIT test: services.common.slots.merge_slots (case B)
Case B for merging slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, CASE B')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc(),
misc.get_today_utc() + timedelta(hours=1, minutes=20))
m = (misc.get_today_utc() + timedelta(hours=1),
misc.get_today_utc() + timedelta(hours=4))
expected_s = [(p[0], m[0])]
actual_s = slots.merge_slots([p], [m])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list(m, name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(expected_s, actual_s, 'CASE B: Wrong result!')
def test_merge_case_c(self):
"""UNIT test: services.common.slots.merge_slots (case C)
Case C for merging slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, CASE C')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc(),
misc.get_today_utc() + timedelta(hours=5))
m = (misc.get_today_utc() + timedelta(hours=1),
misc.get_today_utc() + timedelta(hours=4))
expected_s = [(p[0], m[0]), (m[1], p[1])]
actual_s = slots.merge_slots([p], [m])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list(m, name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(expected_s, actual_s, 'CASE C: Wrong result!')
def test_merge_case_d(self):
"""UNIT test: services.common.slots.merge_slots (case D)
Case D for merging slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, CASE D')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=5))
m = (misc.get_today_utc() + timedelta(hours=1),
misc.get_today_utc() + timedelta(hours=4))
expected_s = [(m[1], p[1])]
actual_s = slots.merge_slots([p], [m])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list(m, name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(expected_s, actual_s, 'CASE D: Wrong result!')
def test_merge_case_e(self):
"""UNIT test: services.common.slots.merge_slots (case E)
Case E for merging slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, CASE E')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=3))
m = (misc.get_today_utc() + timedelta(hours=1),
misc.get_today_utc() + timedelta(hours=4))
expected_s = []
actual_s = slots.merge_slots([p], [m])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list(m, name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(expected_s, actual_s, 'CASE E: Wrong result!')
def test_merge_case_f(self):
"""UNIT test: services.common.slots.merge_slots (case F)
Case F for merging slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, CASE F')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=3))
m = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=1))
expected_s = [p]
actual_s = slots.merge_slots([p], [m])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list(m, name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(expected_s, actual_s, 'CASE F: Wrong result!')
def test_merge_case_no_m_slots(self):
"""UNIT test: services.common.slots.merge_slots (p slots)
Case merging p slots without m slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, CASE NONE M SLOTS')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=3))
q = (misc.get_today_utc() + timedelta(hours=4),
misc.get_today_utc() + timedelta(hours=5))
r = (misc.get_today_utc() + timedelta(hours=6),
misc.get_today_utc() + timedelta(hours=7))
s = (misc.get_today_utc() + timedelta(hours=8),
misc.get_today_utc() + timedelta(hours=9))
expected_s = [p, q, r, s]
actual_s = slots.merge_slots([p, q, r, s], [])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list([], name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(
expected_s, actual_s, 'CASE NONE M: Wrong result!'
)
def test_merge_case_multiple_end(self):
"""UNIT test: services.common.slots.merge_slots (multiple + slots)
        Case merging multiple ending (+) slots.
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
            print('TESTING MERGE, CASE MULTIPLE (+)')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=3))
q = (misc.get_today_utc() + timedelta(hours=4),
misc.get_today_utc() + timedelta(hours=5))
r = (misc.get_today_utc() + timedelta(hours=6),
misc.get_today_utc() + timedelta(hours=7))
s = (misc.get_today_utc() + timedelta(hours=8),
misc.get_today_utc() + timedelta(hours=9))
m = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=1))
expected_s = [p, q, r, s]
actual_s = slots.merge_slots([p, q, r, s], [m])
if self.__verbose_testing:
misc.print_list(p, name='(+) slots')
misc.print_list(m, name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(
expected_s, actual_s, 'CASE MULTIPLE: Wrong result!'
)
def test_merge_case_complex_1(self):
"""UNIT test: services.common.slots.merge_slots (complex case #1)
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, COMPLEX CASE #1')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=1))
q = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=3))
r = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=4))
s = (misc.get_today_utc() + timedelta(hours=3),
misc.get_today_utc() + timedelta(hours=5))
m = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=3))
n = (misc.get_today_utc() + timedelta(hours=3, minutes=30),
misc.get_today_utc() + timedelta(hours=4))
expected_s = [(m[1], n[0]), (s[0], n[0]), (n[1], s[1])]
actual_s = slots.merge_slots([p, q, r, s], [m, n])
if self.__verbose_testing:
misc.print_list([p, q, r, s], name='(+) slots')
misc.print_list([m, n], name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(
expected_s, actual_s, 'COMPLEX CASE #1: Wrong result!'
)
def test_merge_case_complex_2(self):
"""UNIT test: services.common.slots.merge_slots (complex case #2)
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, COMPLEX CASE #2')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=1))
q = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=3))
r = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=4))
s = (misc.get_today_utc() + timedelta(hours=3),
misc.get_today_utc() + timedelta(hours=5))
m = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=3))
expected_s = [(m[1], r[1]), s]
actual_s = slots.merge_slots([p, q, r, s], [m])
if self.__verbose_testing:
misc.print_list([p, q, r, s], name='(+) slots')
misc.print_list([m], name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(
expected_s, actual_s, 'COMPLEX CASE #2: Wrong result!'
)
def test_merge_case_complex_3(self):
"""UNIT test: services.common.slots.merge_slots (complex case #3)
"""
if self.__verbose_testing:
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('TESTING MERGE, COMPLEX CASE #3')
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
p = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=1))
q = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=3))
r = (misc.get_today_utc() + timedelta(hours=2),
misc.get_today_utc() + timedelta(hours=4))
s = (misc.get_today_utc() + timedelta(hours=3),
misc.get_today_utc() + timedelta(hours=5))
t = (misc.get_today_utc() + timedelta(hours=6),
misc.get_today_utc() + timedelta(hours=7))
u = (misc.get_today_utc() + timedelta(hours=8),
misc.get_today_utc() + timedelta(hours=9))
v = (misc.get_today_utc() + timedelta(hours=10),
misc.get_today_utc() + timedelta(hours=11))
m = (misc.get_today_utc() + timedelta(hours=0),
misc.get_today_utc() + timedelta(hours=3))
n = (misc.get_today_utc() + timedelta(hours=3, minutes=30),
misc.get_today_utc() + timedelta(hours=4))
expected_s = [(m[1], n[0]), (s[0], n[0]), (n[1], s[1]), t, u, v]
actual_s = slots.merge_slots([p, q, r, s, t, u, v], [m, n])
if self.__verbose_testing:
misc.print_list([p, q, r, s], name='(+) slots')
misc.print_list([m, n], name='(-) slots')
misc.print_list(actual_s, name='(A) slots')
misc.print_list(expected_s, name='(EXPECTED) slots')
self.assertCountEqual(
            expected_s, actual_s, 'COMPLEX CASE #3: Wrong result!'
)
| 40.051771
| 76
| 0.532621
|
794cf07a11a8f38f0824613448eedb04e24be5e1
| 276
|
py
|
Python
|
rpisec/telegram_bot/commands/enable.py
|
marclr/rpi-security
|
2f7b39c572c45169fa10a9c571bba9cf5f869254
|
[
"MIT"
] | null | null | null |
rpisec/telegram_bot/commands/enable.py
|
marclr/rpi-security
|
2f7b39c572c45169fa10a9c571bba9cf5f869254
|
[
"MIT"
] | 1
|
2021-06-01T23:14:14.000Z
|
2021-06-01T23:14:14.000Z
|
rpisec/telegram_bot/commands/enable.py
|
marclr/rpi-security
|
2f7b39c572c45169fa10a9c571bba9cf5f869254
|
[
"MIT"
] | null | null | null |
def enable(bot, update, webcontrol):
chat_id = update.message.chat_id
code, text = webcontrol.execute('detection', 'start')
if code == 200:
bot.sendMessage(chat_id=chat_id, text=text)
else:
bot.sendMessage(chat_id=chat_id, text="Try it later")
| 34.5
| 61
| 0.67029
|
794cf348dd9910d93657f138efcbbeef7f4c01e1
| 3,996
|
py
|
Python
|
assignements/S1_algotools.py
|
YoanRouleau/BachelorDIM-Lectures-Algorithms-2020
|
eafb79a096325dc9bf75c3a20520edb191bfa3e1
|
[
"MIT"
] | null | null | null |
assignements/S1_algotools.py
|
YoanRouleau/BachelorDIM-Lectures-Algorithms-2020
|
eafb79a096325dc9bf75c3a20520edb191bfa3e1
|
[
"MIT"
] | null | null | null |
assignements/S1_algotools.py
|
YoanRouleau/BachelorDIM-Lectures-Algorithms-2020
|
eafb79a096325dc9bf75c3a20520edb191bfa3e1
|
[
"MIT"
] | null | null | null |
"""
Created by Yoan ROULEAU
@author: myself
"""
from random import randint
import numpy as np
def average_above_zero(array):
'''
    Receives an array as a parameter and calculates the average of its strictly positive values.
    :arg
        array: an array
    :returns
        average: the average of the positive values
'''
som = 0
positive_element_count=0
for i in array:
if i > 0:
som += i
positive_element_count+=1
if positive_element_count > 0:
average = som/positive_element_count
else:
raise ValueError('No positive values found in the array.')
return average
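# e.g. average_above_zero([50, 1, 2, 85]) -> 34.5 (sum of 138 over the 4 positive values)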
def max_value(array):
'''
    Receives an array as a parameter and returns its biggest value
:arg
array: an array
:returns
max: the biggest value of the array
'''
max = 0
for value in array:
if value > max:
max = value
return max
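# e.g. max_value([50, 1, 2, 85]) -> 85; note that starting max at 0 assumes the array holds
# at least one non-negative value.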
def reverse_table(array):
'''
Gets an array and reverses its values.
:param
array: An array
:return:
Reversed array
'''
arrlength = len(array)
for i in range(arrlength//2):
tmp = array[i]
endValueIndex = arrlength - i - 1
array[i] = array[endValueIndex]
array[endValueIndex] = tmp
return array
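# e.g. reverse_table([50, 1, 2, 85]) -> [85, 2, 1, 50] (the array is reversed in place and returned)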
def roi_bbox(matrix):
'''
Get the bounds of an "square" assembly in a matrix.
:param
matrix: A matrix
w: matrix's width
h: matrix's height
x1: right bound x coord
y1: right bound y coord
x2: left bound x coord
y2: left bound y coord
:return:
x1, y1, x2, y2
'''
w = matrix.shape[1]
h = matrix.shape[0]
x1 = w
y1 = h
x2 = 0
y2 = 0
x = 0
y = 0
for x in range(w):
for y in range(h):
if matrix[y, x]:
if x < x1:
x1 = x
print("bound entry x1: ", x1)
if y < y1:
y1 = y
print("bound entry y1: ", y1)
if x2 < x:
x2 = x
print("bound entry x2: ", x2)
if y2 < y:
y2 = y
print("bound entry y2: ", y2)
return(x1, y1, x2, y2)
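# For the boolean demo matrix built below (True blocks over rows 6-8 / cols 7-9 and
# rows 2-3 / cols 2-4), roi_bbox should return (x1, y1, x2, y2) = (2, 2, 9, 8).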
def random_fill_parse(matrix, K):
'''
    Fills an empty matrix with a given number of 'X' characters at random positions.
:param
matrix: Empty matrix given with the function call
K: Numbers of awaited Xs in the matrix
:return:
Filled matrix with Xs
'''
if K < matrix.shape[0] * matrix.shape[1]:
i = 0
while i < K:
randH = randint(0, matrix.shape[0]-1)
randW = randint(0, matrix.shape[1]-1)
if matrix[randH, randW] != 'X':
matrix[randH, randW] = 'X'
i += 1
else:
raise ValueError('Numbers of Xs exceeding matrix size.')
return matrix
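# e.g. random_fill_parse(np.zeros((15, 15), dtype=str), 25) scatters 25 'X' characters over
# distinct random cells; a ValueError is raised if K is not smaller than the number of cells.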
def my_addition(a, b):
return a+b
#Matrix used for bbox
H = 12
W = 10
matrix = np.zeros((H,W), dtype=bool)
for c in range(7, 10):
for l in range(6, 9):
matrix[l, c] = 1
matrix[2:4, 2:5] = np.ones((2, 3), dtype=bool)
#Matrix used for randomFillParse (kept separate from the boolean bbox matrix so it is not overwritten)
H2 = 15
W2 = 15
matrix2 = np.zeros((H2, W2), dtype=str)
Tab = [50, 1, 2, 85]
average = average_above_zero(Tab)
print('Average: ', average)
print('Max: ' + str(max_value(Tab)))
print('Reverse: ' + str(reverse_table(Tab)))
bbox = roi_bbox(matrix)
print(bbox)
randomXMatrix = random_fill_parse(matrix2, 25)
print(randomXMatrix)
"""
WHAT HAPPENS IF "SOM" INITIALIZATION IS FORGOTTEN ?
-> You get an error saying that Som isn't defined.
WHAT CAN YOU EXPECT IF ALL THE VALUES ARE BELLOW ZERO ?
-> If your values are bellow zero, you wont be able to access the average calculation since you're testing each
values in the array are bellow zero. In the end, the function will attempt to divide 0 by 0 (default values), and throw
and error back.
"""
| 23.232558
| 119
| 0.555556
|
794cf44882962f9234b2b0838423c7435a230281
| 4,399
|
py
|
Python
|
crawler/crawler.py
|
mtaung/bossfight_club
|
b3d4f62dd2fe917eeb2bbd022566ad5412d62f48
|
[
"MIT"
] | null | null | null |
crawler/crawler.py
|
mtaung/bossfight_club
|
b3d4f62dd2fe917eeb2bbd022566ad5412d62f48
|
[
"MIT"
] | 8
|
2018-11-02T20:04:33.000Z
|
2018-11-10T01:11:00.000Z
|
crawler/crawler.py
|
mtaung/bossfight_club
|
b3d4f62dd2fe917eeb2bbd022566ad5412d62f48
|
[
"MIT"
] | null | null | null |
import praw, pickle
from requests_html import HTMLSession
from psaw import PushshiftAPI
class Crawler:
def __init__(self, cid, sec, user, pwd, uage):
"""
A crawler object based on the praw Reddit class.
"""
self.reddit = praw.Reddit(client_id= cid,
client_secret= sec,
username= user,
password= pwd,
user_agent= uage)
self.bfSub = self.reddit.subreddit('bossfight')
self.session = HTMLSession()
self.pushShift = PushshiftAPI(self.reddit)
def extractImgurUrl(self, urlString):
"""
        Processes an image URL to minimise links that Discord embeds do not recognise.
        This works around an artifact returned by praw.
"""
try:
r = self.session.get(urlString)
except:
return None
if r.status_code != 200:
return None
element = r.html.find('[rel=image_src]', first=True)
if not element:
return None
"""else:
newUrlSearch = r.html.find('[itemprop=embedURL]', first=True)
newUrl = newUrlSearch.attrs.get('content')
return newUrl"""
return element.attrs.get('href')
def extractUrl(self, urlString):
if urlString.startswith('http://imgur.com') or urlString.startswith('https://imgur.com'):
return self.extractImgurUrl(urlString)
else:
return None
def getUsableUrl(self, urlString):
try:
r = self.session.head(urlString)
except:
return None
if r.status_code != 200:
return None
ctype = r.headers.get('content-type')
if not ctype:
return None
if ctype == 'image/jpeg' or ctype == 'image/png' or ctype == 'image/gif':
return urlString
else:
newUrl = self.extractUrl(urlString)
if not newUrl:
return None
return self.getUsableUrl(newUrl)
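    # Rough flow of getUsableUrl, for illustration: issue a HEAD request; if the content-type
    # is already image/jpeg, image/png or image/gif, keep the URL as-is; otherwise treat imgur
    # page links specially, scrape their rel=image_src element and recurse on the resolved URL.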
def queryPS(self, length, threshold=1000):
"""
        Pulls the top {length} submissions from the subreddit above the specified score threshold.
        Returns a list of submission objects.
"""
results = list(self.pushShift.search_submissions(subreddit='bossfight',
limit=length, sort='desc',
sort_type='score',
score='>{}'.format(threshold),
is_video='false'))
return results
def queryTop(self):
"""
Pulls the top submissions from the subreddit of all time.
Returns a list of submission objects.
"""
self.topBf = self.bfSub.top(limit=1000)
return self.topBf
def generateBoss(self, roster):
"""
Returns a generator containing bosses from a list of submissions.
Parameters:
roster = a list of submission objects
"""
for i in roster:
topComment = [comment.body for comment in i.comments if (hasattr(comment, 'body') and comment.distinguished==None)][0]
#topComment = ''
url = self.getUsableUrl(i.url)
if not url:
continue
yield i.id, i.title, i.score, url, topComment
def weeklyUpdate(self):
"""
Returns a generator of the top 20 submissions from the subreddit of the past week.
"""
weeklyBf = self.bfSub.top(limit=20)
for i in weeklyBf:
topComment = [comment.body for comment in i.comments if (hasattr(comment, 'body') and comment.distinguished==None)][0]
url = self.getUsableUrl(i.url)
if not url:
continue
yield i.id, i.title, i.score, url, topComment
def pullBoss(self, urlIn):
"""
Returns a tuple of a boss from a specific submission url.
"""
submission = self.reddit.submission(url=urlIn)
url = self.getUsableUrl(submission.url)
if not url:
raise Exception("rip")
        topComment = [comment.body for comment in submission.comments if (hasattr(comment, 'body') and comment.distinguished is None)][0]
        return (submission.id, submission.title, submission.score, url, topComment)
| 37.279661
| 130
| 0.541714
|
794cf4c23b60b7b50a0c2f0da2904f8e066b49ff
| 504
|
py
|
Python
|
Lib/site-packages/troposphere/certificatemanager.py
|
bopopescu/django-estore
|
c092ffa965b8ef68e71d27d34a17fde1beacd90e
|
[
"MIT"
] | null | null | null |
Lib/site-packages/troposphere/certificatemanager.py
|
bopopescu/django-estore
|
c092ffa965b8ef68e71d27d34a17fde1beacd90e
|
[
"MIT"
] | null | null | null |
Lib/site-packages/troposphere/certificatemanager.py
|
bopopescu/django-estore
|
c092ffa965b8ef68e71d27d34a17fde1beacd90e
|
[
"MIT"
] | 2
|
2019-04-29T14:16:10.000Z
|
2020-07-23T12:04:17.000Z
|
from . import AWSObject, AWSProperty
class DomainValidationOption(AWSProperty):
props = {
'DomainName': (basestring, True),
'ValidationDomain': (basestring, True),
}
class Certificate(AWSObject):
resource_type = "AWS::CertificateManager::Certificate"
props = {
'DomainName': (basestring, True),
'DomainValidationOptions': ([DomainValidationOption], False),
'SubjectAlternativeNames': ([basestring], False),
'Tags': (list, False)
}
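# Illustrative usage sketch (not part of the original module); the template and domain names
# below are hypothetical examples of how these resources are typically assembled:
#
#   from troposphere import Template
#   template = Template()
#   template.add_resource(Certificate(
#       "SiteCertificate",
#       DomainName="example.com",
#       DomainValidationOptions=[DomainValidationOption(
#           DomainName="example.com",
#           ValidationDomain="example.com",
#       )],
#       SubjectAlternativeNames=["www.example.com"],
#   ))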
| 25.2
| 69
| 0.64881
|
794cf623e99ee75b007698918010165a9377f279
| 566
|
py
|
Python
|
uploader/core/exceptions.py
|
stfc/cvmfs-stratum-uploader
|
1a4ebecc53ea3e02e102c49e66ccb3009186f308
|
[
"Apache-2.0"
] | null | null | null |
uploader/core/exceptions.py
|
stfc/cvmfs-stratum-uploader
|
1a4ebecc53ea3e02e102c49e66ccb3009186f308
|
[
"Apache-2.0"
] | null | null | null |
uploader/core/exceptions.py
|
stfc/cvmfs-stratum-uploader
|
1a4ebecc53ea3e02e102c49e66ccb3009186f308
|
[
"Apache-2.0"
] | null | null | null |
class ApplicationError(Exception):
"""
    Raised by application logic.
"""
pass
class ArgumentError(ApplicationError):
"""
Raised on unexpected actions which should not occur during normal usage,
    e.g. a user sends a crafted HTTP header or opens a URL that does not exist anywhere in the application.
"""
pass
class ValidationError(ApplicationError):
"""
    Raised when data provided by the user does not match the requirements,
    e.g. a user sends /root or a parent directory as the argument of an action.
"""
pass
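# Illustrative usage sketch (the handler below is a hypothetical caller, not part of this module):
#
#   try:
#       handle_upload(request)           # may raise ValidationError or ArgumentError
#   except ValidationError as exc:       # well-formed request, unacceptable data
#       report_bad_input(exc)
#   except ApplicationError as exc:      # any other application-level failure
#       report_failure(exc)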
| 25.727273
| 110
| 0.69788
|
794cf69397e118cc97218cd99644e6619cbc05b6
| 69
|
py
|
Python
|
acq4/drivers/nidaq/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
acq4/drivers/nidaq/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
acq4/drivers/nidaq/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#from nidaq import *
#from SuperTask import *
| 23
| 24
| 0.637681
|
794cf769858eb97ab135cfe813c4bac9bca6cf12
| 5,160
|
py
|
Python
|
google/cloud/errorreporting_v1beta1/services/error_group_service/transports/base.py
|
DazWilkin/python-error-reporting
|
e078a158b68d10b119ec226c02a17944b59ddccb
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/errorreporting_v1beta1/services/error_group_service/transports/base.py
|
DazWilkin/python-error-reporting
|
e078a158b68d10b119ec226c02a17944b59ddccb
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/errorreporting_v1beta1/services/error_group_service/transports/base.py
|
DazWilkin/python-error-reporting
|
e078a158b68d10b119ec226c02a17944b59ddccb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.errorreporting_v1beta1.types import common
from google.cloud.errorreporting_v1beta1.types import error_group_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-errorreporting",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ErrorGroupServiceTransport(abc.ABC):
"""Abstract transport class for ErrorGroupService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self,
*,
host: str = "clouderrorreporting.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.get_group: gapic_v1.method.wrap_method(
self.get_group, default_timeout=None, client_info=client_info,
),
self.update_group: gapic_v1.method.wrap_method(
self.update_group, default_timeout=None, client_info=client_info,
),
}
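    # Note (informal): gapic_v1.method.wrap_method decorates the raw transport callables so
    # that per-call retry/timeout settings and the client-info user-agent metadata are applied
    # uniformly; concrete transports only have to expose the get_group/update_group callables.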
@property
def get_group(
self,
) -> typing.Callable[
[error_group_service.GetGroupRequest],
typing.Union[common.ErrorGroup, typing.Awaitable[common.ErrorGroup]],
]:
raise NotImplementedError()
@property
def update_group(
self,
) -> typing.Callable[
[error_group_service.UpdateGroupRequest],
typing.Union[common.ErrorGroup, typing.Awaitable[common.ErrorGroup]],
]:
raise NotImplementedError()
__all__ = ("ErrorGroupServiceTransport",)
| 37.391304
| 84
| 0.665698
|
794cf7dd3e765b22928f6138fadb9822999326f9
| 4,575
|
py
|
Python
|
py/src/ai/h2o/sparkling/ml/params/H2OTargetEncoderParams.py
|
salliewalecka/sparkling-water
|
497306fbc7f4f374fe367f1303289db13be4ec48
|
[
"Apache-2.0"
] | null | null | null |
py/src/ai/h2o/sparkling/ml/params/H2OTargetEncoderParams.py
|
salliewalecka/sparkling-water
|
497306fbc7f4f374fe367f1303289db13be4ec48
|
[
"Apache-2.0"
] | null | null | null |
py/src/ai/h2o/sparkling/ml/params/H2OTargetEncoderParams.py
|
salliewalecka/sparkling-water
|
497306fbc7f4f374fe367f1303289db13be4ec48
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ai.h2o.sparkling.ml.params.H2OTypeConverters import H2OTypeConverters
from pyspark.ml.param import *
class H2OTargetEncoderParams(Params):
##
# Param definitions
##
foldCol = Param(
Params._dummy(),
"foldCol",
"Fold column name",
H2OTypeConverters.toNullableString())
labelCol = Param(
Params._dummy(),
"labelCol",
"Label column name",
H2OTypeConverters.toString())
inputCols = Param(
Params._dummy(),
"inputCols",
"Names of columns that will be transformed",
H2OTypeConverters.toListString())
outputCols = Param(
Params._dummy(),
"outputCols",
"Names of columns representing the result of target encoding",
H2OTypeConverters.toListString())
holdoutStrategy = Param(
Params._dummy(),
"holdoutStrategy",
"""A strategy deciding what records will be excluded when calculating the target average on the training dataset.
Options:
None - All rows are considered for the calculation
LeaveOneOut - All rows except the row the calculation is made for
            KFold - Only out-of-fold data is considered (The option requires foldCol to be set.)""",
H2OTypeConverters.toEnumString("ai.h2o.targetencoding.TargetEncoder$DataLeakageHandlingStrategy"))
blendedAvgEnabled = Param(
Params._dummy(),
"blendedAvgEnabled",
"""If set, the target average becomes a weighted average of the posterior average for a given
categorical level and the prior average of the target. The weight is determined by the size
of the given group that the row belongs to. By default, the blended average is disabled.""",
H2OTypeConverters.toBoolean())
blendedAvgInflectionPoint = Param(
Params._dummy(),
"blendedAvgInflectionPoint",
"""A parameter of the blended average. The bigger number is set, the groups relatively bigger to the
overall data set size will consider the global target value as a component in the weighted average.
The default value is 10.""",
H2OTypeConverters.toFloat())
blendedAvgSmoothing = Param(
Params._dummy(),
"blendedAvgSmoothing",
"""A parameter of blended average. Controls the rate of transition between a group target value
and a global target value. The default value is 20.""",
H2OTypeConverters.toFloat())
noise = Param(
Params._dummy(),
"noise",
"Amount of random noise added to output values. The default value is 0.01",
H2OTypeConverters.toFloat())
noiseSeed = Param(
Params._dummy(),
"noiseSeed",
"A seed of the generator producing the random noise",
H2OTypeConverters.toInt())
##
# Getters
##
def getFoldCol(self):
return self.getOrDefault(self.foldCol)
def getLabelCol(self):
return self.getOrDefault(self.labelCol)
def getInputCols(self):
return self.getOrDefault(self.inputCols)
def getOutputCols(self):
columns = self.getOrDefault(self.outputCols)
if not columns:
return list(map(lambda c: c + "_te", self.getInputCols()))
else:
return columns
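    # Illustrative default (column names are hypothetical): with inputCols ["cabin", "embarked"]
    # and no explicit outputCols, getOutputCols() falls back to ["cabin_te", "embarked_te"].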
def getHoldoutStrategy(self):
return self.getOrDefault(self.holdoutStrategy)
def getBlendedAvgEnabled(self):
return self.getOrDefault(self.blendedAvgEnabled)
def getBlendedAvgInflectionPoint(self):
return self.getOrDefault(self.blendedAvgInflectionPoint)
def getBlendedAvgSmoothing(self):
return self.getOrDefault(self.blendedAvgSmoothing)
def getNoise(self):
return self.getOrDefault(self.noise)
def getNoiseSeed(self):
return self.getOrDefault(self.noiseSeed)
| 34.923664
| 121
| 0.691366
|
794cf91e3c644e68d65294317d7a35c38876b14c
| 31,660
|
py
|
Python
|
code/ARAX/ARAXQuery/Filter_KG/remove_edges.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | null | null | null |
code/ARAX/ARAXQuery/Filter_KG/remove_edges.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | null | null | null |
code/ARAX/ARAXQuery/Filter_KG/remove_edges.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | null | null | null |
# This class will overlay the normalized google distance on a message (all edges)
#!/bin/env python3
import sys
import os
import traceback
import numpy as np
# relative imports
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../../UI/OpenAPI/python-flask-server/")
from openapi_server.models.attribute import Attribute as EdgeAttribute
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../../reasoningtool/kg-construction/")
from NormGoogleDistance import NormGoogleDistance as NGD
class RemoveEdges:
#### Constructor
def __init__(self, response, message, edge_params):
self.response = response
self.message = message
self.edge_parameters = edge_params
def check_kg_nodes(self):
qids = {}
for key, node in self.message.query_graph.nodes.items():
qids[key] = 0
for key, node in self.message.knowledge_graph.nodes.items():
if node.qnode_keys is not None:
for qid in node.qnode_keys:
qids[qid] += 1
for k, v in qids.items():
if v == 0:
self.response.error(f"Fiter removed all of the nodes in the knowledge graph with the qnode id {k}", error_code="RemovedQueryNode")
def remove_edges_by_predicate(self):
"""
        Iterate over all the edges in the knowledge graph, remove any edges matching the description provided.
:return: response
"""
self.response.debug(f"Removing Edges")
self.response.info(f"Removing edges from the knowledge graph matching the specified predicate")
edge_params = self.edge_parameters
try:
edges_to_remove = set()
node_keys_to_remove = {}
edge_qid_dict = {}
for key, q_edge in self.message.query_graph.edges.items():
edge_qid_dict[key] = {'subject':q_edge.subject, 'object':q_edge.object}
# iterate over the edges find the edges to remove
for key, edge in self.message.knowledge_graph.edges.items():
if edge_params['edge_predicate'] == edge.predicate:
edges_to_remove.add(key)
if edge_params['remove_connected_nodes']:
for qedge_key in edge.qedge_keys:
if edge.subject not in node_keys_to_remove:
node_keys_to_remove[edge.subject] = {edge_qid_dict[qedge_key]['subject']}
else:
node_keys_to_remove[edge.subject].add(edge_qid_dict[qedge_key]['subject'])
if edge.object not in node_keys_to_remove:
node_keys_to_remove[edge.object] = {edge_qid_dict[qedge_key]['object']}
else:
node_keys_to_remove[edge.object].add(edge_qid_dict[qedge_key]['object'])
if edge_params['remove_connected_nodes']:
self.response.debug(f"Removing Nodes")
self.response.info(f"Removing connected nodes and their edges from the knowledge graph")
i = 0
nodes_to_remove = set()
skipped_qnode_keys = set()
# iterate over nodes find adjacent connected nodes
for key, node in self.message.knowledge_graph.nodes.items():
if key in node_keys_to_remove:
if 'qnode_keys' in edge_params:
if node.qnode_keys is not None:
for param_qnode_key in edge_params['qnode_keys']:
if param_qnode_key in node.qnode_keys:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
node.qnode_keys.remove(param_qnode_key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
for node_key in node_keys_to_remove[key]:
node.qnode_keys.remove(node_key)
if len(node.qnode_keys) == 0:
nodes_to_remove.add(key)
for key in skipped_qnode_keys:
del node_keys_to_remove[key]
# remove connected nodes
#self.message.knowledge_graph.nodes = [val for idx,val in enumerate(self.message.knowledge_graph.nodes) if idx not in nodes_to_remove]
for key in nodes_to_remove:
del self.message.knowledge_graph.nodes[key]
# iterate over edges find edges connected to the nodes
for key, edge in self.message.knowledge_graph.edges.items():
if edge.subject in node_keys_to_remove or edge.object in node_keys_to_remove:
edges_to_remove.add(key)
self.check_kg_nodes()
# remove edges
#self.message.knowledge_graph.edges = [val for idx,val in enumerate(self.message.knowledge_graph.edges) if idx not in edges_to_remove]
for key in edges_to_remove:
if edge_params.get('qedge_keys',None) is not None:
if hasattr(self.message.knowledge_graph.edges[key],'qedge_keys') and self.message.knowledge_graph.edges[key].qedge_keys is not None:
qedge_key_diff = set(self.message.knowledge_graph.edges[key].qedge_keys) - set(edge_params['qedge_keys'])
if len(qedge_key_diff) < 1:
del self.message.knowledge_graph.edges[key]
else:
self.message.knowledge_graph.edges[key].qedge_keys = list(qedge_key_diff)
else:
self.response.warning(
f"The edge {key} does not have a qedge_keys property. Since a value was supplied for the qedge_keys parameter the edge was not removed.")
else:
del self.message.knowledge_graph.edges[key]
except:
tb = traceback.format_exc()
error_type, error, _ = sys.exc_info()
self.response.error(tb, error_code = error_type.__name__)
self.response.error(f"Something went wrong removing edges from the knowledge graph")
else:
self.response.info(f"Edges successfully removed")
return self.response
def remove_edges_by_property(self):
"""
        Iterate over all the edges in the knowledge graph, remove any edges matching the description provided.
:return: response
"""
self.response.debug(f"Removing Edges")
self.response.info(f"Removing edges from the knowledge graph matching the specified property")
edge_params = self.edge_parameters
# FW: Hack to allow all provided by synonyms
provided_by_attributes = {'biolink:knowledge_source',
'biolink:primary_knowledge_source',
'biolink:original_knowledge_source',
'biolink:aggregator_knowledge_source',
'biolink:supporting_data_source',
'biolink:original_source',
'provided_by'}
provided_by_flag = edge_params['edge_attribute'] in provided_by_attributes
try:
edges_to_remove = set()
node_keys_to_remove = {}
edge_qid_dict = {}
for key, q_edge in self.message.query_graph.edges.items():
edge_qid_dict[key] = {'subject':q_edge.subject, 'object':q_edge.object}
# iterate over the edges find the edges to remove
for key, edge in self.message.knowledge_graph.edges.items():
edge_dict = edge.to_dict()
# TRAPI1.0 hack to allow filtering by old properties that are now attributes
if hasattr(edge, 'attributes'):
for attribute in edge.attributes:
if hasattr(attribute, "original_attribute_name"):
if attribute.value == edge_params['value']:
edge_dict[attribute.original_attribute_name] = attribute.value
# FW: Hack to allow all provided by synonyms
if provided_by_flag and attribute.original_attribute_name in provided_by_attributes:
edge_dict[edge_params['edge_attribute']] = edge_params['value']
if hasattr(attribute, "attribute_type_id"):
if attribute.value == edge_params['value']:
edge_dict[attribute.attribute_type_id] = attribute.value
# FW: Hack to allow all provided by synonyms
if provided_by_flag and attribute.attribute_type_id in provided_by_attributes:
edge_dict[edge_params['edge_attribute']] = edge_params['value']
if edge_params['edge_attribute'] in edge_dict:
if type(edge_dict[edge_params['edge_attribute']]) == list or type(edge_dict[edge_params['edge_attribute']]) == set:
if edge_params['value'] in edge_dict[edge_params['edge_attribute']]:
edges_to_remove.add(key)
if edge_params['remove_connected_nodes']:
for qedge_key in edge.qedge_keys:
if edge.subject not in node_keys_to_remove:
node_keys_to_remove[edge.subject] = {edge_qid_dict[qedge_key]['subject']}
else:
node_keys_to_remove[edge.subject].add(edge_qid_dict[qedge_key]['subject'])
if edge.object not in node_keys_to_remove:
node_keys_to_remove[edge.object] = {edge_qid_dict[qedge_key]['object']}
else:
node_keys_to_remove[edge.object].add(edge_qid_dict[qedge_key]['object'])
else:
if edge_dict[edge_params['edge_attribute']] == edge_params['value']:
edges_to_remove.add(key)
if edge_params['remove_connected_nodes']:
for qedge_key in edge.qedge_keys:
if edge.subject not in node_keys_to_remove:
node_keys_to_remove[edge.subject] = {edge_qid_dict[qedge_key]['subject']}
else:
node_keys_to_remove[edge.subject].add(edge_qid_dict[qedge_key]['subject'])
if edge.object not in node_keys_to_remove:
node_keys_to_remove[edge.object] = {edge_qid_dict[qedge_key]['object']}
else:
node_keys_to_remove[edge.object].add(edge_qid_dict[qedge_key]['object'])
if edge_params['remove_connected_nodes']:
self.response.debug(f"Removing Nodes")
self.response.info(f"Removing connected nodes and their edges from the knowledge graph")
nodes_to_remove = set()
skipped_qnode_keys = set()
# iterate over nodes find adjacent connected nodes
for key, node in self.message.knowledge_graph.nodes.items():
if key in node_keys_to_remove:
if 'qnode_keys' in edge_params:
if node.qnode_keys is not None:
for param_qnode_key in edge_params['qnode_keys']:
if param_qnode_key in node.qnode_keys:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
node.qnode_keys.remove(param_qnode_key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
for node_key in node_keys_to_remove[key]:
node.qnode_keys.remove(node_key)
if len(node.qnode_keys) == 0:
nodes_to_remove.add(key)
for key in skipped_qnode_keys:
del node_keys_to_remove[key]
# remove connected nodes
#self.message.knowledge_graph.nodes = [val for idx,val in enumerate(self.message.knowledge_graph.nodes) if idx not in nodes_to_remove]
for key in nodes_to_remove:
del self.message.knowledge_graph.nodes[key]
# iterate over edges find edges connected to the nodes
for key, edge in self.message.knowledge_graph.edges.items():
if edge.subject in node_keys_to_remove or edge.object in node_keys_to_remove:
edges_to_remove.add(key)
self.check_kg_nodes()
# remove edges
#self.message.knowledge_graph.edges = [val for idx,val in enumerate(self.message.knowledge_graph.edges) if idx not in edges_to_remove]
for key in edges_to_remove:
if edge_params.get('qedge_keys',None) is not None:
if hasattr(self.message.knowledge_graph.edges[key],'qedge_keys') and self.message.knowledge_graph.edges[key].qedge_keys is not None:
qedge_key_diff = set(self.message.knowledge_graph.edges[key].qedge_keys) - set(edge_params['qedge_keys'])
if len(qedge_key_diff) < 1:
del self.message.knowledge_graph.edges[key]
else:
self.message.knowledge_graph.edges[key].qedge_keys = list(qedge_key_diff)
else:
self.response.warning(
f"The edge {key} does not have a qedge_keys property. Since a value was supplied for the qedge_keys parameter the edge was not removed.")
else:
del self.message.knowledge_graph.edges[key]
except:
tb = traceback.format_exc()
error_type, error, _ = sys.exc_info()
self.response.error(tb, error_code = error_type.__name__)
self.response.error(f"Something went wrong removing edges from the knowledge graph")
else:
self.response.info(f"Edges successfully removed")
return self.response
def remove_edges_by_attribute(self):
"""
        Iterate over all the edges in the knowledge graph and remove any edges whose specified attribute is above or below the given threshold.
:return: response
"""
self.response.debug(f"Removing Edges")
self.response.info(f"Removing edges from the knowledge graph with the specified attribute values")
edge_params = self.edge_parameters
try:
if edge_params['direction'] == 'above':
def compare(x, y):
return x > y
elif edge_params['direction'] == 'below':
def compare(x, y):
return x < y
edges_to_remove = set()
node_keys_to_remove = {}
edge_qid_dict = {}
for key, q_edge in self.message.query_graph.edges.items():
edge_qid_dict[key] = {'subject':q_edge.subject, 'object':q_edge.object}
# iterate over the edges find the edges to remove
for key, edge in self.message.knowledge_graph.edges.items(): # iterate over the edges
if hasattr(edge, 'attributes'): # check if they have attributes
if edge.attributes: # if there are any edge attributes
for attribute in edge.attributes: # for each attribute
if (hasattr(attribute, "original_attribute_name") and attribute.original_attribute_name == edge_params['edge_attribute']) or (hasattr(attribute, "attribute_type_id") and attribute.attribute_type_id == edge_params['edge_attribute']): # check if it's the desired one
if compare(float(attribute.value), edge_params['threshold']): # check if it's above/below the threshold
edges_to_remove.add(key) # mark it to be removed
if edge_params['remove_connected_nodes']: # if you want to remove the connected nodes, mark those too
for qedge_key in edge.qedge_keys:
if edge.subject not in node_keys_to_remove:
node_keys_to_remove[edge.subject] = {edge_qid_dict[qedge_key]['subject']}
else:
node_keys_to_remove[edge.subject].add(edge_qid_dict[qedge_key]['subject'])
if edge.object not in node_keys_to_remove:
node_keys_to_remove[edge.object] = {edge_qid_dict[qedge_key]['object']}
else:
node_keys_to_remove[edge.object].add(edge_qid_dict[qedge_key]['object'])
if edge_params['remove_connected_nodes']:
self.response.debug(f"Removing Nodes")
self.response.info(f"Removing connected nodes and their edges from the knowledge graph")
nodes_to_remove = set()
skipped_qnode_keys = set()
# iterate over nodes find adjacent connected nodes
for key, node in self.message.knowledge_graph.nodes.items():
if key in node_keys_to_remove:
if 'qnode_keys' in edge_params:
if node.qnode_keys is not None:
for param_qnode_key in edge_params['qnode_keys']:
if param_qnode_key in node.qnode_keys:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
node.qnode_keys.remove(param_qnode_key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
for node_key in node_keys_to_remove[key]:
node.qnode_keys.remove(node_key)
if len(node.qnode_keys) == 0:
nodes_to_remove.add(key)
for key in skipped_qnode_keys:
del node_keys_to_remove[key]
# remove connected nodes
#self.message.knowledge_graph.nodes = [val for idx, val in enumerate(self.message.knowledge_graph.nodes) if idx not in nodes_to_remove]
for key in nodes_to_remove:
del self.message.knowledge_graph.nodes[key]
#i = 0
c = 0
# iterate over edges find edges connected to the nodes
for key, edge in self.message.knowledge_graph.edges.items():
if edge.subject in node_keys_to_remove or edge.object in node_keys_to_remove:
edges_to_remove.add(key)
else:
c += 1
#i += 1
self.check_kg_nodes()
# remove edges
#self.message.knowledge_graph.edges = [val for idx,val in enumerate(self.message.knowledge_graph.edges) if idx not in edges_to_remove]
for key in edges_to_remove:
if edge_params.get('qedge_keys',None) is not None:
if hasattr(self.message.knowledge_graph.edges[key],'qedge_keys') and self.message.knowledge_graph.edges[key].qedge_keys is not None:
qedge_key_diff = set(self.message.knowledge_graph.edges[key].qedge_keys) - set(edge_params['qedge_keys'])
if len(qedge_key_diff) < 1:
del self.message.knowledge_graph.edges[key]
else:
self.message.knowledge_graph.edges[key].qedge_keys = list(qedge_key_diff)
else:
self.response.warning(
f"The edge {key} does not have a qedge_keys property. Since a value was supplied for the qedge_keys parameter the edge was not removed.")
else:
del self.message.knowledge_graph.edges[key]
except:
tb = traceback.format_exc()
error_type, error, _ = sys.exc_info()
self.response.error(tb, error_code=error_type.__name__)
self.response.error(f"Something went wrong removing edges from the knowledge graph")
else:
self.response.info(f"Edges successfully removed")
return self.response
def remove_edges_by_stats(self):
"""
        Iterate over all the edges in the knowledge graph and remove edges whose specified attribute value falls outside the given statistical cutoff (top n, standard deviations from the mean, or percentile).
:return: response
"""
self.response.debug(f"Removing Edges")
self.response.info(f"Removing edges from the knowledge graph with the specified attribute values")
edge_params = self.edge_parameters
try:
edges_to_remove = set()
node_keys_to_remove = {}
edge_qid_dict = {}
for key, q_edge in self.message.query_graph.edges.items():
edge_qid_dict[key] = {'subject':q_edge.subject, 'object':q_edge.object}
values = []
# iterate over the edges find the edges to remove
for key, edge in self.message.knowledge_graph.edges.items(): # iterate over the edges
if hasattr(edge, 'attributes'): # check if they have attributes
if edge.attributes: # if there are any edge attributes
for attribute in edge.attributes: # for each attribute
if (hasattr(attribute, "original_attribute_name") and attribute.original_attribute_name == edge_params['edge_attribute']) or (hasattr(attribute, "attribute_type_id") and attribute.attribute_type_id == edge_params['edge_attribute']): # check if it's the desired one
values.append((key,float(attribute.value), edge.subject, edge.object))
if len(values) > 0:
#print(edge_params)
if edge_params['stat'] == 'n':
#vals = [x[1] for x in values]
#print(np.min(vals),np.max(vals))
values.sort(key=lambda x:x[1])
if edge_params['top']:
values.reverse()
edge_params['threshold'] = int(edge_params['threshold'])
values = values[edge_params['threshold']:]
#vals = [x[1] for x in values]
#print(np.min(vals),np.max(vals))
elif edge_params['stat'] == 'std':
vals = [x[1] for x in values]
#print(np.min(vals),np.max(vals))
mean = np.mean(vals)
std = np.std(vals)
#print(mean)
#print(std)
if edge_params['top']:
i = 1 * edge_params['threshold']
else:
i = -1 * edge_params['threshold']
val = mean + i*std
#print(val)
if edge_params['direction'] == 'above':
values = [x for x in values if x[1]>val]
elif edge_params['direction'] == 'below':
values = [x for x in values if x[1]<val]
#vals = [x[1] for x in values]
#print(np.min(vals),np.max(vals))
elif edge_params['stat'] == 'percentile':
vals = [x[1] for x in values]
val = np.percentile(vals, edge_params['threshold'], interpolation='linear')
if edge_params['direction'] == 'above':
values = [x for x in values if x[1]>val]
elif edge_params['direction'] == 'below':
values = [x for x in values if x[1]<val]
            for edge in values: # here edge = (edge key, value, subject id, object id)
edges_to_remove.add(edge[0]) # mark it to be removed
if edge_params['remove_connected_nodes']: # if you want to remove the connected nodes, mark those too
for qedge_key in self.message.knowledge_graph.edges[edge[0]].qedge_keys:
                        if edge[2] not in node_keys_to_remove: # edge[2] = edge subject
node_keys_to_remove[edge[2]] = {edge_qid_dict[qedge_key]['subject']}
else:
node_keys_to_remove[edge[2]].add(edge_qid_dict[qedge_key]['subject'])
                        if edge[3] not in node_keys_to_remove: # edge[3] = edge object
node_keys_to_remove[edge[3]] = {edge_qid_dict[qedge_key]['object']}
else:
node_keys_to_remove[edge[3]].add(edge_qid_dict[qedge_key]['object'])
if edge_params['remove_connected_nodes']:
self.response.debug(f"Removing Nodes")
self.response.info(f"Removing connected nodes and their edges from the knowledge graph")
nodes_to_remove = set()
skipped_qnode_keys = set()
# iterate over nodes find adjacent connected nodes
for key, node in self.message.knowledge_graph.nodes.items():
if key in node_keys_to_remove:
if 'qnode_keys' in edge_params:
if node.qnode_keys is not None:
for param_qnode_key in edge_params['qnode_keys']:
if param_qnode_key in node.qnode_keys:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
node.qnode_keys.remove(param_qnode_key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
# del node_keys_to_remove[key]
skipped_qnode_keys.add(key)
else:
if len(node.qnode_keys) == 1:
nodes_to_remove.add(key)
else:
for node_key in node_keys_to_remove[key]:
node.qnode_keys.remove(node_key)
if len(node.qnode_keys) == 0:
nodes_to_remove.add(key)
for key in skipped_qnode_keys:
del node_keys_to_remove[key]
# remove connected nodes
#self.message.knowledge_graph.nodes = [val for idx, val in enumerate(self.message.knowledge_graph.nodes) if idx not in nodes_to_remove]
for key in nodes_to_remove:
del self.message.knowledge_graph.nodes[key]
c = 0
# iterate over edges find edges connected to the nodes
for key, edge in self.message.knowledge_graph.edges.items():
if edge.subject in node_keys_to_remove or edge.object in node_keys_to_remove:
edges_to_remove.add(key)
else:
c += 1
self.check_kg_nodes()
# remove edges
#self.message.knowledge_graph.edges = [val for idx,val in enumerate(self.message.knowledge_graph.edges) if idx not in edges_to_remove]
for key in edges_to_remove:
if edge_params.get('qedge_keys',None) is not None:
if hasattr(self.message.knowledge_graph.edges[key],'qedge_keys') and self.message.knowledge_graph.edges[key].qedge_keys is not None:
qedge_key_diff = set(self.message.knowledge_graph.edges[key].qedge_keys) - set(edge_params['qedge_keys'])
if len(qedge_key_diff) < 1:
del self.message.knowledge_graph.edges[key]
else:
self.message.knowledge_graph.edges[key].qedge_keys = list(qedge_key_diff)
else:
self.response.warning(
f"The edge {key} does not have a qedge_keys property. Since a value was supplied for the qedge_keys parameter the edge was not removed.")
else:
del self.message.knowledge_graph.edges[key]
except:
tb = traceback.format_exc()
error_type, error, _ = sys.exc_info()
self.response.error(tb, error_code=error_type.__name__)
self.response.error(f"Something went wrong removing edges from the knowledge graph")
else:
self.response.info(f"Edges successfully removed")
return self.response
| 60.767754
| 293
| 0.5241
|
794cfb0b453d43483c477dd943018b562f522371
| 1,278
|
py
|
Python
|
python/leetcode/062_unique_paths.py
|
yxun/Notebook
|
680ae89a32d3f7d4fdcd541e66cea97e29efbd26
|
[
"Apache-2.0"
] | 1
|
2021-10-04T13:26:32.000Z
|
2021-10-04T13:26:32.000Z
|
python/leetcode/062_unique_paths.py
|
yxun/Notebook
|
680ae89a32d3f7d4fdcd541e66cea97e29efbd26
|
[
"Apache-2.0"
] | 3
|
2020-03-24T19:34:42.000Z
|
2022-01-21T20:15:39.000Z
|
python/leetcode/062_unique_paths.py
|
yxun/Notebook
|
680ae89a32d3f7d4fdcd541e66cea97e29efbd26
|
[
"Apache-2.0"
] | 1
|
2021-04-01T20:56:50.000Z
|
2021-04-01T20:56:50.000Z
|
#%%
"""
- Unique paths
- https://leetcode.com/problems/unique-paths
- Medium
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
Above is a 7 x 3 grid. How many possible unique paths are there?
Note: m and n will be at most 100.
Example 1:
Input: m = 3, n = 2
Output: 3
Explanation:
From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:
1. Right -> Right -> Down
2. Right -> Down -> Right
3. Down -> Right -> Right
Example 2:
Input: m = 7, n = 3
Output: 28
"""
#%%
class S1:
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
import math
return math.factorial(m+n-2) // (math.factorial(m-1) * math.factorial(n-1))
#%%
# DP
class S2:
def uniquePaths(self, m, n):
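        # dp[j] counts the paths that reach column j of the current row: every cell
        # is reachable from the cell above (the previous dp[j]) and from the cell to
        # the left (dp[j-1]).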
if m < 1 or n < 1:
return 0
dp = [0] * n
dp[0] = 1
for i in range(0, m):
for j in range(1, n):
dp[j] += dp[j-1]
return dp[n-1]
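#%%
# Quick sanity check of both solutions against the examples in the problem statement
if __name__ == '__main__':
    assert S1().uniquePaths(3, 2) == 3 and S2().uniquePaths(3, 2) == 3
    assert S1().uniquePaths(7, 3) == 28 and S2().uniquePaths(7, 3) == 28
    print('examples OK')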
| 22.421053
| 171
| 0.58216
|
794cfd4ea0434fe59e96e1bccc692556ac08e0f2
| 162
|
py
|
Python
|
boot.py
|
jfcherng-sublime/ST-patcher-LSP-intelephense
|
97520041a572c8e07bef59388935020257768307
|
[
"MIT"
] | 3
|
2020-11-07T07:11:18.000Z
|
2021-06-11T13:24:48.000Z
|
boot.py
|
jfcherng-sublime/ST-patcher-LSP-intelephense
|
97520041a572c8e07bef59388935020257768307
|
[
"MIT"
] | null | null | null |
boot.py
|
jfcherng-sublime/ST-patcher-LSP-intelephense
|
97520041a572c8e07bef59388935020257768307
|
[
"MIT"
] | null | null | null |
from .plugin import set_up, tear_down
from .plugin.commands import *
def plugin_loaded() -> None:
set_up()
def plugin_unloaded() -> None:
tear_down()
| 14.727273
| 37
| 0.691358
|
794cfd562f49440ca45528c1aee0a1fd51e67212
| 3,484
|
py
|
Python
|
molly/apps/contact/providers/mit.py
|
mollyproject/mollyproject
|
3247c6bac3f39ce8d275d19aa410b30c6284b8a7
|
[
"Apache-2.0"
] | 7
|
2015-05-16T13:27:21.000Z
|
2019-08-06T11:09:24.000Z
|
molly/apps/contact/providers/mit.py
|
mollyproject/mollyproject
|
3247c6bac3f39ce8d275d19aa410b30c6284b8a7
|
[
"Apache-2.0"
] | null | null | null |
molly/apps/contact/providers/mit.py
|
mollyproject/mollyproject
|
3247c6bac3f39ce8d275d19aa410b30c6284b8a7
|
[
"Apache-2.0"
] | 4
|
2015-11-27T13:36:36.000Z
|
2021-03-09T17:55:53.000Z
|
from operator import itemgetter
import ldap
import ldap.filter
from molly.apps.contact.providers import BaseContactProvider, TooManyResults
class LDAPContactProvider(BaseContactProvider):
# See http://en.wikipedia.org/wiki/Nobility_particle for more information.
_NOBILITY_PARTICLES = set([
'de', 'van der', 'te', 'von', 'van', 'du', 'di'
])
def __init__(self, url, base_dn, phone_prefix='', phone_formatter=None,
alphabetical=False, query='(sn={surname})'):
self._url = url
self._base_dn = base_dn
if phone_formatter is None:
phone_formatter = lambda t: '%s%s' % (phone_prefix, t)
self._phone_formatter = phone_formatter
self.alphabetical = alphabetical
self.query = query
def normalize_query(self, cleaned_data, medium):
# Examples of initial / surname splitting
# William Bloggs is W, Bloggs
# Bloggs is , Bloggs
# W Bloggs is W, Bloggs
# Bloggs W is W, Bloggs
# Bloggs William is B, William
parts = cleaned_data['query'].split(' ')
parts = [p for p in parts if p]
i = 0
while i < len(parts)-1:
if parts[i].lower() in self._NOBILITY_PARTICLES:
parts[i:i+2] = [' '.join(parts[i:i+2])]
elif parts[i] == '':
parts[i:i+1] = []
else:
i += 1
parts = parts[:2]
if len(parts) == 1:
surname, forename = parts[0], None
elif parts[0].endswith(','):
surname, forename = parts[0][:-1], parts[1]
else:
surname, forename = parts[1], parts[0]
return {
'surname': surname,
'forename': forename,
}
def first_or_none(self, result, name):
try:
return result[1][name][0]
except (KeyError, IndexError):
return None
def perform_query(self, surname, forename):
ldap_server = ldap.initialize(self._url)
if forename is None:
forename = ''
try:
ldap_results = ldap_server.search_ext_s(
self._base_dn, ldap.SCOPE_SUBTREE,
self.query.format(
surname=ldap.filter.escape_filter_chars(surname),
forename=ldap.filter.escape_filter_chars(forename))
)
except ldap.NO_SUCH_OBJECT:
return []
except ldap.SIZELIMIT_EXCEEDED:
raise TooManyResults()
results = []
for ldap_result in ldap_results:
results.append({
'cn': self.first_or_none(ldap_result, 'cn'),
'sn': ldap_result[1].get('sn', []),
'givenName': ldap_result[1].get('givenName', []),
'telephoneNumber': map(self._phone_formatter,ldap_result[1].get('telephoneNumber', [])),
'roomNumber': ldap_result[1].get('roomNumber', []),
'title': ldap_result[1].get('title', []),
'facsimileTelephoneNumber': ldap_result[1].get('facsimileTelephoneNumber', []),
'ou': ldap_result[1].get('ou', []),
'mail': ldap_result[1].get('mail', []),
})
if self.alphabetical:
return sorted(results, key=itemgetter('sn', 'givenName'))
else:
return results
| 34.84
| 104
| 0.533295
|
794cfe102677ca87dcd8b7c801b845a511ff650f
| 6,898
|
py
|
Python
|
tests/periodic_tasks/content_diff/steps/steps_impl.py
|
uktrade/directory-tests
|
e54d3c4582bc19c10d8779d5146160fe0f644bf1
|
[
"MIT"
] | 4
|
2017-06-02T09:09:10.000Z
|
2018-01-25T19:06:12.000Z
|
tests/periodic_tasks/content_diff/steps/steps_impl.py
|
uktrade/directory-tests
|
e54d3c4582bc19c10d8779d5146160fe0f644bf1
|
[
"MIT"
] | 53
|
2016-10-27T22:31:03.000Z
|
2022-03-07T11:18:25.000Z
|
tests/periodic_tasks/content_diff/steps/steps_impl.py
|
uktrade/directory-tests
|
e54d3c4582bc19c10d8779d5146160fe0f644bf1
|
[
"MIT"
] | 3
|
2017-11-22T11:42:40.000Z
|
2022-02-21T01:20:04.000Z
|
# -*- coding: utf-8 -*-
import difflib
import json
import os
from typing import List
from urllib.parse import urljoin
import requests
from behave.runner import Context
from bs4 import BeautifulSoup
from envparse import env
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from retrying import retry
SITES_INVEST = {
"dev": env.str("DEV_INVEST_URL"),
"stage": env.str("STAGE_INVEST_URL"),
"uat": env.str("UAT_INVEST_URL"),
"prod": env.str("PROD_INVEST_URL"),
}
SITES_DOMESTIC = {
"dev": env.str("DEV_DOMESTIC_URL"),
"stage": env.str("STAGE_DOMESTIC_URL"),
"uat": env.str("UAT_DOMESTIC_URL"),
"prod": env.str("PROD_DOMESTIC_URL"),
}
SITES_FAS = {
"dev": env.str("DEV_FIND_A_SUPPLIER_URL"),
"stage": env.str("STAGE_FIND_A_SUPPLIER_URL"),
"uat": env.str("UAT_FIND_A_SUPPLIER_URL"),
"prod": env.str("PROD_FIND_A_SUPPLIER_URL"),
}
SITES_INTERNATIONAL = {
"dev": env.str("DEV_INTERNATIONAL_URL"),
"stage": env.str("STAGE_INTERNATIONAL_URL"),
"uat": env.str("UAT_INTERNATIONAL_URL"),
"prod": env.str("PROD_INTERNATIONAL_URL"),
}
BASICAUTH_USER = os.environ["DEV_BASICAUTH_USER"]
BASICAUTH_PASS = os.environ["DEV_BASICAUTH_PASS"]
def get_basic_auth():
return BASICAUTH_USER, BASICAUTH_PASS
def retry_if_network_error(exception: Exception) -> bool:
return isinstance(exception, (Timeout, ConnectionError, TooManyRedirects))
def merge_data_section_lines(lines, data_section_lines):
"""Merge data section lines into one line.
This is because:
on current invest:
<p><span>168 Milliarden GBP</span> Beitrag zur britischen Wirtschaftsleistung</p>
and on new invest (dev):
<div class="data">
<span class="data-item font-xlarge">168 Milliarden GBP</span>
<span>Beitrag zur britischen Wirtschaftsleistung</span>
</div>
"""
if data_section_lines:
index = lines.index(data_section_lines[0])
lines[index] = " ".join(data_section_lines)
lines.pop(index + 1)
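# For example, with the data section shown in the docstring above, an input of
#   lines = ['168 Milliarden GBP', 'Beitrag zur britischen Wirtschaftsleistung', ...]
# together with those same two strings as data_section_lines is collapsed in place to
#   lines = ['168 Milliarden GBP Beitrag zur britischen Wirtschaftsleistung', ...]
# so both versions of the page yield the same single line for the diff.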
def get_text(content: str, section_name: str) -> List[str]:
soup = BeautifulSoup(content, "lxml")
section = soup.find(section_name)
if not section:
section = soup.find("body")
for element in section.findAll(["script", "css", "img", "style", "select"]):
element.extract()
for element in section.select("#beta-bar"):
element.extract()
for element in section.select("#error-reporting-section-contact-us"):
element.extract()
# list of companies on FAS Industry pages
for element in section.select("#companies-section ul"):
element.extract()
data_section_lines = [
line
for span in section.findAll("div", class_="data")
for line in span.get_text().splitlines()
if line
]
lines = [line.strip() for line in section.get_text().splitlines() if line.strip()]
merge_data_section_lines(lines, data_section_lines)
return lines
@retry(
wait_fixed=30000,
stop_max_attempt_number=3,
retry_on_exception=retry_if_network_error,
wrap_exception=False,
)
def extract_page_content(
context: Context,
section: str,
endpoint: str,
service: str,
site_a: str,
site_b: str,
):
if service.lower() == "fas":
sites = SITES_FAS
elif service.lower() == "domestic":
sites = SITES_DOMESTIC
elif service.lower() == "invest":
sites = SITES_INVEST
elif service.lower() == "international":
sites = SITES_INTERNATIONAL
    env_a = site_a.lower()
    env_b = site_b.lower()
    site_a = sites[env_a]
    site_b = sites[env_b]
    url_a = urljoin(site_a, endpoint) if endpoint != "/" else site_a
    url_b = urljoin(site_b, endpoint) if endpoint != "/" else site_b
    # only the non-production environments sit behind basic auth
    auth_a = get_basic_auth() if env_a != "prod" else None
    auth_b = get_basic_auth() if env_b != "prod" else None
response_a = requests.get(url_a, auth=auth_a)
response_b = requests.get(url_b, auth=auth_b)
content_a = response_a.content
content_b = response_b.content
text_a = get_text(content_a, section)
text_b = get_text(content_b, section)
response_time_a = int(response_a.elapsed.total_seconds() * 1000)
response_time_b = int(response_b.elapsed.total_seconds() * 1000)
contents = {
"endpoint": endpoint,
"site_a": {
"site": site_a,
"url": url_a,
"text": text_a,
"response_time": response_time_a,
},
"site_b": {
"site": site_b,
"url": url_b,
"text": text_b,
"response_time": response_time_b,
},
}
context.contents = contents
def look_for_differences(context: Context):
contents = context.contents
endpoint = contents["endpoint"]
url_a = contents["site_a"]["url"]
url_b = contents["site_b"]["url"]
text_a = contents["site_a"]["text"]
text_b = contents["site_b"]["text"]
missing_page = "This page cannot be found"
found_on_both_sites = True
if (missing_page in text_a) and (missing_page in text_b):
text_a.append(f"Page is not present on both sites. Check {url_a}")
text_b.append(f"Page is not present on both sites. Check {url_b}")
found_on_both_sites = False
from_desc_url_a = f"<a href='{url_a}' target=_blank>{url_a}</a>"
from_desc_url_b = f"<a href='{url_b}' target=_blank>{url_b}</a>"
html = difflib.HtmlDiff(tabsize=4, wrapcolumn=120).make_file(
text_a,
text_b,
fromdesc=from_desc_url_a,
todesc=from_desc_url_b,
context=True,
numlines=1,
)
sm = difflib.SequenceMatcher(None, text_a, text_b)
contents["similarity"] = int(sm.ratio() * 100)
clean_endpoint = endpoint
if clean_endpoint.startswith("/"):
clean_endpoint = clean_endpoint[1:]
if clean_endpoint.endswith("/"):
clean_endpoint = clean_endpoint[:-1]
# https://stackoverflow.com/questions/3411771/multiple-character-replace-with-python
clean_endpoint = clean_endpoint.replace("/", "_")
clean_endpoint = clean_endpoint.replace("?", "_")
clean_endpoint = clean_endpoint.replace("=", "_")
clean_endpoint = clean_endpoint.replace("__", "_")
clean_endpoint = clean_endpoint or "home"
report_name = "./reports/{}.html".format(clean_endpoint)
with open(report_name, "w") as file:
file.write(html)
contents_file_name = "./reports/{}.json".format(clean_endpoint)
with open(contents_file_name, "w") as file:
file.write(json.dumps(contents))
assert found_on_both_sites, f"{endpoint} doesn't exist on both sites"
no_differences = "No Differences Found" in html
not_found = "This page cannot be found" in html.replace(" ", " ")
assert not not_found, f"{endpoint} was not found see {report_name}"
assert no_differences, f"Found differences on {endpoint} see {report_name}"
| 32.691943
| 88
| 0.66512
|
794cfe8ffd22e81dd96154601773956c56ed9e69
| 382
|
py
|
Python
|
Python/Project.Euler/Answers.Python/16.py
|
jinlibao/toolkits
|
529589832c130e2a33f96bb8fc3dcba952d3ecad
|
[
"MIT"
] | 1
|
2015-08-26T14:18:32.000Z
|
2015-08-26T14:18:32.000Z
|
Python/Project.Euler/Answers.Python/16.py
|
imthomasking/MATLAB-files
|
529589832c130e2a33f96bb8fc3dcba952d3ecad
|
[
"MIT"
] | null | null | null |
Python/Project.Euler/Answers.Python/16.py
|
imthomasking/MATLAB-files
|
529589832c130e2a33f96bb8fc3dcba952d3ecad
|
[
"MIT"
] | 1
|
2021-05-03T09:22:27.000Z
|
2021-05-03T09:22:27.000Z
|
# problem 16
# Project Euler
__author__ = 'Libao Jin'
__date__ = 'July 13, 2015'
def PowerDigitSum(powerOrder):
power = 2 ** powerOrder
strPower = str(power)
intPower = []
for i in strPower:
intPower.append(int(i))
# pds: PowerDigitSum
pds = sum(intPower)
return [pds, power, intPower]
def test():
powerOrder = 1000
pds = PowerDigitSum(powerOrder)
print(pds)
test()
| 16.608696
| 32
| 0.696335
|
794cff9c7495b883f924caf58ccc9b6197d4a00e
| 2,387
|
py
|
Python
|
src/jgikbase/test/idmapping/builder_test.py
|
jgi-kbase/IDMappingService
|
9d9f01662c4b09ac873174b7119d62828965e116
|
[
"MIT"
] | null | null | null |
src/jgikbase/test/idmapping/builder_test.py
|
jgi-kbase/IDMappingService
|
9d9f01662c4b09ac873174b7119d62828965e116
|
[
"MIT"
] | 118
|
2018-07-13T18:43:07.000Z
|
2019-11-13T02:52:48.000Z
|
src/jgikbase/test/idmapping/builder_test.py
|
jgi-kbase/IDMappingService
|
9d9f01662c4b09ac873174b7119d62828965e116
|
[
"MIT"
] | 1
|
2018-07-02T17:56:57.000Z
|
2018-07-02T17:56:57.000Z
|
from jgikbase.idmapping.builder import IDMappingBuilder, IDMappingBuildException
from jgikbase.idmapping.core.user import AuthsourceID
from jgikbase.test.idmapping.user_lookup_test_module import FakeUserLookup
from pytest import raises
from jgikbase.test.idmapping.test_utils import assert_exception_correct
from jgikbase.idmapping.core.user_lookup import LookupInitializationError
# this tests the parts of the builder that don't require starting up mongoDB. Those
# are tested in integration tests.
# For now, that means the UserLookup loading code.
TEST_MODULE = 'jgikbase.test.idmapping.user_lookup_test_module'
def test_build_user_lookup():
b = IDMappingBuilder()
ul = b.build_user_lookup(AuthsourceID('foo'), TEST_MODULE, {'asid': 'foo'})
assert ul.cfg == {'asid': 'foo'}
assert isinstance(ul, FakeUserLookup) is True
def test_build_user_lookup_fail_input():
a = AuthsourceID('i')
fail_build_user_lookup(None, 'm', {}, TypeError('config_authsource_id cannot be None'))
fail_build_user_lookup(a, None, {}, TypeError('factory_module cannot be None'))
fail_build_user_lookup(a, 'm', None, TypeError('config cannot be None'))
def test_build_user_lookup_fail_import():
m = 'jgikbase.test.idmapping.this_module_does_not_exist'
fail_build_user_lookup(AuthsourceID('i'), m, {}, IDMappingBuildException(
'Could not import module ' + m + ": No module named '" + m + "'"))
def test_build_user_lookup_fail_init():
fail_build_user_lookup(AuthsourceID('i'), TEST_MODULE, {'initfail': 'nope, sorry'},
LookupInitializationError('nope, sorry'))
def test_build_user_lookup_fail_init_unexpected():
fail_build_user_lookup(AuthsourceID('i'), TEST_MODULE, {'initunex': 'well crap'},
IDMappingBuildException('Could not build module ' + TEST_MODULE +
': well crap'))
def test_build_user_lookup_fail_id_mismatch():
fail_build_user_lookup(
AuthsourceID('i'), TEST_MODULE, {'asid': 'j'}, IDMappingBuildException(
'User lookup authsource ID mismatch: configuration ID is i, module reports ID j'))
def fail_build_user_lookup(asid, module, cfg, expected):
with raises(Exception) as got:
IDMappingBuilder().build_user_lookup(asid, module, cfg)
assert_exception_correct(got.value, expected)
| 41.877193
| 94
| 0.725178
|
794d012ee6be02a265aba6ab0d0f2f85e192d57b
| 5,514
|
py
|
Python
|
utils/tagSchemeConverter.py
|
mahatmaWM/NCRFpp
|
b9784edd82f6b2062ee7e324c3b22acbc1a35540
|
[
"Apache-2.0"
] | null | null | null |
utils/tagSchemeConverter.py
|
mahatmaWM/NCRFpp
|
b9784edd82f6b2062ee7e324c3b22acbc1a35540
|
[
"Apache-2.0"
] | null | null | null |
utils/tagSchemeConverter.py
|
mahatmaWM/NCRFpp
|
b9784edd82f6b2062ee7e324c3b22acbc1a35540
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-11-27 16:53:36
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-01-09 21:39:10
"""
convert NER/Chunking tag schemes, i.e. BIO->BIOES, BIOES->BIO, IOB->BIO, IOB->BIOES
"""
from __future__ import print_function
import sys
import logging
def BIO2BIOES(input_file, output_file):
logging.info("Convert BIO -> BIOES for file: %s" % input_file)
with open(input_file, 'r') as in_file:
fins = in_file.readlines()
fout = open(output_file, 'w')
words = []
labels = []
for line in fins:
if len(line) < 3:
sent_len = len(words)
for idx in range(sent_len):
if "-" not in labels[idx]:
fout.write(words[idx] + " " + labels[idx] + "\n")
else:
label_type = labels[idx].split('-')[-1]
if "B-" in labels[idx]:
if (idx == sent_len - 1) or ("I-" not in labels[idx + 1]):
fout.write(words[idx] + " S-" + label_type + "\n")
else:
fout.write(words[idx] + " B-" + label_type + "\n")
elif "I-" in labels[idx]:
if (idx == sent_len - 1) or ("I-" not in labels[idx + 1]):
fout.write(words[idx] + " E-" + label_type + "\n")
else:
fout.write(words[idx] + " I-" + label_type + "\n")
fout.write('\n')
words = []
labels = []
else:
pair = line.strip('\n').split()
words.append(pair[0])
labels.append(pair[-1].upper())
fout.close()
logging.info("BIOES file generated: %s" % output_file)
def BIOES2BIO(input_file, output_file):
logging.info("Convert BIOES -> BIO for file:", input_file)
with open(input_file, 'r') as in_file:
fins = in_file.readlines()
fout = open(output_file, 'w')
words = []
labels = []
for line in fins:
if len(line) < 3:
sent_len = len(words)
for idx in range(sent_len):
if "-" not in labels[idx]:
fout.write(words[idx] + " " + labels[idx] + "\n")
else:
label_type = labels[idx].split('-')[-1]
if "E-" in labels[idx]:
fout.write(words[idx] + " I-" + label_type + "\n")
elif "S-" in labels[idx]:
fout.write(words[idx] + " B-" + label_type + "\n")
else:
fout.write(words[idx] + " " + labels[idx] + "\n")
fout.write('\n')
words = []
labels = []
else:
pair = line.strip('\n').split()
words.append(pair[0])
labels.append(pair[-1].upper())
fout.close()
logging.info("BIO file generated:", output_file)
def IOB2BIO(input_file, output_file):
logging.info("Convert IOB -> BIO for file:", input_file)
with open(input_file, 'r') as in_file:
fins = in_file.readlines()
fout = open(output_file, 'w')
words = []
labels = []
for line in fins:
if len(line) < 3:
sent_len = len(words)
for idx in range(sent_len):
if "I-" in labels[idx]:
label_type = labels[idx].split('-')[-1]
if (idx == 0) or (labels[idx - 1] == "O") or (label_type != labels[idx - 1].split('-')[-1]):
fout.write(words[idx] + " B-" + label_type + "\n")
else:
fout.write(words[idx] + " " + labels[idx] + "\n")
else:
fout.write(words[idx] + " " + labels[idx] + "\n")
fout.write('\n')
words = []
labels = []
else:
pair = line.strip('\n').split()
words.append(pair[0])
labels.append(pair[-1].upper())
fout.close()
logging.info("BIO file generated:", output_file)
def choose_label(input_file, output_file):
with open(input_file, 'r') as in_file:
fins = in_file.readlines()
with open(output_file, 'w') as fout:
for line in fins:
if len(line) < 3:
fout.write(line)
else:
pairs = line.strip('\n').split(' ')
fout.write(pairs[0] + " " + pairs[-1] + "\n")
if __name__ == '__main__':
'''Convert NER tag schemes among IOB/BIO/BIOES.
For example: if you want to convert the IOB tag scheme to BIO, then you run as following:
python tagSchemeConverter.py IOB2BIO input_iob_file output_bio_file
Input data format is the standard CoNLL 2003 data format.
'''
if sys.argv[1].upper() == "IOB2BIO":
IOB2BIO(sys.argv[2], sys.argv[3])
elif sys.argv[1].upper() == "BIO2BIOES":
BIO2BIOES(sys.argv[2], sys.argv[3])
elif sys.argv[1].upper() == "BIOES2BIO":
BIOES2BIO(sys.argv[2], sys.argv[3])
elif sys.argv[1].upper() == "IOB2BIOES":
IOB2BIO(sys.argv[2], "temp")
BIO2BIOES("temp", sys.argv[3])
else:
logging.info("Argument error: sys.argv[1] should belongs to \"IOB2BIO/BIO2BIOES/BIOES2BIO/IOB2BIOES\"")
| 38.291667
| 113
| 0.479144
|
794d01339ce9423bdc947f567695675d1f77ff66
| 8,957
|
py
|
Python
|
tests/core/full_node/test_conditions.py
|
todortron/chaingreen-blockchain
|
89fe435e5dc87de4a7bb4d64c1ad335d81f24b95
|
[
"Apache-2.0"
] | 1
|
2021-11-12T20:30:23.000Z
|
2021-11-12T20:30:23.000Z
|
tests/core/full_node/test_conditions.py
|
morrillup/chaingreen-blockchain
|
0b2d008dd10228670decf360d21448a65fce48a4
|
[
"Apache-2.0"
] | 19
|
2021-09-07T08:07:05.000Z
|
2022-03-29T08:10:34.000Z
|
tests/core/full_node/test_conditions.py
|
morrillup/chaingreen-blockchain
|
0b2d008dd10228670decf360d21448a65fce48a4
|
[
"Apache-2.0"
] | null | null | null |
"""
These are quick-to-run tests that check spends can be added to the blockchain when they're valid
or that they fail for the right reason when they're invalid.
"""
import atexit
import logging
import time
from typing import List, Optional, Tuple
import pytest
from blspy import G2Element
from clvm_tools.binutils import assemble
from chaingreen.consensus.blockchain import ReceiveBlockResult
from chaingreen.consensus.constants import ConsensusConstants
from chaingreen.types.announcement import Announcement
from chaingreen.types.blockchain_format.program import Program
from chaingreen.types.coin_record import CoinRecord
from chaingreen.types.coin_spend import CoinSpend
from chaingreen.types.condition_opcodes import ConditionOpcode
from chaingreen.types.full_block import FullBlock
from chaingreen.types.spend_bundle import SpendBundle
from chaingreen.util.errors import Err
from chaingreen.util.ints import uint32
from tests.block_tools import create_block_tools, test_constants
from tests.util.keyring import TempKeyring
from .ram_db import create_ram_blockchain
def cleanup_keyring(keyring: TempKeyring):
keyring.cleanup()
temp_keyring = TempKeyring()
keychain = temp_keyring.get_keychain()
atexit.register(cleanup_keyring, temp_keyring) # Attempt to cleanup the temp keychain
bt = create_block_tools(constants=test_constants, keychain=keychain)
log = logging.getLogger(__name__)
# This puzzle simply returns the solution as conditions.
# We call it the `EASY_PUZZLE` because it's pretty easy to solve.
EASY_PUZZLE = Program.to(assemble("1"))
EASY_PUZZLE_HASH = EASY_PUZZLE.get_tree_hash()
def initial_blocks(block_count: int = 4) -> List[FullBlock]:
blocks = bt.get_consecutive_blocks(
block_count,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=EASY_PUZZLE_HASH,
pool_reward_puzzle_hash=EASY_PUZZLE_HASH,
)
return blocks
async def check_spend_bundle_validity(
constants: ConsensusConstants,
blocks: List[FullBlock],
spend_bundle: SpendBundle,
expected_err: Optional[Err] = None,
) -> Tuple[List[CoinRecord], List[CoinRecord]]:
"""
    This test helper creates an extra block after the given blocks that contains the given
`SpendBundle`, and then invokes `receive_block` to ensure that it's accepted (if `expected_err=None`)
or fails with the correct error code.
"""
try:
connection, blockchain = await create_ram_blockchain(constants)
for block in blocks:
received_block_result, err, fork_height, coin_changes = await blockchain.receive_block(block)
assert err is None
additional_blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
)
newest_block = additional_blocks[-1]
received_block_result, err, fork_height, coin_changes = await blockchain.receive_block(newest_block)
if fork_height:
coins_added = await blockchain.coin_store.get_coins_added_at_height(uint32(fork_height + 1))
coins_removed = await blockchain.coin_store.get_coins_removed_at_height(uint32(fork_height + 1))
else:
coins_added = []
coins_removed = []
if expected_err is None:
assert err is None
assert received_block_result == ReceiveBlockResult.NEW_PEAK
assert fork_height == len(blocks) - 1
else:
assert err == expected_err
assert received_block_result == ReceiveBlockResult.INVALID_BLOCK
assert fork_height is None
return coins_added, coins_removed
finally:
# if we don't close the connection, the test process doesn't exit cleanly
await connection.close()
# we must call `shut_down` or the executor in `Blockchain` doesn't stop
blockchain.shut_down()
async def check_conditions(
condition_solution: Program, expected_err: Optional[Err] = None, spend_reward_index: int = -2
):
blocks = initial_blocks()
coin = list(blocks[spend_reward_index].get_included_reward_coins())[0]
coin_spend = CoinSpend(coin, EASY_PUZZLE, condition_solution)
spend_bundle = SpendBundle([coin_spend], G2Element())
# now let's try to create a block with the spend bundle and ensure that it doesn't validate
await check_spend_bundle_validity(bt.constants, blocks, spend_bundle, expected_err=expected_err)
class TestConditions:
@pytest.mark.asyncio
async def test_invalid_block_age(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_RELATIVE[0]} 2))"))
await check_conditions(conditions, expected_err=Err.ASSERT_HEIGHT_RELATIVE_FAILED)
@pytest.mark.asyncio
async def test_valid_block_age(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_RELATIVE[0]} 1))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_block_height(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE[0]} 4))"))
await check_conditions(conditions, expected_err=Err.ASSERT_HEIGHT_ABSOLUTE_FAILED)
@pytest.mark.asyncio
async def test_valid_block_height(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE[0]} 3))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_my_id(self):
blocks = initial_blocks()
coin = list(blocks[-2].get_included_reward_coins())[0]
wrong_name = bytearray(coin.name())
wrong_name[-1] ^= 1
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{wrong_name.hex()}))"))
await check_conditions(conditions, expected_err=Err.ASSERT_MY_COIN_ID_FAILED)
@pytest.mark.asyncio
async def test_valid_my_id(self):
blocks = initial_blocks()
coin = list(blocks[-2].get_included_reward_coins())[0]
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{coin.name().hex()}))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_seconds_absolute(self):
# TODO: make the test suite not use `time.time` so we can more accurately
# set `time_now` to make it minimal while still failing
time_now = int(time.time()) + 3000
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_SECONDS_ABSOLUTE[0]} {time_now}))"))
await check_conditions(conditions, expected_err=Err.ASSERT_SECONDS_ABSOLUTE_FAILED)
@pytest.mark.asyncio
async def test_valid_seconds_absolute(self):
time_now = int(time.time())
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_SECONDS_ABSOLUTE[0]} {time_now}))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_coin_announcement(self):
blocks = initial_blocks()
coin = list(blocks[-2].get_included_reward_coins())[0]
announce = Announcement(coin.name(), b"test_bad")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
@pytest.mark.asyncio
async def test_valid_coin_announcement(self):
blocks = initial_blocks()
coin = list(blocks[-2].get_included_reward_coins())[0]
announce = Announcement(coin.name(), b"test")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_puzzle_announcement(self):
announce = Announcement(EASY_PUZZLE_HASH, b"test_bad")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
@pytest.mark.asyncio
async def test_valid_puzzle_announcement(self):
announce = Announcement(EASY_PUZZLE_HASH, b"test")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(conditions)
| 38.943478
| 110
| 0.70671
|
794d016b07b6175381434b376a754f2fb94c51ed
| 489
|
py
|
Python
|
video_capture/capture.py
|
FrostyDesigner/Python_Scripts
|
ec9dcf1a8787e60e40cd72260618739a681087ef
|
[
"Unlicense"
] | 1
|
2021-07-05T22:30:47.000Z
|
2021-07-05T22:30:47.000Z
|
video_capture/capture.py
|
FrostyDesigner/Python_Scripts
|
ec9dcf1a8787e60e40cd72260618739a681087ef
|
[
"Unlicense"
] | 8
|
2020-03-24T15:58:07.000Z
|
2022-03-11T23:26:05.000Z
|
video_capture/capture.py
|
FrostyDesigner/Python_Scripts
|
ec9dcf1a8787e60e40cd72260618739a681087ef
|
[
"Unlicense"
] | null | null | null |
import cv2, time
video=cv2.VideoCapture(0)
a=1
while True:
a=a+1
check, frame = video.read()
print(check)
print(frame)
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#time.sleep(3)
cv2.imshow("Capturing", gray)
#key=cv2.waitKey(1000) # 1 second intevals
key=cv2.waitKey(1) # 1 millisecond intervals
#this is to break the loop with a "q" key (for quit)
if key==ord('q'):
break
print(a)
video.release()
cv2.destroyAllWindows()
| 17.464286
| 56
| 0.635992
|
794d02bc0c0c9f380584bc1f290d13395ed65e1b
| 1,214
|
py
|
Python
|
ProjectFiles/bin/Release/2.80/scripts/addons/uv_magic_uv/legacy/__init__.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2019-07-08T15:51:14.000Z
|
2019-07-08T15:51:14.000Z
|
ProjectFiles/bin/Release/2.80/scripts/addons/uv_magic_uv/legacy/__init__.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
ProjectFiles/bin/Release/2.80/scripts/addons/uv_magic_uv/legacy/__init__.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# <pep8-80 compliant>
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Nutti <nutti.metro@gmail.com>"
__status__ = "production"
__version__ = "5.2"
__date__ = "17 Nov 2018"
if "bpy" in locals():
import importlib
importlib.reload(op)
importlib.reload(ui)
importlib.reload(properites)
importlib.reload(preferences)
else:
from . import op
from . import ui
from . import properites
from . import preferences
import bpy
| 31.128205
| 74
| 0.716639
|